prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
import abc
from dbac_lib import dbac_util
import numpy as np
import pandas as pd
import os
import logging
import json
logger = logging.getLogger(__name__)
DB_NAMES = ['cub200', 'awa2']
_DB_SPLIT_KEYS = ['train_exps', 'test_exps', 'train_imgs', 'val_imgs', 'test_imgs', 'valid_prims',
'train_combs', 'test_combs']
DB_IMAGE_SPLITS = ['discarded', 'train', 'val', 'test']
DB_EXP_SPLITS = ['train', 'test']
DB_COMB_SPLITS = ['train', 'test']
class IDataset(metaclass=abc.ABCMeta):
def __init__(self, name, root_path):
# dataset name
self.name = name
# path to dataset root directory
self.root_path = root_path
# Placeholders
# array of label names [M]
self.labels_names = None
# array of image paths [N]
self.images_path = None
# array of labels [N x M]
self.labels = None
# array of label group names [G]
self.labels_group_names = None
# array of label groups [M] => [G]
self.labels_group = None
# Placeholders for the split file
# boolean array of valid primitives [M]
self.valid_primitives = None
# Valid expressions
# array of valid expressions (op, p1, p2) [E]
self.expressions = None
# array of expression splits [E] (0, 1)
self.expressions_split = None
# array splitting the images into train, val and test [N]
self.images_split = None
# Placeholders for combinations
# Combinations of expressions ((),()...) [C]
self.combinations = None
# Combinations splits [C] (0, 1, 2)
self.combinations_split = None
@abc.abstractmethod
def load_split(self, split_file, comb_file=None):
raise NotImplementedError()
@staticmethod
def factory(name, root_path):
db = None
if name == DB_NAMES[0]:
db = CUB200(root_path)
elif name == DB_NAMES[1]:
db = AWA2(root_path)
else:
raise ValueError("Dataset {} in directory {} is not defined.".format(name, root_path))
return db
class CUB200(IDataset):
def __init__(self, root_path):
super().__init__(DB_NAMES[0], root_path)
# read general info
df_att = pd.read_csv(os.path.join(self.root_path, 'attributes/attributes.txt'), sep=r'\s+',
names=['att_id', 'att_name'])
df_att_ant = pd.read_csv(os.path.join(self.root_path, 'attributes/image_attribute_labels.txt'), sep=r'\s+',
names=['img_id', 'att_id', 'is_pres', 'cert_id', 'time'])
df_images = pd.read_csv(os.path.join(self.root_path, 'images.txt'), sep=r'\s+', names=['img_id', 'img_path'])
df_labels = pd.read_csv(os.path.join(self.root_path, 'classes.txt'), sep=r'\s+', names=['cls_id', 'cls_name'])
df_is_train = pd.read_csv(os.path.join(self.root_path, 'train_test_split.txt'), sep=r'\s+',
names=['img_id', 'is_train'])
df_data = pd.read_csv(os.path.join(self.root_path, 'image_class_labels.txt'), sep=r'\s+',
names=['img_id', 'cls_id'])
# merge information
df_data =
|
pd.merge(df_images, df_data, on='img_id', how='left')
|
pandas.merge
|
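A minimal, self-contained sketch of the pandas.merge pattern this row's completion uses; the toy frames below are hypothetical stand-ins for df_images and df_data, not data from the source repository.
import pandas as pd
# Hypothetical stand-ins for the CUB-200 metadata frames built above.
df_images = pd.DataFrame({'img_id': [1, 2, 3], 'img_path': ['a.jpg', 'b.jpg', 'c.jpg']})
df_cls = pd.DataFrame({'img_id': [1, 2], 'cls_id': [10, 20]})
# Left merge on the shared key, as in the completion; img_id 3 gets NaN for cls_id.
merged = pd.merge(df_images, df_cls, on='img_id', how='left')
print(merged)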
from argparse import ArgumentParser
import json
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
from statistics import mean
from math import exp, log
import code
import pdb
model_order = [
"classical", "adversarial", "all", "human"
]
def setup_args():
parser = ArgumentParser()
parser.add_argument('-m', '--model', type=str, default=None, help='model file')
parser.add_argument('-rf', '--results-files', type=str, default=None, help='json files of results')
parser.add_argument('-pf', '--plot-folder', type=str, default=None, help='folder to save plots in')
args = parser.parse_args()
return args
def plot_accs_in_expectation_joint(metrics_dict, folder):
bin_high = 1
bin_low = 1/3
num_bins = 8
bin_ends = [round(bin_low + i / num_bins * (bin_high - bin_low), 2) for i in range(num_bins + 1)]
labels = []
tick_centers = []
str_bin_ends = [str(num).lstrip('0') for num in bin_ends]
for i, (low, high) in enumerate(zip(str_bin_ends, str_bin_ends[1:])):
# if i in results_dict['Human Accuracy in Expectation']:
labels.append('[' + low + '-' + high + ']')
tick_centers.append((float(low) + float(high)) / 2)
def find_bin(value):
i = 0
while value > bin_ends[i + 1]:
i += 1
return i
results_dict = {'Human Accuracy in Expectation': [], 'Accuracy in Expectation': [], 'Model': []}
big_results_dict = {'Human Accuracy in Expectation': [], 'Model': []}
for (name, task_name), metrics in metrics_dict.items():
human_accs = metrics.get('human_expected_acc')
model_accs = metrics.get('expected_acc')
binned_model_accs = {i:[] for i in range(len(bin_ends) - 1)}
for ha, ma in zip(human_accs, model_accs):
bin = find_bin(ha)
binned_model_accs[bin].append(ma)
binned_avg_model_acc = {i:binned_model_accs[i] for i in range(len(bin_ends) - 1) if len(binned_model_accs[i]) > 0}
human_acc_bins = [[i]*len(binned_avg_model_acc[i]) for i in range(len(bin_ends) - 1)]
human_acc_bins = [tick_centers[item] for sublist in human_acc_bins for item in sublist]
model = [name] * len(human_acc_bins)
human_model = ["human"] * len(human_acc_bins)
if binned_avg_model_acc:
big_results_dict['Human Accuracy in Expectation'].extend(human_accs)
big_results_dict['Model'].extend(human_model)
results_dict['Model'].extend(model)
results_dict['Human Accuracy in Expectation'].extend(human_acc_bins)
results_dict['Accuracy in Expectation'].extend([item for sublist in binned_avg_model_acc.values() for item in sublist])
sns.set_theme()
df = pd.DataFrame(results_dict)
fine_grained_df = pd.DataFrame(big_results_dict)
g = sns.JointGrid(
data=df,
x="Human Accuracy in Expectation",
y="Accuracy in Expectation",
hue="Model",
hue_order=model_order,
)
g.figure.delaxes(g.ax_marg_y)
g.plot_joint(
sns.lineplot,
**{
"data": df,
"palette": "muted",
"style": "Model",
"markers": ["o", "o", "o", "o"],
"dashes": False,
"markeredgecolor": None
}
)
sns.kdeplot(
data=fine_grained_df,
x='Human Accuracy in Expectation',
ax=g.ax_marg_x,
hue="Model",
hue_order=model_order,
clip = [tick_centers[0], tick_centers[-1]],
fill = True,
legend=False
)
g.ax_joint.set_xticks(tick_centers)
g.ax_joint.set_xticklabels(labels, fontsize=8)
os.makedirs(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()])), exist_ok=True)
plt.savefig(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()]), 'acc_expectation_joint.png'), bbox_inches='tight')
plt.clf()
def plot_accs_in_expectation(metrics_dict, folder):
bin_high = 1
bin_low = 1/3
num_bins = 8
bin_ends = [round(bin_low + i / num_bins * (bin_high - bin_low), 2) for i in range(num_bins + 1)]
def find_bin(value):
i = 0
while value > bin_ends[i + 1]:
i += 1
return i
results_dict = {'Human Accuracy in Expectation': [], 'Accuracy in Expectation': [], 'Model': []}
for (name, task_name), metrics in metrics_dict.items():
human_accs = metrics.get('human_expected_acc')
model_accs = metrics.get('expected_acc')
binned_model_accs = {i:[] for i in range(len(bin_ends) - 1)}
for ha, ma in zip(human_accs, model_accs):
bin = find_bin(ha)
binned_model_accs[bin].append(ma)
binned_avg_model_acc = {i:binned_model_accs[i] for i in range(len(bin_ends) - 1) if len(binned_model_accs[i]) > 0}
human_acc_bins = [[i]*len(binned_avg_model_acc[i]) for i in range(len(bin_ends) - 1)]
human_acc_bins = [item for sublist in human_acc_bins for item in sublist]
model = [name] * len(human_acc_bins)
if binned_avg_model_acc:
results_dict['Model'].extend(model)
results_dict['Human Accuracy in Expectation'].extend(human_acc_bins)
results_dict['Accuracy in Expectation'].extend([item for sublist in binned_avg_model_acc.values() for item in sublist])
sns.set_theme()
df = pd.DataFrame(results_dict)
g = sns.lineplot(
x="Human Accuracy in Expectation",
y="Accuracy in Expectation",
hue="Model",
hue_order=model_order,
data=df,
palette="muted",
style="Model",
markers=["o", "o", "o", "o"],
dashes=False,
markeredgecolor=None
)
labels = []
str_bin_ends = [str(num).lstrip('0') for num in bin_ends]
for i, (low, high) in enumerate(zip(str_bin_ends, str_bin_ends[1:])):
if i in results_dict['Human Accuracy in Expectation']:
labels.append('[' + low + '-' + high + ']')
g.set_xticks(range(len(labels)))
g.set_xticklabels(labels, fontsize=8)
os.makedirs(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()])), exist_ok=True)
plt.savefig(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()]), 'acc_expectation.png'))
plt.clf()
def plot_accs_against_plurality(metrics_dict, folder):
bin_high = 1
bin_low = 1/3
num_bins = 8
bin_ends = [round(bin_low + i / num_bins * (bin_high - bin_low), 2) for i in range(num_bins + 1)]
def find_bin(value):
i = 0
while value > bin_ends[i + 1]:
i += 1
return i
results_dict = {'Human Accuracy in Expectation': [], 'Accuracy Against Plurality': [], 'Model': []}
for (name, task_name), metrics in metrics_dict.items():
human_accs = metrics.get('human_expected_acc')
model_accs = metrics.get('majority_vote_acc')
binned_model_accs = {i:[] for i in range(len(bin_ends) - 1)}
for ha, ma in zip(human_accs, model_accs):
bin = find_bin(ha)
binned_model_accs[bin].append(ma)
binned_avg_model_acc = {i:binned_model_accs[i] for i in range(len(bin_ends) - 1) if len(binned_model_accs[i]) > 0}
human_acc_bins = [[i]*len(binned_avg_model_acc[i]) for i in range(len(bin_ends) - 1)]
human_acc_bins = [item for sublist in human_acc_bins for item in sublist]
model = [name] * len(human_acc_bins)
if binned_avg_model_acc:
results_dict['Model'].extend(model)
results_dict['Human Accuracy in Expectation'].extend(human_acc_bins)
results_dict['Accuracy Against Plurality'].extend([item for sublist in binned_avg_model_acc.values() for item in sublist])
sns.set_theme()
df = pd.DataFrame(results_dict)
g = sns.lineplot(
x="Human Accuracy in Expectation",
y="Accuracy Against Plurality",
hue="Model",
hue_order=model_order,
data=df,
palette="muted",
style="Model",
markers=["o", "o", "o", "o"],
dashes=False,
markeredgecolor=None
)
labels = []
str_bin_ends = [str(num).lstrip('0') for num in bin_ends]
for i, (low, high) in enumerate(zip(str_bin_ends, str_bin_ends[1:])):
if i in results_dict['Human Accuracy in Expectation']:
labels.append('[' + low + '-' + high + ']')
g.set_xticks(range(len(labels)))
g.set_xticklabels(labels, fontsize=8)
os.makedirs(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()])), exist_ok=True)
plt.savefig(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()]), 'acc_plurality.png'))
plt.clf()
def plot_accs_against_plurality_joint(metrics_dict, folder):
bin_high = 1
bin_low = 1/3
num_bins = 8
bin_ends = [round(bin_low + i / num_bins * (bin_high - bin_low), 2) for i in range(num_bins + 1)]
labels = []
tick_centers = []
str_bin_ends = [str(num).lstrip('0') for num in bin_ends]
for i, (low, high) in enumerate(zip(str_bin_ends, str_bin_ends[1:])):
# if i in results_dict['Human Accuracy in Expectation']:
labels.append('[' + low + '-' + high + ']')
tick_centers.append((float(low) + float(high)) / 2)
def find_bin(value):
i = 0
while value > bin_ends[i + 1]:
i += 1
return i
results_dict = {'Human Accuracy in Expectation': [], 'Accuracy Against Plurality': [], 'Model': []}
big_results_dict = {'Human Accuracy in Expectation': [], 'Model': []}
for (name, task_name), metrics in metrics_dict.items():
human_accs = metrics.get('human_expected_acc')
model_accs = metrics.get('majority_vote_acc')
binned_model_accs = {i:[] for i in range(len(bin_ends) - 1)}
for ha, ma in zip(human_accs, model_accs):
bin = find_bin(ha)
binned_model_accs[bin].append(ma)
binned_avg_model_acc = {i:binned_model_accs[i] for i in range(len(bin_ends) - 1) if len(binned_model_accs[i]) > 0}
human_acc_bins = [[i]*len(binned_avg_model_acc[i]) for i in range(len(bin_ends) - 1)]
human_acc_bins = [tick_centers[item] for sublist in human_acc_bins for item in sublist]
model = [name] * len(human_acc_bins)
human_model = ["human"] * len(human_acc_bins)
if binned_avg_model_acc:
big_results_dict['Human Accuracy in Expectation'].extend(human_accs)
big_results_dict['Model'].extend(human_model)
results_dict['Model'].extend(model)
results_dict['Human Accuracy in Expectation'].extend(human_acc_bins)
results_dict['Accuracy Against Plurality'].extend([item for sublist in binned_avg_model_acc.values() for item in sublist])
sns.set_theme()
df = pd.DataFrame(results_dict)
fine_grained_df = pd.DataFrame(big_results_dict)
g = sns.JointGrid(
data=df,
x="Human Accuracy in Expectation",
y="Accuracy Against Plurality",
hue="Model",
hue_order=model_order,
)
g.figure.delaxes(g.ax_marg_y)
g.plot_joint(
sns.lineplot,
**{
"data": df,
"palette": "muted",
"style": "Model",
"markers": ["o", "o", "o", "o"],
"dashes": False,
"markeredgecolor": None
}
)
sns.kdeplot(
data=fine_grained_df,
x='Human Accuracy in Expectation',
ax=g.ax_marg_x,
hue="Model",
hue_order=model_order,
clip = [tick_centers[0], tick_centers[-1]],
fill = True,
legend=False
)
g.ax_joint.set_xticks(tick_centers)
g.ax_joint.set_xticklabels(labels, fontsize=8)
os.makedirs(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()])), exist_ok=True)
plt.savefig(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()]), 'acc_plurality_joint.png'), bbox_inches='tight')
plt.clf()
def plot_calibration_curve(metrics_dict, folder):
bin_high = 1
bin_low = 1/3
num_bins = 8
bin_ends = [round(bin_low + i / num_bins * (bin_high - bin_low), 2) for i in range(num_bins + 1)]
labels = []
tick_centers = []
str_bin_ends = [str(num).lstrip('0') for num in bin_ends]
for i, (low, high) in enumerate(zip(str_bin_ends, str_bin_ends[1:])):
# if i in results_dict['Human Accuracy in Expectation']:
labels.append('[' + low + '-' + high + ']')
tick_centers.append((float(low) + float(high)) / 2)
def find_bin(value):
i = 0
while value > bin_ends[i + 1]:
i += 1
return i
results_dict = {'Model Confidence': [], 'Accuracy in Expectation': [], 'Model': []}
big_results_dict = {'Model Confidence': [], 'Model': []}
for (name, task_name), metrics in metrics_dict.items():
human_accs = metrics.get('model_confidence')
model_accs = metrics.get('expected_acc')
binned_model_accs = {i:[] for i in range(len(bin_ends) - 1)}
for ha, ma in zip(human_accs, model_accs):
bin = find_bin(ha)
binned_model_accs[bin].append(ma)
binned_avg_model_acc = {i:binned_model_accs[i] for i in range(len(bin_ends) - 1) if len(binned_model_accs[i]) > 0}
human_acc_bins = [[i]*len(binned_avg_model_acc[i]) for i in range(len(bin_ends) - 1)]
human_acc_bins = [tick_centers[item] for sublist in human_acc_bins for item in sublist]
model = [name] * len(human_acc_bins)
if binned_avg_model_acc:
big_results_dict['Model Confidence'].extend(human_accs)
big_results_dict['Model'].extend(model)
results_dict['Model'].extend(model)
results_dict['Model Confidence'].extend(human_acc_bins)
results_dict['Accuracy in Expectation'].extend([item for sublist in binned_avg_model_acc.values() for item in sublist])
sns.set_theme()
df = pd.DataFrame(results_dict)
fine_grained_df = pd.DataFrame(big_results_dict)
g = sns.JointGrid(
data=df,
x="Model Confidence",
y="Accuracy in Expectation",
hue="Model",
hue_order=model_order,
)
g.figure.delaxes(g.ax_marg_y)
g.plot_joint(
sns.lineplot,
**{
"data": df,
"palette": "muted",
"style": "Model",
"markers": ["o", "o", "o", "o"],
"dashes": False,
"markeredgecolor": None
}
)
sns.kdeplot(
data=fine_grained_df,
x='Model Confidence',
ax=g.ax_marg_x,
hue="Model",
hue_order=model_order,
clip = [tick_centers[0], tick_centers[-1]],
fill = True,
legend=False
)
g.ax_joint.set_xticks(tick_centers)
g.ax_joint.set_xticklabels(labels, fontsize=8)
os.makedirs(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()])), exist_ok=True)
plt.savefig(os.path.join(folder, '_'.join([k[0] for k in metrics_dict.keys()]), 'calibration_curve_expectation.png'), bbox_inches='tight')
plt.clf()
def plot_calibration_curve_plurality(metrics_dict, folder):
bin_high = 1
bin_low = 1/3
num_bins = 8
bin_ends = [round(bin_low + i / num_bins * (bin_high - bin_low), 2) for i in range(num_bins + 1)]
labels = []
tick_centers = []
str_bin_ends = [str(num).lstrip('0') for num in bin_ends]
for i, (low, high) in enumerate(zip(str_bin_ends, str_bin_ends[1:])):
# if i in results_dict['Human Accuracy in Expectation']:
labels.append('[' + low + '-' + high + ']')
tick_centers.append((float(low) + float(high)) / 2)
def find_bin(value):
i = 0
while value > bin_ends[i + 1]:
i += 1
return i
results_dict = {'Model Confidence': [], 'Accuracy Against Plurality': [], 'Model': []}
big_results_dict = {'Model Confidence': [], 'Model': []}
for (name, task_name), metrics in metrics_dict.items():
human_accs = metrics.get('model_confidence')
model_accs = metrics.get('majority_vote_acc')
binned_model_accs = {i:[] for i in range(len(bin_ends) - 1)}
for ha, ma in zip(human_accs, model_accs):
bin = find_bin(ha)
binned_model_accs[bin].append(ma)
binned_avg_model_acc = {i:binned_model_accs[i] for i in range(len(bin_ends) - 1) if len(binned_model_accs[i]) > 0}
human_acc_bins = [[i]*len(binned_avg_model_acc[i]) for i in range(len(bin_ends) - 1)]
human_acc_bins = [tick_centers[item] for sublist in human_acc_bins for item in sublist]
model = [name] * len(human_acc_bins)
if binned_avg_model_acc:
big_results_dict['Model Confidence'].extend(human_accs)
big_results_dict['Model'].extend(model)
results_dict['Model'].extend(model)
results_dict['Model Confidence'].extend(human_acc_bins)
results_dict['Accuracy Against Plurality'].extend([item for sublist in binned_avg_model_acc.values() for item in sublist])
sns.set_theme()
df =
|
pd.DataFrame(results_dict)
|
pandas.DataFrame
|
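For reference, a minimal sketch of the pandas.DataFrame construction from a dict of equal-length lists, which is what the completion does with results_dict; the values below are made up.
import pandas as pd
# Made-up dict with the same shape as results_dict: three aligned columns.
results = {'Model Confidence': [0.4, 0.6, 0.8],
'Accuracy Against Plurality': [0.5, 0.7, 0.9],
'Model': ['classical', 'adversarial', 'human']}
df = pd.DataFrame(results)  # one row per aligned position across the lists
print(df)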
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize("vals", [
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
res2 = arr.get_values()
tm.assert_numpy_array_equal(res2, vals)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.to_dense()[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.to_dense()[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.to_dense()[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.to_dense()[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = pd.SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul",
"truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
op = getattr(operator, op)
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.to_dense(), second.to_dense()),
fill_value=first.fill_value)
assert isinstance(res, SparseArray)
assert_almost_equal(res.to_dense(), exp.to_dense())
res2 = op(first, second.to_dense())
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.to_dense(), second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.to_dense(), 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.to_dense(), exp)
with np.errstate(all="ignore"):
for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
_check_op(op, first_arr, second_arr)
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='always',
category=DeprecationWarning)
warnings.filterwarnings(action='always',
category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
# float dtype's fill_value is np.nan, replaced by -1
s = SparseArray([0., 0., 0., 0.])
res = s.fillna(-1)
exp = SparseArray([0., 0., 0., 0.], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
# int dtype shouldn't have missing. No changes.
s = SparseArray([0, 0, 0, 0])
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
tm.assert_sp_array_equal(res, s)
s = SparseArray([0, 0, 0, 0], fill_value=0)
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
# fill_value can be nan if there is no missing hole.
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)
assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
def test_nonzero(self):
# Tests regression #21172.
sa = pd.SparseArray([
float('nan'),
float('nan'),
1, 0, 0,
2, 0, 0, 0,
3, 0, 0
])
expected = np.array([2, 5, 9], dtype=np.int32)
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
sa = pd.SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
class TestSparseArrayAnalytics:
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
def test_all(self, data, pos, neg):
# GH 17570
out = SparseArray(data).all()
assert out
out = SparseArray(data, fill_value=pos).all()
assert out
data[1] = neg
out = SparseArray(data).all()
assert not out
out = SparseArray(data, fill_value=pos).all()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_all(self, data, pos, neg):
# GH 17570
out = np.all(SparseArray(data))
assert out
out = np.all(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.all(SparseArray(data))
assert not out
out = np.all(SparseArray(data, fill_value=pos))
assert not out
# raises with a different message on py2.
msg = "the \'out\' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.all(SparseArray(data), out=np.array([]))
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
def test_any(self, data, pos, neg):
# GH 17570
out = SparseArray(data).any()
assert out
out = SparseArray(data, fill_value=pos).any()
assert out
data[1] = neg
out = SparseArray(data).any()
assert not out
out = SparseArray(data, fill_value=pos).any()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_any(self, data, pos, neg):
# GH 17570
out = np.any(SparseArray(data))
assert out
out = np.any(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.any(SparseArray(data))
assert not out
out = np.any(SparseArray(data, fill_value=pos))
assert not out
msg = "the \'out\' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.any(SparseArray(data), out=out)
def test_sum(self):
data = np.arange(10).astype(float)
out = SparseArray(data).sum()
assert out == 45.0
data[5] = np.nan
out = SparseArray(data, fill_value=2).sum()
assert out == 40.0
out = SparseArray(data, fill_value=np.nan).sum()
assert out == 40.0
def test_numpy_sum(self):
data = np.arange(10).astype(float)
out = np.sum(SparseArray(data))
assert out == 45.0
data[5] = np.nan
out = np.sum(SparseArray(data, fill_value=2))
assert out == 40.0
out = np.sum(SparseArray(data, fill_value=np.nan))
assert out == 40.0
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), out=out)
@pytest.mark.parametrize("data,expected", [
(np.array([1, 2, 3, 4, 5], dtype=float), # non-null data
SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0]))),
(np.array([1, 2, np.nan, 4, 5], dtype=float), # null data
SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])))
])
@pytest.mark.parametrize("numpy", [True, False])
def test_cumsum(self, data, expected, numpy):
cumsum = np.cumsum if numpy else lambda s: s.cumsum()
out = cumsum(SparseArray(data))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=np.nan))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=2))
tm.assert_sp_array_equal(out, expected)
if numpy: # numpy compatibility checks.
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), out=out)
else:
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
with pytest.raises(ValueError, match=msg):
SparseArray(data).cumsum(axis=axis)
def test_mean(self):
data = np.arange(10).astype(float)
out = SparseArray(data).mean()
assert out == 4.5
data[5] = np.nan
out = SparseArray(data).mean()
assert out == 40.0 / 9
def test_numpy_mean(self):
data = np.arange(10).astype(float)
out = np.mean(SparseArray(data))
assert out == 4.5
data[5] = np.nan
out = np.mean(SparseArray(data))
assert out == 40.0 / 9
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), out=out)
def test_ufunc(self):
# GH 13853 make sure ufunc is applied to fill_value
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([1, np.nan, 2, np.nan, 2])
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
tm.assert_sp_array_equal(np.sin(sparse), result)
def test_ufunc_args(self):
# GH 13853 make sure ufunc is applied to fill_value, including its arg
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([2, np.nan, 3, np.nan, -1])
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([2, 0, 3, -1], fill_value=2)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray([2, 0, 1, -1], fill_value=1)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
@pytest.mark.parametrize('fill_value', [0.0, np.nan])
def test_modf(self, fill_value):
# https://github.com/pandas-dev/pandas/issues/26946
sparse = pd.SparseArray([fill_value] * 10 + [1.1, 2.2],
fill_value=fill_value)
r1, r2 = np.modf(sparse)
e1, e2 = np.modf(np.asarray(sparse))
tm.assert_sp_array_equal(r1, pd.SparseArray(e1, fill_value=fill_value))
tm.assert_sp_array_equal(r2, pd.SparseArray(e2, fill_value=fill_value))
def test_nbytes_integer(self):
arr = SparseArray([1, 0, 0, 0, 2], kind='integer')
result = arr.nbytes
# (2 * 8) + 2 * 4
assert result == 24
def test_nbytes_block(self):
arr =
|
SparseArray([1, 2, 0, 0, 0], kind='block')
|
pandas.core.sparse.api.SparseArray
|
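As a hedged aside, the API named above (pandas.core.sparse.api.SparseArray) was removed in later pandas releases; the tests in this row target the older layout, and the modern spelling is pd.arrays.SparseArray. A small illustrative use with kind='block', mirroring the row's completion:
import pandas as pd
# Modern location of SparseArray; kind='block' stores runs of non-fill values.
arr = pd.arrays.SparseArray([1, 2, 0, 0, 0], kind='block')
print(arr.sp_values)  # the stored non-fill values: [1 2]
print(arr.sp_index)   # block index describing where they sit in the dense array
print(arr.nbytes)     # sparse storage footprint, smaller than the dense equivalent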
# To be run only once
if 0 == 1:
get_ipython().system('pip install gensim')
get_ipython().system('pip install PyLDAvis')
get_ipython().system('pip install spacy')
get_ipython().system('python -m spacy download en_core_web_sm')
# Importing modules
import pandas as pd
import os
os.chdir('..')
# Read data into papers
papers =
|
pd.read_csv('./data/NIPS Papers/papers.csv')
|
pandas.read_csv
|
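A self-contained sketch of the pandas.read_csv call the completion makes; an in-memory CSV stands in for the NIPS papers.csv path, which is not assumed to exist here.
import io
import pandas as pd
# In-memory stand-in for './data/NIPS Papers/papers.csv'.
csv_text = 'id,year,title\n1,1987,Example Paper A\n2,1988,Example Paper B\n'
papers = pd.read_csv(io.StringIO(csv_text))
print(papers.shape)  # (2, 3)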
import numpy as np
import imageio
import os
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
from brainio_base.stimuli import StimulusSet
class Stimulus:
def __init__(self, size_px=[448, 448], bit_depth=8,
stim_id=1000, save_dir='images', type_name='stimulus',
format_id='{0:04d}'):
self.save_dir = save_dir
self.stim_id = stim_id
self.format_id = format_id
self.type_name = type_name
self.white = np.uint8(2**bit_depth-1)
self.black = np.uint8(0)
self.gray = np.uint8(self.white/2+1)
self.size_px = size_px
self.objects = []
self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray
def add_object(self, stim_object):
self.objects.append(stim_object)
def build_stimulus(self):
for obj in self.objects:
self.stimulus[obj.mask] = obj.stimulus[obj.mask]
def clear_stimulus(self):
self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray
def show_stimulus(self):
my_dpi = 192
fig = plt.figure()
fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
ax = plt.axes([0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(self.stimulus, cmap='gray')
plt.show()
def save_stimulus(self):
file_name= self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
return file_name
class Grating:
def __init__(self, orientation=0, phase=0, sf=2, size_px=[448, 448], width=8,
contrast=1, bit_depth=8, pos=[0, 0], rad=5, sig=0,
stim_id=1000, format_id='{0:04d}', save_dir='images', type_name='grating'):
# save directory
self.save_dir = save_dir
self.stim_id = stim_id
self.format_id = format_id
# label for type of stimulus
self.type_name = type_name
# 1 channel colors, white, black, grey
self.white = np.uint8(2**bit_depth-1)
self.black = np.uint8(0)
self.gray = np.uint8(self.white/2+1)
# pixel dimensions of the image
self.size_px = np.array(size_px)
# position of image in field of view
self.pos = np.array(pos)
# pixel to visual field degree conversion
self.px_to_deg = self.size_px[1] / width
# size of stimulus in visual field in degrees
self.size = self.size_px / self.px_to_deg
# orientation in radians
self.orientation = orientation / 180 * np.pi
# phase of the grating
self.phase = phase / 180 * np.pi
# spatial frequency of the grating
self.sf = sf
# contrast of the grating
self.contrast = contrast
# make self.xv and self.yv store the degree positions of all pixels in the image
self.xv = np.zeros(size_px)
self.yv = np.zeros(size_px)
self.update_frame()
self.mask = np.ones(size_px, dtype=bool)
self.set_circ_mask(rad=rad)
self.tex = np.zeros(size_px)
self.stimulus = np.ones(size_px, dtype=np.uint8) * self.gray
self.envelope = np.ones(size_px)
if sig == 0:
self.update_tex()
else:
self.set_gaussian_envelope(sig)
def update_frame(self):
x = (np.arange(self.size_px[1]) - self.size_px[1]/2) / self.px_to_deg - self.pos[1]
y = (np.arange(self.size_px[0]) - self.size_px[0]/2) / self.px_to_deg - self.pos[0]
# all possible degree coordinates in matrices of points
self.xv, self.yv = np.meshgrid(x, y)
def update_tex(self):
# make the grating pattern
self.tex = (np.sin((self.xv * np.cos(self.orientation) + self.yv * np.sin(self.orientation)) *
self.sf * 2 * np.pi + self.phase) * self.contrast * self.envelope)
def update_stimulus(self):
self.stimulus[self.mask] = np.uint8(((self.tex[self.mask]+1)/2)*self.white)
self.stimulus[np.logical_not(self.mask)] = self.gray
def set_circ_mask(self, rad):
# apply operation to put a 1 for all points inclusively within the degree radius and a 0 outside it
self.mask = self.xv**2 + self.yv**2 <= rad ** 2
# same as circular mask but for an annulus
def set_annular_mask(self, inner_rad, outer_rad):
self.mask = (self.xv ** 2 + self.yv ** 2 <= outer_rad ** 2) * \
(self.xv ** 2 + self.yv ** 2 > inner_rad ** 2)
def set_gaussian_envelope(self, sig):
d = np.sqrt(self.xv**2 + self.yv**2)
self.envelope = np.exp(-d**2/(2 * sig**2))
self.update_tex()
def show_stimulus(self):
# pyplot stuff
self.update_stimulus()
my_dpi = 192
fig = plt.figure()
fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
ax = plt.axes([0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(self.stimulus, cmap='gray')
plt.show()
def save_stimulus(self):
# save to correct (previously specified) directory
self.update_stimulus()
file_name = self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
return file_name
def load_stim_info(stim_name, data_dir):
stim = pd.read_csv(os.path.join(data_dir, 'stimulus_set'), dtype={'image_id': str})
image_paths = dict((key, value) for (key, value) in zip(stim['image_id'].values,
[os.path.join(data_dir, image_name) for image_name
in stim['image_file_name'].values]))
stim_set = StimulusSet(stim[stim.columns[:-1]])
stim_set.image_paths = image_paths
stim_set.identifier = stim_name
return stim_set
def gen_blank_stim(degrees, size_px, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
stim = Stimulus(size_px=[size_px, size_px], type_name='blank_stim', save_dir=save_dir, stim_id=0)
stimuli = pd.DataFrame({'image_id': str(0), 'degrees': [degrees]})
image_names = (stim.save_stimulus())
stimuli['image_file_name'] = pd.Series(image_names)
stimuli['image_current_local_file_path'] = pd.Series(save_dir + os.sep + image_names)
stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)
def gen_grating_stim(degrees, size_px, stim_name, grat_params, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
width = degrees
nStim = grat_params.shape[0]
print('Generating stimulus: #', nStim)
stimuli = pd.DataFrame({'image_id': [str(n) for n in range(nStim)], 'degrees': [width] * nStim})
image_names = nStim * [None]
image_local_file_path = nStim * [None]
all_y = nStim * [None]
all_x = nStim * [None]
all_c = nStim * [None]
all_r = nStim * [None]
all_s = nStim * [None]
all_o = nStim * [None]
all_p = nStim * [None]
for i in np.arange(nStim):
stim_id = np.uint64(grat_params[i, 0] * 10e9 + grat_params[i, 1] * 10e7 + grat_params[i, 3] * 10e5 +
grat_params[i, 4] * 10e3 + grat_params[i, 5] * 10e1 + grat_params[i, 6])
grat = Grating(width=width, pos=[grat_params[i, 0], grat_params[i, 1]], contrast=grat_params[i, 2],
rad=grat_params[i, 3], sf=grat_params[i, 4], orientation=grat_params[i, 5],
phase=grat_params[i, 6], stim_id= stim_id, format_id='{0:012d}', save_dir=save_dir,
size_px=[size_px, size_px], type_name=stim_name)
image_names[i] = (grat.save_stimulus())
image_local_file_path[i] = save_dir + os.sep + image_names[i]
all_y[i] = grat_params[i, 0]
all_x[i] = grat_params[i, 1]
all_c[i] = grat_params[i, 2]
all_r[i] = grat_params[i, 3]
all_s[i] = grat_params[i, 4]
all_o[i] = grat_params[i, 5]
all_p[i] = grat_params[i, 6]
stimuli['position_y'] = pd.Series(all_y)
stimuli['position_x'] = pd.Series(all_x)
stimuli['contrast'] = pd.Series(all_c)
stimuli['radius'] = pd.Series(all_r)
stimuli['spatial_frequency'] = pd.Series(all_s)
stimuli['orientation'] =
|
pd.Series(all_o)
|
pandas.Series
|
import pandas as pd
""""
Ginni's test - Stack year wise columns side by side
"""
"""
Function to update the column headers by corresponding year.
"""
def update_col_headers(df, year):
df_cols = df.columns.tolist()
df_cols_updated = []
for col in df_cols:
col = col + '_' + year
df_cols_updated.append(col)
return df_cols_updated
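# Example: update_col_headers(df_2013, '2013') turns headers such as ['POP', 'CRIME'] into
# ['POP_2013', 'CRIME_2013'] (the column names are hypothetical; the helper only appends '_<year>').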
"""
Execution starts from here
"""
df_2013 = pd.read_excel('/Users/sshaik2/Criminal_Justice/Projects/main_census_merge/tests/test_data_files/ginni_2013.xlsx')
df_2013 = df_2013.drop(['YEAR'], axis=1)
df_2013.columns = update_col_headers(df_2013, '2013')
df_2014 =
|
pd.read_excel('/Users/sshaik2/Criminal_Justice/Projects/main_census_merge/tests/test_data_files/ginni_2014.xlsx')
|
pandas.read_excel
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcasted to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Utilities for loading datasets
"""
import os
import pandas as pd
from ..utils.load_data import load_from_tsfile_to_dataframe
__all__ = ["load_gunpoint",
"load_arrow_head",
"load_italy_power_demand",
"load_basic_motions",
"load_shampoo_sales",
"load_longley"]
__author__ = ['<NAME>', '<NAME>']
DIRNAME = 'data'
MODULE = os.path.dirname(__file__)
# time series classification data sets
def _load_dataset(name, split, return_X_y):
"""
Helper function to load datasets.
"""
if split in ["TRAIN", "TEST"]:
fname = name + '_' + split + '.ts'
abspath = os.path.join(MODULE, DIRNAME, name, fname)
X, y = load_from_tsfile_to_dataframe(abspath)
elif split == "ALL":
X = pd.DataFrame()
y = pd.Series()
for split in ["TRAIN", "TEST"]:
fname = name + '_' + split + '.ts'
abspath = os.path.join(MODULE, DIRNAME, name, fname)
result = load_from_tsfile_to_dataframe(abspath)
X = pd.concat([X, pd.DataFrame(result[0])])
y = pd.concat([y, pd.Series(result[1])])
else:
raise ValueError("Invalid split value")
# Return appropriately
if return_X_y:
return X, y
else:
X['class_val'] = pd.Series(y)
return X
def load_gunpoint(split='TRAIN', return_X_y=False):
"""
Loads the GunPoint time series classification problem and returns X and y
Parameters
----------
split: str{"ALL", "TRAIN", "TEST"}, optional (default="TRAIN")
Whether to load the train or test partition of the problem. By default it loads the train split.
return_X_y: bool, optional (default=False)
If True, returns (features, target) separately instead of a single dataframe with columns for
features and the target.
Returns
-------
X: pandas DataFrame with m rows and c columns
The time series data for the problem with m cases and c dimensions
y: numpy array
The class labels for each case in X
Details
-------
Dimensionality: univariate
Series length: 150
Train cases: 50
Test cases: 150
Number of classes: 2
This dataset involves one female actor and one male actor making a motion with their
hand. The two classes are: Gun-Draw and Point: For Gun-Draw the actors have their
hands by their sides. They draw a replicate gun from a hip-mounted holster, point it
at a target for approximately one second, then return the gun to the holster, and
their hands to their sides. For Point the actors have their gun by their sides.
They point with their index fingers to a target for approximately one second, and
then return their hands to their sides. For both classes, we tracked the centroid
of the actor's right hands in both X- and Y-axes, which appear to be highly
correlated. The data in the archive is just the X-axis.
Dataset details: http://timeseriesclassification.com/description.php?Dataset=GunPoint
"""
name = 'GunPoint'
return _load_dataset(name, split, return_X_y)
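# Usage sketch:
#   X, y = load_gunpoint(split='TRAIN', return_X_y=True)   # nested DataFrame of series plus label array
#   df = load_gunpoint(split='ALL')                         # train and test combined, with a 'class_val' column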
def load_italy_power_demand(split='TRAIN', return_X_y=False):
"""
Loads the ItalyPowerDemand time series classification problem and returns X and y
Parameters
----------
split: str{"ALL", "TRAIN", "TEST"}, optional (default="TRAIN")
Whether to load the train or test partition of the problem. By default it loads the train split.
return_X_y: bool, optional (default=False)
If True, returns (features, target) separately instead of a single dataframe with columns for
features and the target.
Returns
-------
X: pandas DataFrame with m rows and c columns
The time series data for the problem with m cases and c dimensions
y: numpy array
The class labels for each case in X
Details
-------
Dimensionality: univariate
Series length: 24
Train cases: 67
Test cases: 1029
Number of classes: 2
The data was derived from twelve monthly electrical power demand time series from Italy and
first used in the paper "Intelligent Icons: Integrating Lite-Weight Data Mining and
Visualization into GUI Operating Systems". The classification task is to distinguish days
from Oct to March (inclusive) from April to September.
Dataset details: http://timeseriesclassification.com/description.php?Dataset=ItalyPowerDemand
"""
name = 'ItalyPowerDemand'
return _load_dataset(name, split, return_X_y)
def load_japanese_vowels(split='TRAIN', return_X_y=False):
"""
Loads the JapaneseVowels time series classification problem and returns X and y.
Parameters
----------
split: str{"ALL", "TRAIN", "TEST"}, optional (default="TRAIN")
Whether to load the train or test partition of the problem. By default it loads the train split.
return_X_y: bool, optional (default=False)
If True, returns (features, target) separately instead of a single dataframe with columns for
features and the target.
Returns
-------
X: pandas DataFrame with m rows and c columns
The time series data for the problem with m cases and c dimensions
y: numpy array
The class labels for each case in X
Details
-------
Dimensionality: multivariate, 12
Series length: 29
Train cases: 270
Test cases: 370
Number of classes: 9
    A UCI Archive dataset. Nine Japanese male speakers were recorded saying the vowels 'a' and 'e'. A '12-degree
    linear prediction analysis' is applied to the raw recordings to obtain time series with 12 dimensions and
    an original length between 7 and 29. In this dataset, instances have been padded to the longest length,
    29. The classification task is to predict the speaker. Therefore, each instance is a transformed utterance of
    12*29 values with a single class label attached, [1...9]. The given training set comprises 30
    utterances for each speaker, while the test set has a varied distribution based on external factors of
    timing and experimental availability, between 24 and 88 instances per speaker. Reference: M. Kudo, <NAME>
and <NAME>. (1999). "Multidimensional Curve Classification Using Passing-Through Regions". Pattern
Recognition Letters, Vol. 20, No. 11--13, pages 1103--1111.
Dataset details: http://timeseriesclassification.com/description.php?Dataset=JapaneseVowels
"""
name = 'JapaneseVowels'
return _load_dataset(name, split, return_X_y)
def load_arrow_head(split='TRAIN', return_X_y=False):
"""
Loads the ArrowHead time series classification problem and returns X and y.
Parameters
----------
split: str{"ALL", "TRAIN", "TEST"}, optional (default="TRAIN")
Whether to load the train or test partition of the problem. By default it loads the train split.
return_X_y: bool, optional (default=False)
If True, returns (features, target) separately instead of a single dataframe with columns for
features and the target.
Returns
-------
X: pandas DataFrame with m rows and c columns
The time series data for the problem with m cases and c dimensions
y: numpy array
The class labels for each case in X
Details
-------
Dimensionality: univariate
Series length: 251
Train cases: 36
Test cases: 175
Number of classes: 3
The arrowhead data consists of outlines of the images of arrowheads. The shapes of the
projectile points are converted into a time series using the angle-based method. The
classification of projectile points is an important topic in anthropology. The classes
are based on shape distinctions such as the presence and location of a notch in the
arrow. The problem in the repository is a length normalised version of that used in
Ye09shapelets. The three classes are called "Avonlea", "Clovis" and "Mix"."
Dataset details: http://timeseriesclassification.com/description.php?Dataset=ArrowHead
"""
name = 'ArrowHead'
return _load_dataset(name, split, return_X_y)
def load_basic_motions(split='TRAIN', return_X_y=False):
"""
    Loads the BasicMotions time series classification problem and returns X and y.
Parameters
----------
split: str{"ALL", "TRAIN", "TEST"}, optional (default="TRAIN")
Whether to load the train or test partition of the problem. By default it loads the train split.
return_X_y: bool, optional (default=False)
If True, returns (features, target) separately instead of a single dataframe with columns for
features and the target.
Returns
-------
X: pandas DataFrame with m rows and c columns
The time series data for the problem with m cases and c dimensions
y: numpy array
The class labels for each case in X
Details
-------
    Dimensionality: multivariate, 6
    Series length: 100
    Train cases: 40
    Test cases: 40
    Number of classes: 4
    The BasicMotions data were generated as part of a student project in which participants performed
    four activities (walking, resting, running and badminton) while wearing a smart watch. The watch
    recorded 3D accelerometer and 3D gyroscope readings, giving six dimensions per case.
    Dataset details: http://timeseriesclassification.com/description.php?Dataset=BasicMotions
"""
name = 'BasicMotions'
return _load_dataset(name, split, return_X_y)
# forecasting data sets
def load_shampoo_sales(return_y_as_dataframe=False):
"""
Load the shampoo sales univariate time series dataset for forecasting.
Parameters
----------
return_y_as_dataframe: bool, optional (default=False)
Whether to return target series as series or dataframe, useful for high-level interface.
    - If True, returns target series as pandas.DataFrame.
- If False, returns target series as pandas.Series.
Returns
-------
y : pandas Series/DataFrame
Shampoo sales dataset
Details
-------
This dataset describes the monthly number of sales of shampoo over a 3 year period.
The units are a sales count.
Dimensionality: univariate
Series length: 36
Frequency: Monthly
Number of cases: 1
References
----------
..[1] Makridakis, Wheelwright and Hyndman (1998) Forecasting: methods and applications,
<NAME> & Sons: New York. Chapter 3.
"""
name = 'ShampooSales'
fname = name + '.csv'
path = os.path.join(MODULE, DIRNAME, name, fname)
data = pd.read_csv(path, index_col=0)
# change period index to simple numeric index
# TODO add support for period/datetime indexing
# data.index = pd.PeriodIndex(data.index, freq='M')
data = data.reset_index(drop=True)
if return_y_as_dataframe:
# return nested pandas DataFrame with a single row and column
return pd.DataFrame(pd.Series([pd.Series(data.squeeze())]), columns=[name])
else:
# return nested pandas Series with a single row
return pd.Series([data.iloc[:, 0]], name=name)
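# Usage sketch:
#   y = load_shampoo_sales()                                # nested pandas Series with a single row
#   y_df = load_shampoo_sales(return_y_as_dataframe=True)   # same data as a one-cell DataFrame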
def load_longley(return_X_y=False, return_y_as_dataframe=False):
"""
Load the Longley multivariate time series dataset for forecasting with exogenous variables.
Parameters
----------
return_y_as_dataframe: bool, optional (default=False)
Whether to return target series as series or dataframe, useful for high-level interface.
    - If True, returns target series as pandas.DataFrame.
- If False, returns target series as pandas.Series.
return_X_y: bool, optional (default=False)
If True, returns (features, target) separately instead of a single dataframe with columns for
features and the target.
Returns
-------
X: pandas.DataFrame
The exogenous time series data for the problem.
y: pandas.Series
The target series to be predicted.
Details
-------
This dataset contains various US macroeconomic variables from 1947 to 1962 that are known to be highly
collinear.
Dimensionality: multivariate, 6
Series length: 16
Frequency: Yearly
Number of cases: 1
Variable description:
TOTEMP - Total employment (y)
GNPDEFL - Gross national product deflator
GNP - Gross national product
UNEMP - Number of unemployed
ARMED - Size of armed forces
POP - Population
YEAR - Calendar year (index)
References
----------
..[1] <NAME>. (1967) "An Appraisal of Least Squares Programs for the
    Electronic Computer from the Point of View of the User." Journal of
the American Statistical Association. 62.319, 819-41.
(https://www.itl.nist.gov/div898/strd/lls/data/LINKS/DATA/Longley.dat)
"""
if return_y_as_dataframe and not return_X_y:
raise ValueError("`return_y_as_dataframe` can only be set to True if `return_X_y` is True, "
"otherwise y is given as a column in the returned dataframe and "
"cannot be returned as a separate dataframe.")
name = 'Longley'
fname = name + '.csv'
path = os.path.join(MODULE, DIRNAME, name, fname)
data = pd.read_csv(path, index_col=0)
data = data.set_index('YEAR')
# change period index to simple numeric index
# TODO add support for period/datetime indexing
# data.index = pd.PeriodIndex(data.index, freq='Y')
data = data.reset_index(drop=True)
# Get target series
yname = 'TOTEMP'
y = data.pop(yname)
y = pd.Series([y], name=yname)
# Get feature series
X = pd.DataFrame([pd.Series([data.iloc[:, i]]) for i in range(data.shape[1])]).T
X.columns = data.columns
if return_X_y:
if return_y_as_dataframe:
y = pd.DataFrame(pd.Series([pd.Series(y.squeeze())]), columns=[yname])
return X, y
else:
return X, y
else:
X[yname] = y
return X
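# Usage sketch:
#   X, y = load_longley(return_X_y=True)   # exogenous series in X, nested 'TOTEMP' target in y
#   df = load_longley()                    # single nested DataFrame with 'TOTEMP' as an extra column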
def load_lynx(return_y_as_dataframe=False):
"""
Load the lynx univariate time series dataset for forecasting.
Parameters
----------
return_y_as_dataframe: bool, optional (default=False)
Whether to return target series as series or dataframe, useful for high-level interface.
    - If True, returns target series as pandas.DataFrame.
- If False, returns target series as pandas.Series.
Returns
-------
y : pandas Series/DataFrame
Lynx sales dataset
Details
-------
The annual numbers of lynx trappings for 1821–1934 in Canada. This time-series records the number of skins of predators (lynx) that were
collected over several years by the Hudson's Bay Company. The dataset was
taken from Brockwell & Davis (1991) and appears to be the series
considered by Campbell & Walker (1977).
Dimensionality: univariate
Series length: 114
Frequency: Yearly
Number of cases: 1
Notes
-----
This data shows aperiodic, cyclical patterns, as opposed to periodic, seasonal patterns.
References
----------
..[1] <NAME>., <NAME>. and <NAME>. (1988). The New S Language. Wadsworth & Brooks/Cole.
..[2] <NAME>. and <NAME>. (1977). A Survey of statistical work on the Mackenzie River series of
annual Canadian lynx trappings for the years 1821–1934 and a new analysis. Journal of the Royal Statistical Society
series A, 140, 411–431.
"""
name = 'Lynx'
fname = name + '.csv'
path = os.path.join(MODULE, DIRNAME, name, fname)
data = pd.read_csv(path, index_col=0)
# change period index to simple numeric index
# TODO add support for period/datetime indexing
# data.index = pd.PeriodIndex(data.index, freq='Y')
data = data.reset_index(drop=True)
if return_y_as_dataframe:
# return nested pandas DataFrame with a single row and column
return pd.DataFrame(pd.Series([pd.Series(data.squeeze())]), columns=[name])
else:
# return nested pandas Series with a single row
return
|
pd.Series([data.iloc[:, 0]], name=name)
|
pandas.Series
|
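A minimal usage sketch for the two loaders above, assuming they are importable from the surrounding module (the commented import path below is a placeholder, not taken from the snippet) and that the bundled CSV files are present; it only illustrates the nested return format these loaders produce.
# from your_datasets_module import load_lynx, load_longley  # placeholder import path
y_lynx = load_lynx()                         # nested pd.Series with a single row
print(len(y_lynx.iloc[0]))                   # inner pd.Series of 114 yearly trapping counts
X, y = load_longley(return_X_y=True)         # nested exogenous frame + nested target
print(type(X.iloc[0, 0]), type(y.iloc[0]))   # each cell holds a pd.Series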
import pandas as pd
import numpy as np
import datetime
from collections import Counter
from sklearn.metrics.pairwise import cosine_similarity
movies_data = pd.read_csv("import/movies.csv")
genome_scores_data =
|
pd.read_csv("import/genome-scores.csv")
|
pandas.read_csv
|
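A hedged follow-on sketch for the snippet above: it assumes the usual MovieLens genome-scores layout (movieId, tagId and relevance columns), which the snippet itself does not show, and builds a movie-to-movie similarity matrix with the already-imported cosine_similarity.
# column names below are assumptions based on the standard MovieLens genome-scores schema
tag_matrix = genome_scores_data.pivot(index="movieId", columns="tagId", values="relevance").fillna(0)
item_sim = cosine_similarity(tag_matrix.values)            # movie-to-movie cosine similarity
item_sim_df = pd.DataFrame(item_sim, index=tag_matrix.index, columns=tag_matrix.index)
print(item_sim_df.shape)                                   # (n_movies, n_movies)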
import pandas as pd
import os
from decimal import Decimal
from api.models import UrbanInstituteRentalCrisisData
import boto3
BUCKET_NAME = 'hacko-data-archive'
KEY = '2018-housing-affordability/data/urbaninstitute/'
s3 = boto3.resource('s3')
class DjangoImport(object):
django_model = None
def __init__(self, file_loc):
"""
Base class to import housing-affordability CSV data into the database via the Django ORM.
Parameters:
file_loc: path to the source CSV file; the data year (2000, 2009 or 2014) is inferred from the
file-name suffix (_2000, _2005-09 or _2010-14)
"""
self.df = None
self.file_loc = file_loc
if file_loc.endswith('_2000.csv'):
year = 2000
elif file_loc.endswith('_2010-14.csv'):
year = 2014
elif file_loc.endswith('_2005-09.csv'):
year = 2009
else:
raise Exception('No valid year found in file name ' + file_loc)
self.year = year
def process_frame(self):
"""
Process the dataframe read from the source file (e.g. via pandas.read_csv) into the desired format for import and set it to self.df
"""
raise NotImplementedError("process_frame must be implemented by child class.")
def generate_objects(self):
"""
Generator function to create Django objects to save to the database. Takes the json generated from
self.generate_json and creates objects out of it.
"""
for body in self.generate_json():
obj = self.django_model(**body)
yield obj
def get_queryset(self):
"""
Returns all objects that come from this particular import, i.e. all self.django_model objects whose year matches self.year.
"""
return self.django_model.objects.filter(year=self.year)
def generate_json(self):
raise NotImplementedError("generate_json must be implemented by child class.")
def save(self, delete_existing=True, query=None):
"""
Adds the dataframe to the database via the Django ORM using self.generate_objects to generate Django objects.
Parameters:
delete_existing: option to delete the existing items for this import
query: Django Q object filter selecting which existing objects to delete before import. Defaults to everything returned by self.get_queryset() (i.e. everything with self.year).
"""
if self.df is None:
self.process_frame()
if self.df is None:
raise Exception("self.df has not been set, nothing to add to database.")
if delete_existing:
if query is None:
qs = self.get_queryset()
else:
qs = self.django_model.objects.filter(query)
# delete existing items in index
qs.delete()
results = self.django_model.objects.bulk_create(self.generate_objects(), batch_size=10000)
return len(results)
def is_valid_value(self, val):
try:
d = Decimal(val)
if pd.isnull(val):
return False
return True
except:
return False
class UrbanInstituteImport(DjangoImport):
django_model = UrbanInstituteRentalCrisisData
def process_frame(self):
df = pd.read_csv(self.file_loc, encoding='iso-8859-1')
df.columns = [c.lower() for c in df.columns]
self.df = df
def generate_json(self):
for ix, row in self.df.iterrows():
state_flag = row['state_flag'] != 0
eli_limit = row['l30_4']
if pd.isnull(eli_limit):
continue
eli_renters = row['st_total' if state_flag else 'total']
aaa_units = row['st_units' if state_flag else 'units']
noasst_units = row['st_unitsnoasst' if state_flag else 'unitsnoasst']
hud_units = row['st_hud' if state_flag else 'hud']
usda_units = row['st_usda' if state_flag else 'usda']
no_hud_units = row['st_units_no_hud' if state_flag else 'units_no_hud']
no_usda_units = row['st_units_no_usda' if state_flag else 'units_no_usda']
county_name = row['countyname']
if not county_name.lower().endswith('county'):
county_name = county_name + ' County'
body = {
'year': self.year,
'eli_limit': eli_limit if
|
pd.notnull(eli_limit)
|
pandas.notnull
|
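An illustrative driver for the importer classes above (not part of the original module); the CSV path is hypothetical, and running save() end to end also requires the remainder of generate_json, which the row above truncates.
importer = UrbanInstituteImport("data/urbaninstitute/rental_crisis_2000.csv")  # year parsed from the _2000 suffix
importer.process_frame()           # reads the CSV into importer.df
n_created = importer.save()        # deletes prior year-2000 rows, then bulk-creates the new ones
print("rows imported:", n_created)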
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_series_box_timestamp(self):
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
assert isinstance(ser[5], pd.Timestamp)
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng, index=rng)
assert isinstance(ser[5], pd.Timestamp)
assert isinstance(ser.iat[5], pd.Timestamp)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_categorial_assigning_ops(self):
orig = Series(Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]),
index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_take(self):
s = Series([-1, 5, 6, 2, 4])
actual = s.take([1, 3, 4])
expected = Series([5, 2, 4], index=[1, 3, 4])
tm.assert_series_equal(actual, expected)
actual = s.take([-1, 3, 4])
expected = Series([4, 2, 4], index=[4, 3, 4])
tm.assert_series_equal(actual, expected)
pytest.raises(IndexError, s.take, [1, 10])
pytest.raises(IndexError, s.take, [2, 5])
with tm.assert_produces_warning(FutureWarning):
s.take([-1, 3, 4], convert=False)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
pytest.raises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame({c: [1, 2, 3] for c in ['a', 'b', 'c']})
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
|
pd.set_option('chained_assignment', None)
|
pandas.set_option
|
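The completion above silences pandas' chained-assignment warning for the remainder of the test module. A small standalone illustration of the same option, using the fully qualified option name (the short form 'chained_assignment' resolves to the same setting):
import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3]})
pd.set_option("mode.chained_assignment", None)    # suppress SettingWithCopyWarning
df[df["a"] > 1]["a"] = 0                          # chained assignment: silently acts on a copy
pd.set_option("mode.chained_assignment", "warn")  # restore the default behaviour
print(df["a"].tolist())                           # [1, 2, 3] -- the original frame is unchanged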
#!/usr/bin/python
import platform; print(platform.platform())
import sys; print("Python", sys.version)
import os
import numpy as np
import pandas as pd
from joblib import load
from sklearn.metrics import mean_squared_error
# read configuration from environment variables
MODEL_FILE = os.environ["MODEL_FILE"]
SCALER_FILE = os.environ["SCALER_FILE"]
DATA_FILE = os.environ["DATA_FILE"]
DATA_DIR = os.environ["DATA_DIR"]
DATA_PATH = os.path.join(DATA_DIR, DATA_FILE)
def inference():
# load the data file and transform it
data =
|
pd.read_csv(DATA_PATH)
|
pandas.read_csv
|
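A hedged sketch of how inference() might continue after the completion above: the target column name "target" is a placeholder assumption, since the layout of DATA_FILE is not shown, and only objects already imported in the snippet (joblib's load, mean_squared_error) are used.
X = data.drop(columns=["target"])            # "target" is assumed, not taken from the source
y_true = data["target"]
scaler = load(SCALER_FILE)                   # joblib-persisted scaler
model = load(MODEL_FILE)                     # joblib-persisted estimator
y_pred = model.predict(scaler.transform(X))
print("mean squared error:", mean_squared_error(y_true, y_pred))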
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
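# Hedged illustration (not part of the original test-suite) of the behaviour exercised
# by TestDatetimeIndexOps.test_add_iadd / test_sub_isub above: adding or subtracting a
# fixed offset shifts every element of a tz-aware DatetimeIndex while keeping its timezone.
def _offset_shift_example():
    rng = pd.date_range('2000-01-01', periods=3, freq='D', tz='US/Eastern')
    shifted = rng + pd.offsets.Hour(2)        # 02:00 on each day, still US/Eastern
    assert (shifted - rng == pd.Timedelta(hours=2)).all()
    return shifted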
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result,
|
pd.Timedelta('1 day')
|
pandas.Timedelta
|
import numpy as np
import pandas as pd
import pickle
import os
from math import ceil
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import copy
from scipy.stats import entropy
from matplotlib.ticker import FormatStrFormatter
import statsmodels.api as sm
from sklearn.metrics import r2_score
def data_process_continuous(data):
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])/3600
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_middle_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])/3600
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct']))/data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_middle_temp, Accuracy_temp,accuracy_all
def calculate_error(result_df):
# correct error data
result_df.loc[result_df['Predict_duration'] > 86400, 'Predict_duration'] = 86400
result_df.loc[result_df['Predict_duration'] <= 0, 'Predict_duration'] = 1
######
result_df['error_sq'] = (result_df['Predict_duration'] - result_df['Ground_truth_duration']) ** 2
result_df['error_abs'] = np.abs(result_df['Predict_duration'] - result_df['Ground_truth_duration'])
RMSE = np.sqrt(np.mean(result_df['error_sq']))
MAPE = np.mean(result_df['error_abs'] / result_df['Ground_truth_duration'])
MAE = np.mean(result_df['error_abs'])
R_sq = r2_score(result_df['Ground_truth_duration'], result_df['Predict_duration'])
return RMSE, MAPE, MAE, R_sq
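# Illustrative sanity check (not part of the original pipeline; the toy frame
# below is an assumption): a perfect prediction should yield
# RMSE = MAPE = MAE = 0 and R_sq = 1.
def _calculate_error_sanity_check():
    toy = pd.DataFrame({'Predict_duration': [1800.0, 3600.0, 7200.0],
                        'Ground_truth_duration': [1800.0, 3600.0, 7200.0]})
    return calculate_error(toy)  # expected: (0.0, 0.0, 0.0, 1.0)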
def r_sq_for_two_parts(data,y_mean):
data['RES'] = (data['Ground_truth_duration'] - data['Predict_duration'])**2
data['TOT'] = (data['Ground_truth_duration'] - y_mean)**2
R_sq = 1 - sum(data['RES'])/sum(data['TOT'])
return R_sq
def data_process_continuous_R_sq(data):
_, _, _, R_sq_all = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_middle = data.loc[data['activity_index']!=0].copy()
mean_y = np.mean(data['Ground_truth_duration'])
R_sq_first = r_sq_for_two_parts(data_first, mean_y)
R_sq_middle = r_sq_for_two_parts(data_middle, mean_y)
return R_sq_first, R_sq_middle, R_sq_all
def data_process_continuous_RMSE(data):
RMSE_all, _, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_middle = data.loc[data['activity_index']!=0].copy()
RMSE_first, _, _, R_sq_first = calculate_error(data_first)
RMSE_middle, _, _, R_sq_middle = calculate_error(data_middle)
return RMSE_first, RMSE_middle, RMSE_all
def data_process_continuous_MAPE(data):
_, MAPE_all, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_middle = data.loc[data['activity_index']!=0].copy()
_, MAPE_first, _, R_sq_first = calculate_error(data_first)
_, MAPE_middle, _, R_sq_middle = calculate_error(data_middle)
return MAPE_first, MAPE_middle, MAPE_all
def data_process_discrete(data):
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_middle_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct'])) / data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_middle_temp, Accuracy_temp, accuracy_all
def calculate_accuracy(result_df, task = None):
if task == 'loc':
RMSE = -1
MAPE = -1
MAE = -1
R_sq = -1
else:
# correct error data
result_df.loc[result_df['Predict_duration'] > 86400, 'Predict_duration'] = 86400
result_df.loc[result_df['Predict_duration'] <= 0, 'Predict_duration'] = 1
result_df['error_sq'] = (result_df['Predict_duration'] - result_df['Ground_truth_duration'])**2
result_df['error_abs'] = np.abs(result_df['Predict_duration'] - result_df['Ground_truth_duration'])
RMSE = np.sqrt(np.mean(result_df['error_sq']))
MAPE = np.mean(result_df['error_abs']/result_df['Ground_truth_duration'])
MAE = np.mean(result_df['error_abs'])
R_sq = r2_score(result_df['Ground_truth_duration'], result_df['Predict_duration'])
N_first = result_df['Correct'].loc[result_df['activity_index']==0].count()
Accuracy_first = result_df['Correct'].loc[(result_df['Correct']==1)&
(result_df['activity_index']==0)].count()/N_first
N_middle = result_df['Correct'].loc[result_df['activity_index']!=0].count()
Accuracy_middle = result_df['Correct'].loc[(result_df['Correct']==1)&
(result_df['activity_index']!=0)].count()/N_middle
N_all = result_df['Correct'].count()
Accuracy_all = result_df['Correct'].loc[result_df['Correct']==1].count()/N_all
return Accuracy_first, Accuracy_middle, Accuracy_all, N_first, N_middle, N_all, RMSE, MAPE, MAE, R_sq
def get_accuracy_and_num_act(individual_ID_list, output_fig, duration_error):
error_list=[]
total=0
error_middle =
|
pd.DataFrame({'middle':[]})
|
pandas.DataFrame
|
"""
API for the pi_logger module
adapted from:
https://www.codementor.io/@sagaragarwal94/building-a-basic-restful-api-in-python-58k02xsiq
"""
# pylint: disable=C0103
import json
import logging
import urllib
import pandas as pd
from flask import Flask, render_template, url_for
from flask_restful import Resource, Api
from pi_logger import PINAME, LOG_PATH
from pi_logger.local_db import (ENGINE, get_recent_readings,
get_last_reading)
from pi_logger.local_loggers import getserial, initialise_sensors
from pi_logger.local_loggers import (poll_all_dht22, poll_all_bme680,
poll_all_mcp3008)
app = Flask(__name__)
api = Api(app)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
LOG = logging.getLogger(f"pi_logger_{PINAME}.api_server")
class GetRecent(Resource):
"""
API resource to provide all readings since a given start_datetime (UTC)
"""
# pylint: disable=R0201
def get(self, start_datetime_utc, engine=ENGINE):
"""
GetRecent API resource get function
"""
start_datetime_utc = pd.to_datetime(start_datetime_utc)
result = get_recent_readings(start_datetime_utc, engine=engine)
if result is None:
msg = '{"message": "query returns no results"}'
result = json.loads(msg)
else:
result = pd.DataFrame(result)
result['datetime'] = pd.to_datetime(result['datetime'],
format="%Y-%m-%d %H:%M:%S")
result = result.to_json()
return result
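# Example client-side use (illustrative; the exact route is an assumption and
# depends on how this resource is registered with api.add_resource elsewhere):
#   r = requests.get('http://<pi-host>:5000/recent/2021-01-01T00:00:00')
#   readings = pd.read_json(r.json())   # readings since the given UTC start time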
class GetLast(Resource):
"""
API resource to provide the last recorded set of readings
"""
# pylint: disable=R0201
def get(self, engine=ENGINE):
"""
GetLast API resource get function
"""
result = get_last_reading(engine=engine)
if result is None:
msg = '{"message": "query returns no results"}'
result = json.loads(msg)
else:
result =
|
pd.DataFrame(result, index=[0])
|
pandas.DataFrame
|
"""
Module containing the commandline interface
"""
# pylint: disable=too-many-locals,import-outside-toplevel, too-many-arguments
import io
import subprocess
import pandas as pd
from ase.io.xyz import write_xyz
import numpy as np
import click
from res2desc import Atoms2Soap
from res2desc.res import read_stream
def cryan_out_adaptor(output, titl_list, desc_list, cryan_style):
"""Adapt the computed descriptors to the cryan style"""
nelem = desc_list.shape[1]
if cryan_style == '2':
for titl, desc in zip(titl_list, desc_list):
output.write(f'{nelem:d}\t')
np.savetxt(output, desc, newline='\t', fmt='%0.6G')
output.write('\n')
output.write(titl)
elif cryan_style == '1':
for titl, desc in zip(titl_list, desc_list):
output.write(f'{nelem:d}\n')
np.savetxt(output, desc, newline='\t', fmt='%0.6G')
output.write('\n')
output.write(titl)
else:
raise RuntimeError(f'Unknown cryan style {cryan_style}')
def process_titl_list(titl_list, atoms_list):
"""Process the titl_list"""
out_list = []
for titl, atom in zip(titl_list, atoms_list):
out_list.append('\t'.join([
titl.label,
str(titl.natoms),
atom.get_chemical_formula(mode='hill', empirical=True),
titl.symm.replace('(', "\"").replace(')', "\""),
str(titl.volume),
str(titl.enthalpy),
titl.flag3, # Number of times found
]) + '\n')
return out_list
@click.group('res2desc',
help='Commandline tool for converting SHELX files to descriptors')
@click.option(
'--input_source',
'-in',
type=click.File('r'),
default='-',
show_default='STDIN',
)
@click.option('--output',
'-out',
type=click.File('w'),
default='-',
show_default='STDOUT')
@click.option('--cryan/--no-cryan',
default=True,
show_default=True,
help=('Call cryan internally to obtain fully compatible output. '
'Should be disabled if cryan is not available.'))
@click.option(
'--cryan-style-in',
help=
'Style of the cryan input, 1 for 3 lines per structure, 2 for 2 lines per structure. Automatically falls back to 1 if 2 does not work.',
default='2',
type=click.Choice(['1', '2']),
show_default=True,
)
@click.option(
'--cryan-style-out',
help=
'Style of the cryan output, 1 for 3 lines per structure, 2 for 2 lines per structure. Defaults to 3 lines per structure for compatibility with SHEAP',
default='1',
type=click.Choice(['1', '2']),
show_default=True,
)
@click.option(
'--cryan-args',
type=str,
default='-v -dr 0',
show_default=True,
help=
'A string of the args that should be passed to cryan, as if in the shell')
@click.pass_context
def cli(ctx, input_source, output, cryan, cryan_args, cryan_style_in,
cryan_style_out):
"""
Top level command, handles input_source and output streams
"""
ctx.ensure_object(dict)
ctx.obj['output'] = output
# Check if using cryan compatible mode
if cryan is True:
# Read all input_source in a buffer
if input_source.name == '<stdin>':
if input_source.isatty():
# nothing in stdin
titl_lines = []
titl_list = []
atoms_list = []
inp = None
else:
# Reading from stdin
inp, titl_lines, titl_list, atoms_list = read_with_cryan(
input_source, cryan_args, cryan_style_in)
else:
# Reading from file
inp, titl_lines, titl_list, atoms_list = read_with_cryan(
input_source, cryan_args, cryan_style_in)
# Reset the input stream
ctx.obj['input_source'] = inp
ctx.obj['titl_lines'] = titl_lines
else:
ctx.obj['input_source'] = input_source
ctx.obj['titl_lines'] = None
titl_list, atoms_list = read_stream(input_source)
ctx.obj['titl_list'] = titl_list
ctx.obj['atoms_list'] = atoms_list
ctx.obj['cryan_style_out'] = cryan_style_out
def read_with_cryan(input_source, cryan_args, cryan_style):
"""Read stuff with cryan from a source
:param input_source: file object to be read.
:returns: a list of [inp, titl_lines, titl_list, atoms_list]"""
inp = io.StringIO()
inp.write(input_source.read())
# We are getting piped inputs
inp.seek(0)
cryan_args = cryan_args.split()
subp = subprocess.Popen(['cryan', *cryan_args],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
text=True)
ops, _ = subp.communicate(inp.read())
inp.seek(0)
if cryan_style not in ('1', '2'):
raise RuntimeError(f'Unknown cryan style: {cryan_style}')
# First try style == 2, this is the standard cryan
if cryan_style == '2':
titl_lines = ops.splitlines(keepends=True)[1::2]
# If it does not work, we try style == 1, this is the
# style Ben describes, with 3 lines per structure
if len(titl_lines[0].split()) != 7:
cryan_style = '1'
if cryan_style == '1':
titl_lines = ops.splitlines(keepends=True)[2::3]
# Check titl again
if len(titl_lines[0].split()) != 7:
raise RuntimeError('Ill-formatted cryan input detected. Terminating.')
titl_list, atoms_list = read_stream(inp)
# Reset the buffer
inp.seek(0)
return inp, titl_lines, titl_list, atoms_list
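# For orientation, the two cryan layouts distinguished above look roughly like
# this (illustrative, inferred from the slicing and the 7-field titl check:
# label, natoms, formula, symmetry, volume, enthalpy, times found):
#
#   style '2' (2 lines per structure):   <descriptor line>
#                                        <titl line>        <- picked by [1::2]
#   style '1' (3 lines per structure):   <n elements line>
#                                        <descriptor line>
#                                        <titl line>        <- picked by [2::3]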
# pylint: disable=too-many-arguments
@cli.command('soap', help='Compute SOAP descriptors')
@click.option('--nprocs',
'-np',
help='Number of processes for parallelisation.',
default=1)
@click.option('--l-max', default=4, show_default=True)
@click.option('--n-max', default=8, show_default=True)
@click.option('--cutoff', default=5, show_default=True)
@click.option('--atom-sigma', default=0.1, show_default=True)
@click.option('--crossover/--no-crossover',
default=True,
show_default=True,
help='Whether do the crossover for multiple species')
@click.option('--species-names',
'-sn',
required=False,
type=str,
help='Symbols of all species to be considered, should be a list')
@click.option(
'--centres-name',
'-cn',
required=False,
type=str,
multiple=True,
help=
'Centres where the descriptor should be computed. If not specified, defaults to all atomic sites. NOT IMPLEMENTED FOR NOW'
)
@click.option(
'--average/--no-average',
default=True,
show_default=True,
help=
'Averaging descriptors for each structure, rather than outputting those for individual sites.'
)
@click.option('--periodic/--no-periodic',
default=True,
show_default=True,
help='Whether assuming periodic boundary conditions or not')
@click.pass_context
# # pylint: disable=unused-argument
def cmd_soap(ctx, cutoff, l_max, n_max, atom_sigma, nprocs, centres_name,
species_names, average, periodic, crossover):
"""
Compute SOAP descriptors for res files; get the order of files from
the `ca -v` commands for consistency.
"""
titl_list, atoms_list = ctx.obj['titl_list'], ctx.obj['atoms_list']
cryan_style = ctx.obj['cryan_style_out']
if not species_names:
species_names = set()
for atoms in atoms_list:
_ = [species_names.add(x) for x in atoms.get_chemical_symbols()]
else:
species_names = species_names.split()
desc_settings = {
'rcut': cutoff,
'lmax': l_max,
'nmax': n_max,
'sigma': atom_sigma,
'average': average,
'species': species_names,
'periodic': periodic,
'crossover': crossover,
}
comp = Atoms2Soap(desc_settings)
descs = comp.get_desc(atoms_list, nprocs)
output = ctx.obj['output']
# Process the titl_lines
titl_lines = ctx.obj.get('titl_lines')
if titl_lines is None:
# Not read from cryan, construct these lines here
titl_lines = process_titl_list(titl_list, atoms_list)
cryan_out_adaptor(output, titl_lines, descs, cryan_style)
@cli.command('xyz', help='Create concatenated xyz files')
@click.option(
'--label-file',
help=
'Filename for writing out the labels. CSV will be used if the file name has the right suffix.'
)
@click.pass_context
def cmd_xyz(ctx, label_file):
"""
Commandline tool for creating a concatenated xyz file from res files;
also, the labels for each structure are saved
"""
titl_list, atoms_list = ctx.obj['titl_list'], ctx.obj['atoms_list']
output = ctx.obj['output']
# Setup info for atoms
for atoms, titl in zip(atoms_list, titl_list):
atoms.info['label'] = titl.label
atoms.info['enthalpy'] = titl.enthalpy
atoms.info['pressure'] = titl.pressure
atoms.info['symmetry'] = titl.symm
# Write the xyz files
write_xyz(output, atoms_list)
# Write the label file
from tabulate import tabulate
if label_file:
data = [[
'label', 'natoms', 'enthalpy', 'volume', 'pressure', 'symmetry'
]]
for titl in titl_list:
data.append([
titl.label, titl.natoms, titl.enthalpy / titl.natoms,
titl.volume / titl.natoms, titl.pressure, titl.symm
])
if label_file.endswith('.csv'):
dataframe =
|
pd.DataFrame(data[1:], columns=data[0])
|
pandas.DataFrame
|
# Imports
from sqlalchemy import String, Integer, Float, Boolean, Column, and_, ForeignKey
from connection import Connection
from datetime import datetime, time, date
import time
from pytz import timezone
import pandas as pd
import numpy as np
import os
from os import listdir
from os.path import isfile, join
from openpyxl import load_workbook
import openpyxl
# Import modules
from connection import Connection
from user import UserData
from test import TestData
from tables import User, Tests, TestsQuestions, Formative, FormativeQuestions
class ProcessEvaluationData(TestData):
"""
A class to handle process evaluation data. To initialize the class you need to specify the SQL connection string.
"""
def __init__(self, connection_string):
# First we create a session with the DB.
self.session = Connection.connection(connection_string)
self.data = pd.DataFrame()
# These are empty dataframes for transition proportions.
self.transitions_glb = pd.DataFrame()
self.transitions_eight_grade = pd.DataFrame()
self.transitions_nine_grade = pd.DataFrame()
self.transitions_ten_grade = pd.DataFrame()
self.transitions_eleven_grade = pd.DataFrame()
# General methods for process evaluation
def read_files(self, **kwargs):
"""
This function goes through the specified directories and reads the files into a temporary dictionary called
temp_data. Each file is read as a dataframe and stored under the file name as its key (e.g., user_2019).
After reading in all the files the function changes back to the original working directory (where it started from).
"""
self.temp_data = {}
for i in kwargs:
grade = i
year = kwargs[grade]
current_dir = os.getcwd()
if grade != 'user':
os.chdir(current_dir + '/Data/{}_grade/{}_grade_{}'.format(grade, grade, year))
else:
os.chdir(current_dir + '/Data/{}/{}_{}'.format(grade, grade, year))
for f in listdir(os.path.abspath(os.getcwd())):
if (f.split('.')[1] == 'csv') and (f.split('.')[0] != 'Comments'): # This part makes sure that xlsx files are excluded.
self.temp_data[str(f.split('.')[0])] = pd.read_csv(str(os.path.abspath(f)),
encoding = 'utf-8', engine='c')
os.chdir(current_dir)
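# Illustrative example of the layout this method expects and the keys it
# produces (the file names below are made up for orientation only):
#   self.read_files(**{'user': 2019, 'eight': 2019})
#     reads Data/user/user_2019/*.csv               -> e.g. self.temp_data['user_2019']
#     and   Data/eight_grade/eight_grade_2019/*.csv -> e.g. self.temp_data['eight_pre_2019']
#   Non-csv files and Comments.csv are skipped.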
def transitions_global(self, trial = None):
"""
The method calculates transition statistics. It has two modes: trial = True/False.
When trial is True it searches for directories from the trial year (2019). Otherwise the function
uses the previous year (current year - 1).
"""
def tranistions_grouped(group_by):
"""
The group_by argument specifies the variable(s) on which the aggregation is performed.
"""
if trial:
year = 2019
else:
now = datetime.now()
year = now.year - 1
self.year = year
self.global_step_one = {} # Step 1: Registration
self.global_step_two = {} # Step 2: Pre-test
self.global_step_three = {} # Step 3: Post-test
self.read_files(**{'user' : self.year,
'eight' : self.year,
'nine' : self.year,
'ten' : self.year,
'eleven' : self.year})
"""
After reading in the files the method creates dictionaries for each step (3 dictionaries in total).
"""
for i in self.temp_data.keys():
if 'post' in i: # assembles the post-test data
self.global_step_three[i]= pd.DataFrame(self.temp_data[i].drop_duplicates(['user_id'])\
.groupby([i for i in group_by])['user_id'].count())
self.global_step_two[i] = pd.DataFrame(self.temp_data[i].drop_duplicates(['user_id'])\
.groupby([i for i in group_by])['user_id'].count())
elif 'dropouts' in i: # adds droupouts data to step two.
self.global_step_two[i] = pd.DataFrame(self.temp_data[i].drop_duplicates(['user_id'])\
.groupby([i for i in group_by])['user_id'].count())
elif 'user' in i: # add user data to step one.
self.global_step_one[i] = pd.DataFrame(self.temp_data[i]\
.groupby([i for i in group_by])['user_id'].count())
df1 = pd.concat(self.global_step_three.values(), axis = 1)
df1 = pd.DataFrame(df1.sum(axis=1, skipna=True))
df1.rename(columns={ 0 : 'Step_Three'}, inplace = True)
df2 = pd.concat(self.global_step_two.values(), axis = 1, sort=True)
df2 = pd.DataFrame(df2.sum(axis=1, skipna=True))
df2.rename(columns={ 0 : 'Step_Two'}, inplace = True)
df3 = pd.concat(self.global_step_one.values(), axis = 1)
df3 = pd.DataFrame(df3.sum(axis=1, skipna=True))
df3.rename(columns={ 0 : 'Step_One'}, inplace = True)
transitions = pd.concat([df3, df2, df1], axis = 1, sort=True)
transitions = transitions.T.assign(Total = lambda x: x.sum(1)).T
pc_change = transitions.pct_change(axis = 'columns').round(2) # Calculates percentage change between the steps and rounds to the second digit.
pc_change.rename(columns = {'Step_One' : 'Step_One_change',
'Step_Two' : 'Step_Two_change',
'Step_Three' : 'Step_Three_change'}, inplace = True)
transitions_pc = pd.concat([transitions, pc_change], axis = 1)
transitions_pc.drop('Step_One_change', axis = 1, inplace = True)
return transitions_pc
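# Shape of the table returned above (illustrative; the numbers and the region
# grouping are made up): one row per group_by value plus a 'Total' row, with
# absolute counts per step and the relative change between consecutive steps.
#
#   region_name   Step_One  Step_Two  Step_Three  Step_Two_change  Step_Three_change
#   Yerevan           1200       540         310            -0.55              -0.43
#   Total              ...       ...         ...              ...                ...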
def transition_time(group_by):
pre_date = {}
for i in self.temp_data.keys():
if 'pre' in i:
pre_date.update({ i : self.temp_data[i][['user_id', 'user_grade', 'user_created_at', 'region_name', 'user_sex', 'pre_tests_res_date']]\
.drop_duplicates(subset = 'pre_tests_res_date', keep="last") })
post_date = {}
for i in self.temp_data.keys():
if 'post' in i:
post_date.update({i : self.temp_data[i][['user_id', 'user_grade', 'user_created_at', 'region_name', 'user_sex', 'pre_tests_res_date']]\
.drop_duplicates(subset = 'pre_tests_res_date', keep="last")})
d1 = pd.concat(pre_date, ignore_index=True)
d2 = pd.concat(post_date, ignore_index=True)
transitions_time = d1.merge(d2, on = ['user_id', 'region_name', 'user_grade', 'user_sex', 'user_created_at'])
transitions_time.rename(columns = {'pre_tests_res_date_x' : 'pre',
'pre_tests_res_date_y' : 'post'}, inplace = True)
transitions_time['pre'] = transitions_time['pre'].apply(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"))
transitions_time['post'] = transitions_time['post'].apply(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"))
transitions_time['user_created_at'] = transitions_time['user_created_at'].apply(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"))
transitions_time['tdelta_registered_pre'] = transitions_time['pre'] - transitions_time['user_created_at']
transitions_time['tdelta_pre_post'] = transitions_time['post'] - transitions_time['pre']
transitions_time['tdelta_registered_post'] = transitions_time['post'] - transitions_time['user_created_at']
df1 = pd.DataFrame(transitions_time.groupby([i for i in group_by])['tdelta_registered_pre'].quantile(0.75).astype(str))
df2 = pd.DataFrame(transitions_time.groupby([i for i in group_by])['tdelta_pre_post'].quantile(0.75).astype(str))
df3 = pd.DataFrame(transitions_time.groupby([i for i in group_by])['tdelta_registered_post'].quantile(0.75).astype(str))
combined_transitions_time = df1.join([df2, df3])
return combined_transitions_time
def test_time(group_by):
pre_date = {}
for i in self.temp_data.keys():
if 'pre' in i:
pre_date.update({ i : self.temp_data[i][['user_id', 'user_grade', 'region_name', 'user_sex', 'pre_tests_res_time']]\
.drop_duplicates(subset = 'user_id', keep="last") })
post_date = {}
for i in self.temp_data.keys():
if 'post' in i:
post_date.update({i : self.temp_data[i][['user_id', 'user_grade', 'region_name', 'user_sex', 'pre_tests_res_time']]\
.drop_duplicates(subset = 'user_id', keep="last")})
d1 = pd.concat(pre_date, ignore_index=True)
d2 = pd.concat(post_date, ignore_index=True)
transitions_test_time = d1.merge(d2, on = ['user_id', 'region_name', 'user_grade', 'user_sex'])
transitions_test_time.rename(columns = {'pre_tests_res_time_x' : 'pre_test_time_minutes',
'pre_tests_res_time_y' : 'post_test_time_minutes'}, inplace = True)
def percentile(n): # an inner function to calculate percentiles
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = 'percentile_%s' % n
return percentile_
transitions_test_time = transitions_test_time.groupby([i for i in group_by])['pre_test_time_minutes' , 'post_test_time_minutes']\
.aggregate(['min', np.median, percentile(75), np.mean, max])\
.apply(lambda x:
|
pd.to_timedelta(x, unit='s')
|
pandas.to_timedelta
|
#!/usr/bin/env python
# coding: utf-8
# ## 1. Multi-Class Classification:
# For the multiclass classification problem, there were six different datasets. Some of them contain missing values; for example, TrainData1, TestData1 and TrainData3 encode missing entries as 1.00000000000000e+99. The first step is therefore to handle the missing values before selecting features. Then the classifiers are compared on each training dataset, using cross validation, to find out which classifier gives the best result, and the accuracy is verified on the corresponding test dataset. (A minimal sketch of this workflow is included after the hint list below.)
# (Figure: dataset summary table for Question 1; see images/Q1_table.jpeg)
#
# Hint:
# * Missing Value Estimation
# - (KNN method for imputation of the missing values)
# * Dimensionality Reduction
# * Use Several Classifiers/ Ensemble Method
# - Logistic Regression (with different c values)
# - Random Forest (with different estimator values)
# - SVM (with different kernels)
# - KNN (with k = 1,2,5,10,20)
# - K (3,5,10) Fold Cross Validation
# * Performance Comparison
# - Classification Accuracy, Precision, Recall, Sensitivity, Specificity
# - AUC, ROC Curve
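# In[ ]:
# A minimal sketch of the workflow outlined above (illustrative only: the 1e99
# missing-value marker comes from the description, while the KNN imputer, the
# PCA size and the classifier settings here are assumptions, not the notebook's
# final choices).
import numpy as np
from sklearn.impute import KNNImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
def sketch_multiclass_workflow(X_train, y_train, n_components=50, cv=5):
    """Impute the 1e99 markers with KNN, scale, reduce dimensionality, then report k-fold CV accuracy."""
    X = X_train.replace(1.00000000000000e+99, np.nan)    # assumes the marker parses to exactly 1e99
    pipe = make_pipeline(KNNImputer(n_neighbors=5),
                         StandardScaler(),
                         PCA(n_components=n_components),  # assumes at least n_components features
                         LogisticRegression(max_iter=1000))
    return cross_val_score(pipe, X, y_train.values.ravel(), cv=cv)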
# In[1]:
import warnings
warnings.filterwarnings('ignore')
# Base packages
import gc, sys, re, os
from time import strptime, mktime
# Data processing/preprocessing/modeling packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import offsetbox
np.random.seed(1)
import seaborn as sns
import statistics as stat
from sklearn.preprocessing import *
# Modeling settings
plt.rc("font", size=14)
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
# Testing & Validation packages
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score, auc, accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.decomposition import PCA
# SVM
from sklearn.svm import *
# KNN
from sklearn.neighbors import KNeighborsClassifier
from impyute.imputation.cs import fast_knn
# Logistic Regression
from sklearn.linear_model import LogisticRegression
# Random Forest
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import export_graphviz
from six import StringIO
from IPython.display import Image
from pydotplus import *
# SVM
from sklearn.svm import SVC, LinearSVC
# In[2]:
def optimizeK(X_train, y_train, X_test, y_test):
neighbors = np.arange(1,20)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
for i,k in enumerate(neighbors):
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
train_accuracy[i] = knn.score(X_train, y_train)
test_accuracy[i] = knn.score(X_test, y_test)
return neighbors, test_accuracy, train_accuracy
# In[10]:
def plotK(neighbors, test_accuracy, train_accuracy):
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.plot(neighbors, train_accuracy, label='Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.xticks(np.arange(0, neighbors[-1], step=1))
plt.ylabel('Accuracy')
plt.title('KNN Varying Number of Neighbors')
plt.show()
# In[11]:
X_train2 = pd.read_csv('data/1/TrainData2.txt', delimiter='\s+', header=None)
X_train3 = pd.read_csv('data/1/TrainData3.txt', delimiter='\s+', header=None)
X_train4 = pd.read_csv('data/1/TrainData4.txt', delimiter='\s+', header=None)
# In[12]:
y_train2 = pd.read_csv('data/1/TrainLabel2.txt', delimiter='\n', header=None)
y_train3 =
|
pd.read_csv('data/1/TrainLabel3.txt', delimiter='\n', header=None)
|
pandas.read_csv
|
#!/usr/bin/python
print('financials_process_annually - initiating.')
import os
import pandas as pd
pd.set_option('display.max_columns', None)
pd.options.display.float_format = '{:20,.2f}'.format
pd.options.mode.use_inf_as_na = True
cwd = os.getcwd()
input_folder = "0_input"
temp_folder = "temp"
financials_temp = "financials_annually"
from pathlib import Path
paths = Path(os.path.join(cwd,input_folder,temp_folder,financials_temp)).glob('**/*.csv')
financials_table = []
for path in paths:
path_in_str = str(path)
try:
fundamentals_parse = pd.read_csv(path,low_memory=False)
if not fundamentals_parse.empty:
financials_table.append(fundamentals_parse)
print(path_in_str)
else:
pass
except:
pass
# export
financials_table =
|
pd.concat(financials_table)
|
pandas.concat
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Search for pulsars."""
import warnings
import os
import argparse
import copy
import numpy as np
from astropy import log
from astropy.table import Table
from astropy.logger import AstropyUserWarning
from .io import get_file_type
from stingray.pulse.search import (
epoch_folding_search,
z_n_search,
search_best_peaks,
)
from stingray.gti import time_intervals_from_gtis
from stingray.utils import assign_value_if_none
from stingray.pulse.modeling import fit_sinc, fit_gaussian
from stingray.stats import pf_upper_limit
from .io import load_events, EFPeriodogram, save_folding, HEN_FILE_EXTENSION
from .base import hen_root, show_progress, adjust_dt_for_power_of_two
from .base import deorbit_events, njit, prange, vectorize, float64
from .base import histogram2d, histogram, memmapped_arange
from .base import z2_n_detection_level, fold_detection_level
from .fold import filter_energy
from .ffa import _z_n_fast_cached, ffa_search, h_test
from .fake import scramble
try:
import matplotlib.pyplot as plt
HAS_MPL = True
except ImportError:
HAS_MPL = False
try:
import imageio
HAS_IMAGEIO = True
except ImportError:
HAS_IMAGEIO = False
D_OMEGA_FACTOR = 2 * np.sqrt(3)
TWOPI = 2 * np.pi
__all__ = [
"check_phase_error_after_casting_to_double",
"decide_binary_parameters",
"folding_orbital_search",
"fit",
"calculate_shifts",
"mod",
"shift_and_sum",
"z_n_fast",
"transient_search",
"plot_transient_search",
"search_with_qffa_step",
"search_with_qffa",
"search_with_ffa",
"folding_search",
"dyn_folding_search",
"main_efsearch",
"main_zsearch",
"z2_vs_pf",
"main_z2vspf",
"main_accelsearch",
"h_test",
]
def _save_df_to_csv(df, csv_file, reset=False):
if not os.path.exists(csv_file) or reset:
mode = "w"
header = True
else:
mode = "a"
header = False
df.to_csv(csv_file, header=header, index=False, mode=mode)
def check_phase_error_after_casting_to_double(tref, f, fdot=0):
"""Check the maximum error expected in the phase when casting to double."""
times = np.array(np.random.normal(tref, 0.1, 1000), dtype=np.longdouble)
times_dbl = times.astype(np.double)
phase = times * f + 0.5 * times ** 2 * fdot
phase_dbl = times_dbl * np.double(f) + 0.5 * times_dbl ** 2 * np.double(
fdot
)
return np.max(np.abs(phase_dbl - phase))
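# Back-of-envelope expectation (illustrative): casting a time of order tref to
# float64 introduces a relative error of about eps ~ 2.2e-16, so the maximum
# phase error returned above should be roughly
#     dphi ~ |tref| * eps * (f + |fdot| * |tref|)    [cycles]
# e.g. tref = 1e8 s and f = 1000 Hz with fdot = 0 give dphi ~ 2e-5 cycles.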
def decide_binary_parameters(
length,
freq_range,
porb_range,
asini_range,
fdot_range=[0, 0],
NMAX=10,
csv_file="db.csv",
reset=False,
):
import pandas as pd
count = 0
omega_range = [1 / porb_range[1], 1 / porb_range[0]]
columns = [
"freq",
"fdot",
"X",
"Porb",
"done",
"max_stat",
"min_stat",
"best_T0",
]
df = 1 / length
log.info(
"Recommended frequency steps: {}".format(
int(np.diff(freq_range)[0] // df + 1)
)
)
while count < NMAX:
# In any case, only the first loop deletes the file
if count > 0:
reset = False
block_of_data = []
freq = np.random.uniform(freq_range[0], freq_range[1])
fdot = np.random.uniform(fdot_range[0], fdot_range[1])
dX = 1 / (TWOPI * freq)
nX = int(np.diff(asini_range) // dX) + 1
Xs = np.random.uniform(asini_range[0], asini_range[1], nX)
for X in Xs:
dOmega = 1 / (TWOPI * freq * X * length) * D_OMEGA_FACTOR
nOmega = int(np.diff(omega_range) // dOmega) + 1
Omegas = np.random.uniform(omega_range[0], omega_range[1], nOmega)
for Omega in Omegas:
block_of_data.append(
[freq, fdot, X, TWOPI / Omega, False, 0.0, 0.0, 0.0]
)
df =
|
pd.DataFrame(block_of_data, columns=columns)
|
pandas.DataFrame
|
import os
import io
from dotenv import load_dotenv
from datetime import datetime
import math
# Import all machine learning computation libraries
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
import pandas as pd
import numpy
# Import library to generate model
import pickle
from dcd.entities.thing import Thing
# The thing ID and access token
load_dotenv()
THING_ID = os.environ['THING_ID']
THING_TOKEN = os.environ['THING_TOKEN']
# Where to save the model to
MODEL_FILE_NAME = "model.pickle"
# Data collection time frame (in milliseconds)
START_TS = 1553276760000
END_TS = 1553276760000+300000
# Property ID
PROPERTY_DATA = "fsr-7b9c"
PROPERTY_LABEL = "dhaval-bbca"
# Instantiate a thing with its credential
my_thing = Thing(thing_id=THING_ID, token=THING_TOKEN)
# We can fetch the details of our thing
my_thing.read()
print(my_thing.to_json())
def unix_time_millis(dt):
epoch = datetime.utcfromtimestamp(0)
return math.floor((dt - epoch).total_seconds() * 1000.0)
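# Quick check (illustrative): for a naive UTC datetime this reproduces the
# START_TS constant defined above, e.g.
#   unix_time_millis(datetime(2019, 3, 22, 17, 46)) == 1553276760000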
def list_to_df(dataSet):
dfObj =
|
pd.DataFrame(dataSet)
|
pandas.DataFrame
|
import sys
import json
import requests
import pandas as pd
import numpy as np
import os.path
import datetime
import sqlite3
import hashlib
from pandas import HDFStore
import plotly
import collectobot
import plotly.graph_objs as go
DATA_PATH = '../test_data/' #TODO in current directory while testing, needs to be fixed before shipping!
HDF_NAME = '../test_data/cbot.hdf5'
GRAPH_DATABASE = '../test_data/graph.db'
class yaha_analyzer(object):
def __init__(self):
self.total_pages = 0
self.history = []
self.username = ''
self.api_key = ''
self.new_data = False
def generate_collectobot_data(self):
"""
Generates collect-o-bot data from the database, writes it to a hdf5 file
:return: list of games
:rtype: pandas dataframe
"""
results = collectobot.aggregate()
self.games = results
self.history = {'children': results, 'meta': {'total_items': len(results)}}
self.generate_decks(dates = False)
self.write_hdf5(HDF_NAME)
return results
def open_collectobot_data(self):
"""
Loads the collectobot data from a hdf5 file
"""
self.read_data(hdf5_name=HDF_NAME)
def _load_json_data(self, json_file):
"""
Opens a json file and loads it into the object, this method is meant for testing
:param json_file: location of the json file
:type json_file: string
:return: list of games
:rtype: pandas dataframe
"""
with open(json_file, "r") as infile:
results = json.load(infile)
self.history = results
self.generate_decks()
return results
def pull_data(self, username, api_key):
"""
Grabs the data from the trackobot servers and writes it out to new files and the database if they don't exist or are outdated
:param username: trackobot username
:param api_key: trackobot api key
:type username: string
:type api_key: string
:return: contents of the json_files
:rtype: dictionary
"""
self.username = username
self.api_key = api_key
url = 'https://trackobot.com/profile/history.json?'
auth = {'username': username, 'token': api_key}
req = requests.get(url, params=auth).json()
metadata = req['meta']
user_hash, count, json_name, hdf5_name = self.store_data()
#if it's not equal, repull
if metadata['total_items'] != count or not self.check_data(json_name, hdf5_name):
results = {'children': req['history']}
if metadata['total_pages'] != None:
for page_number in range(2, metadata['total_pages']+1):
auth['page'] = page_number
results['children'].extend(requests.get(url, params=auth).json()['history'])
results['meta'] = {'total_items': metadata['total_items']}
self.history = results
self.generate_decks()
self.write_hdf5(hdf5_name)
with open('{}{}'.format(DATA_PATH, json_name), "w") as outfile:
json.dump(results, outfile)
self.update_count(user_hash, metadata['total_items']) #once everything's been loaded and written, update the total_items count in the database
else:
results = self.read_data(json_name, hdf5_name)
return results
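# Typical usage (illustrative; the credentials are placeholders):
#   bot = yaha_analyzer()
#   bot.pull_data('<trackobot username>', '<api key>')
# This re-pulls from trackobot.com only when the cached item count is stale or
# the stored files fail the check; otherwise it loads the local json/hdf5 copy.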
def generate_decks(self, dates = True):
"""
Differentiates between the different deck types and sorts them into their individual lists (history is a massive array, transformed into a pandas dataframe for processing)
:param dates: generate specific dates into their own columns
:type dates: bool
:return: list of games
:rtype: pandas dataframe
"""
self.games = pd.DataFrame(self.history['children'])
self.games.loc[self.games['hero_deck'].isnull(), 'hero_deck'] = 'Other'
self.games.loc[self.games['opponent_deck'].isnull(), 'opponent_deck'] = 'Other'
self.games['p_deck_type'] = self.games['hero_deck'].map(str) + '_' + self.games['hero']
self.games['o_deck_type'] = self.games['opponent_deck'].map(str) + '_' + self.games['opponent']
self._generate_cards_played()
if dates:
self._make_dates()
self.games = self.games[self.games['card_history'].str.len() != 0]
return self.games
def _unique_decks(self, game_mode='ranked', game_threshold = 5, formatted = True):
"""
Returns a list with the unique decks for that game mode in self.games
>> Don't actually use this, call the database instead
:param game_mode: the game mode, 'ranked', 'casual', or 'both'
:param game_threshold: the minimum amount of games the deck has to show up
:type game_mode: string
:type game_threshold: int
:returns: list of unique p_deck_types
:rtype: list of strings
"""
deck_types = self.generate_matchups(game_mode, game_threshold).reset_index()
deck_types = deck_types['p_deck_type'].unique()
if formatted:
return sorted(list(map(lambda x: x.replace("_", " "), deck_types)))
return deck_types
def _unique_cards(self, game_mode='ranked', game_threshold = 5, formatted = True):
"""
Returns a list with the unique cards for that game mode in self.games
>> Don't actually use this, call the database instead
:param game_mode: the game mode, 'ranked', 'casual', or 'both'
:param game_threshold: the minimum amount of games the deck has to show up
:type game_mode: string
:type game_threshold: int
:return: a list of cards
:rtype: list of strings
"""
cards = self.generate_card_stats(game_mode, game_threshold).reset_index()
cards = cards['card'].unique().tolist()
return cards
def _make_dates(self):
"""Internal method -- Converts the dates in self.games to separate columns for easier parsing, called by generate_decks"""
format_date = lambda x: datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.%fZ')
split_date = lambda x: {'year': x.year, 'month': x.month, 'day': x.day, 'hour': x.hour, 'minute': x.minute, 'second': x.second}
date_df = pd.DataFrame(list(map(lambda x: split_date(format_date(x)), self.games['added'])))
self.games = self.games.join(date_df, how='outer')
def _get_card_list(self, dict_list, player='me'):
"""
Internal method -- Returns the list of cards that were played in a game, called by _generate_cards_played
Keyword parameters:
dict_list -- list of dictionaries from the ['card_history'] column in self.games for one particular game
player -- the player to be parsing
Returns:
p_card_list -- array of card names (str)
"""
p_card_list = list(filter(None, map(lambda x: x['card']['name'] if x['player'] == player else None, dict_list)))
return p_card_list
def _generate_cards_played(self):
"""Internal method -- Generates a list of cards for player and opponent into the list ['p_cards_played'] and ['o_cards_played'], called by generate_decks"""
self.games['p_cards_played'] = self.games['card_history'].map(lambda x: self._get_card_list(x, player='me'))
self.games['o_cards_played'] = self.games['card_history'].map(lambda x: self._get_card_list(x, player='opponent'))
def generate_matchups(self, game_mode = 'ranked', game_threshold = 0):
"""
Generates a pandas groupby table with duration, count, coin, win #, win%, and card_history
:param game_mode: the game mode, 'ranked', 'casual', or 'both'
:param game_threshold: the minimum amount of games the deck has to show up
:type game_mode: string
:type game_threshold: int
:return: grouped, indices are player 'p_deck_type' then opponent 'o_deck_type'
:rtype: pandas groupby
"""
decks = self.games
if game_mode != 'both':
decks = decks[decks['mode'] == game_mode]
decks.loc[:, 'win'] = decks['result'].map(lambda x: True if x == 'win' else False)
decks.loc[:, 'count'] = [1]*len(decks)
grouped = decks.groupby(['p_deck_type', 'o_deck_type']).agg({'coin': np.sum, 'duration': [np.mean, np.std], 'count': np.sum, 'win': np.sum, 'card_history': lambda x: tuple(x)})
grouped['win%'] = grouped['win']['sum']/grouped['count']['sum']*100
grouped = grouped[grouped['count']['sum'] > game_threshold]
return grouped #note this returns a groupby, so a reset_index is necessary before pivoting/plotting
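# Follow-up sketch (illustrative): because agg() above returns MultiIndex
# columns, call reset_index() and flatten the column levels before pivoting
# p_deck_type x o_deck_type on the 'win%' values; this is essentially what
# create_heatmap() further down does via df.reset_index() and data.pivot(x, y).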
def generate_cards(self, filtered):
"""
Generates a grouped win/loss count for specific cards
:param filtered: subset of self.games filtered
:type filtered: pandas dataframe
:return: p_df, o_df -- p_df covers cards played by 'me' and o_df covers cards played by 'opponent'; in both, the index is the card name ['card'] and the columns are win and loss counts ['win', 'loss']
:rtype: pandas groupby, pandas groupby
"""
p_df = []
o_df = []
for r in zip(filtered['p_cards_played'], filtered['o_cards_played'], filtered['result']):
for p_card in r[0]:
p_df.append({'card': p_card, 'win': 1, 'loss': 0} if r[2] == 'win' else {'card': p_card, 'win': 0, 'loss': 1})
for o_card in r[1]:
o_df.append({'card': o_card, 'win': 1, 'loss': 0} if r[2] == 'loss' else {'card': o_card, 'win': 0, 'loss': 1})
p_df = pd.DataFrame(p_df)
o_df = pd.DataFrame(o_df)
p_df = p_df.groupby('card').agg(np.sum)
o_df = o_df.groupby('card').agg(np.sum)
return p_df, o_df
def generate_decklist_matchups(self, game_mode = 'ranked', game_threshold = 2):
"""
Generates a dataframe with a list of cards and the matchups where each card won and lost, grouped by ['card', 'p_deck_type', 'o_deck_type']
:param game_mode: the game mode, 'ranked', 'casual', or 'both'
:param game_threshold: the minimum amount of games the deck has to show up
:type game_mode: string
:type game_threshold: int
:return: cards with ['card', 'p_deck_type', 'o_deck_type', 'loss', 'win', 'win%']
:rtype: pandas groupby
"""
cards = []
gs = self.games
if game_mode != 'both':
gs = gs[gs['mode'] == game_mode]
for r in zip(gs['p_cards_played'], gs['result'], gs['p_deck_type'], gs['o_deck_type']):
for card in r[0]:
data = {'card': card, 'p_deck_type': r[2], 'o_deck_type': r[3], 'win': 1, 'loss': 0} if r[1] == 'win' else {'card': card, 'p_deck_type': r[2], 'o_deck_type': r[3], 'win': 0, 'loss': 1}
cards.append(data)
cards = pd.DataFrame(cards)
cards = cards.groupby(['card', 'p_deck_type', 'o_deck_type']).agg(np.sum)
cards = cards[(cards['win'] + cards['loss']) > game_threshold]
cards.loc[:, 'win%'] = cards['win']/(cards['win'] + cards['loss'])
cards['total_games'] = cards['win'] + cards['loss']
return cards
def generate_card_stats(self, game_mode='ranked', game_threshold = 2):
"""
Returns a groupby object with ['card', 'p_deck_type', 'o_deck_type', 'turn', 'loss', 'win'] as [str, str, str, int, int, int]
:param game_mode: game type
:param game_threshold: the minimum number of times the card has to show up
:type game_mode: str
:type game_threshold: int
:return: cards
:rtype: pandas groupby object
"""
cards = []
gs = self.games
if game_mode != 'both':
gs = gs[gs['mode'] == game_mode]
for r in zip(gs['card_history'], gs['result'], gs['p_deck_type'], gs['o_deck_type']):
for play in r[0]:
card = play['card']['name']
player = play['player']
turn = play['turn']
result = {'win': 1, 'loss': 0} if r[1] == 'win' else {'win': 0, 'loss': 1}
card_data = {'card': card, 'player': player, 'turn': turn}
player_data = {'p_deck_type': r[2], 'o_deck_type': r[3]} if player == 'me' else {'p_deck_type': r[3], 'o_deck_type': r[2]}
data = result.copy()
data.update(card_data)
data.update(player_data)
cards.append(data)
cards = pd.DataFrame(cards)
cards = cards.groupby(['card', 'p_deck_type', 'o_deck_type', 'turn']).agg(np.sum)
cards = cards[cards['win'] + cards['loss'] > game_threshold]
cards.loc[:, 'win%'] = cards['win']/(cards['win'] + cards['loss'])
cards['total_games'] = cards['win'] + cards['loss']
return cards
def create_heatmap(self, x, y, z, df, title, layout = None, text = None):
"""
Creates a heatmap x, y, and z
:param x: name of the x value column
:param y: name of the y value column
:param z: name of the z value column
:param df: dataframe
:param title: heatmap title
:param layout: dictionary for plotly layout. Autogenerated if None is passed
:param text: column to be displayed for hover text
:type x: string
:type y: string
:type z: string
:type df: pandas dataframe
:type title: string
:type layout: dictionary
:type text: string
:return: one dictionary to be used with plotly.utils.PlotlyJSONEncoder
:rtype: list
"""
data = df.reset_index()
hover_text = []
if text:
text = data[[x, y, text]]
text = text.pivot(x, y)
hover_text = [text[x].values.tolist() for x in text.columns]
for n, row in enumerate(hover_text):
for m, val in enumerate(row):
hover_text[n][m] = 'Total Games: {}'.format(hover_text[n][m])
data = data[[x, y, z]]
data.loc[:, z] = data[z]*100
x_vals = sorted(data[x].unique())
y_vals = sorted(data[y].unique())
x_vals = list(map(lambda x: x.replace('_', ' '), x_vals))
y_vals = list(map(lambda x: x.replace('_', ' '), y_vals))
data = data.pivot(x, y)
z_vals = [data[x].values.tolist() for x in data.columns]
titles = self.title_format(x, y, z)
        if layout is None:
annotations = []
for n, row in enumerate(z_vals):
for m, val in enumerate(row):
var = z_vals[n][m]
annotations.append(
dict(
                            text = '{:.2f}%'.format(val) if not pd.isnull(val) else '',
|
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
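    # Added note: each expected flag series below is a 0/1 indicator multiplied
    # by that check's bitmask value, then OR-ed with LATEST_VERSION_FLAG.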
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
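def test_quality_flag_bitmask_sketch():
    # Added illustrative sketch, not part of the original suite: quality flags
    # are integer bitmasks, so individual checks are combined with bitwise OR
    # and queried with bitwise AND. The constants below are hypothetical values,
    # not the real DESCRIPTION_MASK_MAPPING entries.
    nighttime = 1 << 1
    limits_exceeded = 1 << 3
    combined = nighttime | limits_exceeded
    assert combined & nighttime
    assert combined & limits_exceeded
    assert not combined & (1 << 2)  # an unrelated bit stays clear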
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_not_listed(mocker, make_observation,
default_index):
obs = make_observation('curtailment')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dni(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dni_limits_QCRad']]
obs = make_observation('dni')
data = pd.Series([10, 1000, -100, 500, 500], index=default_index)
flags = tasks.validate_dni(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 0, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dni(mocker, make_observation,
default_index):
obs = make_observation('dni')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dhi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dhi_limits_QCRad']]
obs = make_observation('dhi')
data = pd.Series([10, 1000, -100, 200, 200], index=default_index)
flags = tasks.validate_dhi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dhi(mocker, make_observation,
default_index):
obs = make_observation('dhi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_poa_global(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_poa_clearsky']]
obs = make_observation('poa_global')
data = pd.Series([10, 1000, -400, 300, 300], index=default_index)
flags = tasks.validate_poa_global(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_poa_global(mocker, make_observation,
default_index):
obs = make_observation('poa_global')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_air_temp(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_temperature_limits']]
obs = make_observation('air_temperature')
data = pd.Series([10, 1000, -400, 30, 20], index=default_index)
flags = tasks.validate_air_temperature(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_air_temperature(
mocker, make_observation, default_index):
obs = make_observation('air_temperature')
data = pd.DataFrame(
[(0, 0), (200, 0), (20, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_wind_speed(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_wind_limits']]
obs = make_observation('wind_speed')
data = pd.Series([10, 1000, -400, 3, 20], index=default_index)
flags = tasks.validate_wind_speed(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_wind_speed(
mocker, make_observation, default_index):
obs = make_observation('wind_speed')
data = pd.DataFrame(
[(0, 0), (200, 0), (15, 0), (1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_relative_humidity(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_rh_limits']]
obs = make_observation('relative_humidity')
data = pd.Series([10, 101, -400, 60, 20], index=default_index)
flags = tasks.validate_relative_humidity(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_relative_humidity(
mocker, make_observation, default_index):
obs = make_observation('relative_humidity')
data = pd.DataFrame(
[(0, 0), (200, 0), (15, 0), (40, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_ac_power(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ac_power_limits']]
obs = make_observation('ac_power')
data = pd.Series([0, 1, -1, 0.001, 0.001], index=default_index)
flags = tasks.validate_ac_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ac_power(mocker, make_observation,
default_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(0, 0), (1, 0), (-1, 0), (0.001, 1), (0.001, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dc_power(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dc_power_limits']]
obs = make_observation('dc_power')
data = pd.Series([0, 1, -1, 0.001, 0.001], index=default_index)
flags = tasks.validate_dc_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dc_power(mocker, make_observation,
default_index):
obs = make_observation('dc_power')
data = pd.DataFrame(
[(0, 0), (1, 0), (-1, 0), (0.001, 1), (0.001, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_daily_ghi(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi',
'detect_stale_values',
'detect_interpolation']]
obs = make_observation('ghi')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[10, 1000, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ghi_daily(mocker, make_observation,
daily_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(10, 0), (1000, 0), (-100, 0), (500, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_fetch_and_validate_observation_ghi_zeros(mocker, make_observation,
daily_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0)] * 13,
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
base = (
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG |
DAILY_VALIDATION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG |
DAILY_VALIDATION_FLAG,
base,
base,
base,
base,
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_validate_daily_dc_power(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'detect_stale_values',
'detect_interpolation']]
obs = make_observation('dc_power')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[0, 1000, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_dc_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_dc_power_daily(
mocker, make_observation, daily_index):
obs = make_observation('dc_power')
data = pd.DataFrame(
[(10, 0), (1000, 0), (-100, 0), (500, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
def test_validate_daily_ac_power(mocker, make_observation, daily_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'detect_stale_values',
'detect_interpolation',
'detect_clipping']]
obs = make_observation('ac_power')
data = pd.Series(
# 8 9 10 11 12 13 14 15 16 17 18 19 23
[0, 100, -100, 100, 300, 300, 300, 300, 100, 0, 100, 0, 0],
index=daily_index)
flags = tasks.validate_daily_ac_power(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES']
)
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_fetch_and_validate_observation_ac_power_daily(
mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(10, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
BASE_FLAG = LATEST_VERSION_FLAG | DAILY_VALIDATION_FLAG
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['STALE VALUES'] |
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] | BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] |
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] |
BASE_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] |
BASE_FLAG
]
assert post_mock.called
posted_df = pd.concat([cal[0][1] for cal in post_mock.call_args_list])
assert_frame_equal(posted_df, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity', 'net_load',
])
def test_fetch_and_validate_observation_other(var, mocker, make_observation,
daily_index):
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
validated = pd.Series(2, index=daily_index)
validate_mock = mocker.MagicMock(return_value=validated)
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: validate_mock})
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
assert post_mock.called
assert validate_mock.called
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_daily_validation_other(
mocker, make_observation, daily_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
mocks = [mock,
mocker.spy(tasks, '_validate_stale_interpolated')]
obs = make_observation(var)
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
10, 1900, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 0}, index=daily_index)
out = tasks.apply_daily_validation(obs, data)
assert (out['quality_flag'] | DAILY_VALIDATION_FLAG).all()
for mock in mocks:
assert mock.called
@pytest.mark.parametrize('var', ['net_load'])
def test_apply_daily_validation_defaults(
mocker, make_observation, daily_index, var):
mocks = [mocker.spy(tasks, 'validate_defaults'),
mocker.spy(tasks, '_validate_stale_interpolated')]
obs = make_observation(var)
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
10, 1900, -100, 500, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 0}, index=daily_index)
out = tasks.apply_daily_validation(obs, data)
assert (out['quality_flag'] | DAILY_VALIDATION_FLAG).all()
for mock in mocks:
assert mock.called
def test_apply_daily_validation(mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame({
'value': [
# 8 9 10 11 12 13 14 15 16 17 18 19 23
0, 100, -100, 100, 300, 300, 300, 300, 100, 0, 100, 0, 0],
'quality_flag': 94},
index=daily_index)
out = tasks.apply_daily_validation(obs, data)
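    # Note (added): the expected flag is the sum of the per-check series below,
    # which matches a bitwise OR here because each constant is assumed to set
    # distinct bits.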
qf = (pd.Series(LATEST_VERSION_FLAG, index=data.index),
pd.Series(DAILY_VALIDATION_FLAG, index=data.index),
pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['STALE VALUES'],
pd.Series([0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['INTERPOLATED VALUES'],
pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
index=data.index) *
DESCRIPTION_MASK_MAPPING['CLIPPED VALUES']
)
exp = data.copy()
exp['quality_flag'] = sum(qf)
assert_frame_equal(exp, out)
def test_apply_daily_validation_not_enough(mocker, make_observation):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0)],
index=pd.date_range(start='2019-01-01T0000Z',
end='2019-01-01T0100Z',
tz='UTC',
freq='1h'),
columns=['value', 'quality_flag'])
with pytest.raises(IndexError):
tasks.apply_daily_validation(obs, data)
def test_fetch_and_validate_all_observations(mocker, make_observation,
daily_index):
obs = [make_observation('dhi'), make_observation('dni')]
obs += [make_observation('ghi').replace(provider='Organization 2')]
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.list_observations',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs[0].provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
validated = pd.Series(2, index=daily_index)
validate_mock = mocker.MagicMock(return_value=validated)
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{'dhi': validate_mock, 'dni': validate_mock})
tasks.fetch_and_validate_all_observations(
'', data.index[0], data.index[-1], only_missing=False)
assert post_mock.called
assert validate_mock.call_count == 2
def test_fetch_and_validate_all_observations_only_missing(
mocker, make_observation, daily_index):
obs = [make_observation('dhi'), make_observation('dni')]
obs += [make_observation('ghi').replace(provider='Organization 2')]
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.list_observations',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs[0].provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values_not_flagged', # NOQA
return_value=np.array(['2019-01-01', '2019-01-02'],
dtype='datetime64[D]'))
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_all_observations(
'', data.index[0], data.index[-1], only_missing=True)
assert post_mock.called
assert (post_mock.call_args_list[0][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[1][0][1].index.date ==
dt.date(2019, 1, 2)).all()
assert (post_mock.call_args_list[2][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[3][0][1].index.date ==
dt.date(2019, 1, 2)).all()
def test_fetch_and_validate_observation_only_missing(
mocker, make_observation, daily_index):
obs = make_observation('ac_power')
data = pd.DataFrame(
[(0, 0), (100, 0), (-100, 0), (100, 0), (300, 0),
(300, 0), (300, 0), (300, 0), (100, 0), (0, 0),
(100, 1), (0, 0), (0, 0)],
index=daily_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch('solarforecastarbiter.io.api.APISession.get_user_info',
return_value={'organization': obs.provider})
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values_not_flagged', # NOQA
return_value=np.array(['2019-01-01', '2019-01-02'],
dtype='datetime64[D]'))
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'token', 'obsid', data.index[0], data.index[-1], only_missing=True)
assert post_mock.called
assert (post_mock.call_args_list[0][0][1].index.date ==
dt.date(2019, 1, 1)).all()
assert (post_mock.call_args_list[1][0][1].index.date ==
dt.date(2019, 1, 2)).all()
def test__group_continuous_week_post(mocker, make_observation):
split_dfs = [
pd.DataFrame([(0, LATEST_VERSION_FLAG)],
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-03T00:00',
end='2020-05-03T23:59',
tz='UTC',
freq='1h')),
# new week split
pd.DataFrame([(0, LATEST_VERSION_FLAG)],
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-04T00:00',
end='2020-05-04T11:59',
tz='UTC',
freq='1h')),
# missing 12
pd.DataFrame(
[(0, LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'])] + # NOQA
[(1, LATEST_VERSION_FLAG)] * 7,
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-04T13:00',
end='2020-05-04T20:00',
tz='UTC',
freq='1h')),
# missing a week+
pd.DataFrame(
[(9, LATEST_VERSION_FLAG | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'])] + # NOQA
[(3, LATEST_VERSION_FLAG)] * 7,
columns=['value', 'quality_flag'],
index=pd.date_range(
start='2020-05-13T09:00',
end='2020-05-13T16:59',
tz='UTC',
freq='1h')),
]
ov = pd.concat(split_dfs, axis=0)
obs = make_observation('ghi')
session = mocker.MagicMock()
tasks._group_continuous_week_post(session, obs, ov)
call_list = session.post_observation_values.call_args_list
assert len(call_list) == 4
for i, cal in enumerate(call_list):
assert_frame_equal(split_dfs[i], cal[0][1])
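# Added note: four separate frames are expected above because posting is split
# on week boundaries and on gaps in the timestamp index (see the in-line
# comments on split_dfs).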
@pytest.mark.parametrize('vals,func', [
(pd.DataFrame({'value': 0, 'quality_flag': 4}, index=pd.DatetimeIndex(
[pd.Timestamp.utcnow()], name='timestamp')),
'apply_immediate_validation'),
(pd.DataFrame({'value': [0.0] * 5 + [None] * 10, 'quality_flag': 4},
index=pd.date_range('now', name='timestamp', freq='2h',
periods=15)),
'apply_immediate_validation'),
(pd.DataFrame({'value': [0.0] * 15 + [None] * 11, 'quality_flag': 4},
index=pd.date_range('now', name='timestamp', freq='1h',
periods=26)),
'apply_daily_validation'),
])
def test_apply_validation(make_observation, mocker, vals, func):
obs = make_observation('ac_power')
fmock = mocker.patch.object(tasks, func, autospec=True)
tasks.apply_validation(obs, vals)
assert fmock.called
def test_apply_validation_empty(make_observation, mocker):
obs = make_observation('dhi')
daily = mocker.patch.object(tasks, 'apply_daily_validation')
immediate = mocker.patch.object(tasks, 'apply_immediate_validation')
data = pd.DataFrame({'value': [], 'quality_flag': []},
index=pd.DatetimeIndex([], name='timestamp'))
out = tasks.apply_validation(obs, data)
assert_frame_equal(out, data)
assert not daily.called
assert not immediate.called
def test_apply_validation_bad_df(make_observation, mocker):
obs = make_observation('dhi')
data = pd.DataFrame()
with pytest.raises(TypeError):
tasks.apply_validation(obs, data)
with pytest.raises(TypeError):
tasks.apply_validation(obs, pd.Series(
index=pd.DatetimeIndex([]),
dtype=float))
def test_apply_validation_agg(aggregate, mocker):
data = pd.DataFrame({'value': [1], 'quality_flag': [0]},
index=pd.DatetimeIndex(
['2020-01-01T00:00Z'], name='timestamp'))
out = tasks.apply_validation(aggregate, data)
assert_frame_equal(data, out)
def test_find_unvalidated_time_ranges(mocker):
session = mocker.MagicMock()
session.get_observation_values_not_flagged.return_value = np.array(
['2019-04-13', '2019-04-14', '2019-04-15', '2019-04-16', '2019-04-18',
'2019-05-22', '2019-05-23'], dtype='datetime64[D]')
obs = mocker.MagicMock()
obs.observation_id = ''
obs.site.timezone = 'UTC'
out = list(tasks._find_unvalidated_time_ranges(
session, obs, '2019-01-01T00:00Z', '2020-01-01T00:00Z'))
assert out == [
(pd.Timestamp('2019-04-13T00:00Z'), pd.Timestamp('2019-04-17T00:00Z')),
(pd.Timestamp('2019-04-18T00:00Z'), pd.Timestamp('2019-04-19T00:00Z')),
        (pd.Timestamp('2019-05-22T00:00Z'), pd.Timestamp('2019-05-24T00:00Z'))
    ]
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import plot_model
import os
import tensorflow_model_optimization as tfmot
from pathlib import Path
import modules.config as cn
from modules.models import get_model
from modules.preprocessing import fetch_data
import modules.utils as utils
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, roc_auc_score
from scipy.special import softmax
def train_test(params):
    """[This method performs the model training and evaluates on the target domain at the end of training.]"""
# Create directory for unique logs
my_dir = (
str(cn.DATASET_COMBINATION[params["combination"]])
+ "_"
+ str(params["architecture"])
+ "_"
+ str(params["loss_function"])
+ "_"
+ str(params["lambda_loss"])
)
if not params["technique"]:
my_dir = my_dir + "_Original"
if params["prune"]:
tf.compat.v1.logging.info("Pruning is activated")
my_dir = my_dir + "_" + str(params["prune_val"])
assert os.path.exists(cn.LOGS_DIR), "LOGS_DIR doesn't exist"
experiment_logs_path = os.path.join(cn.LOGS_DIR, my_dir)
Path(experiment_logs_path).mkdir(parents=True, exist_ok=True)
utils.define_logger(os.path.join(experiment_logs_path, "experiments.log"))
tf.compat.v1.logging.info("\n")
tf.compat.v1.logging.info("Parameters: " + str(params))
assert (
params["mode"].lower() == "train_test"
), "change training mode to 'train_test'"
tf.compat.v1.logging.info(
"Fetched the architecture function: " + params["architecture"]
)
if params["use_multiGPU"]:
# Create a MirroredStrategy.
strategy = tf.distribute.MirroredStrategy()
print("Number of devices: {}".format(strategy.num_replicas_in_sync))
# Open a strategy scope.
with strategy.scope():
model = None
            tf.compat.v1.logging.info("Using Multiple GPUs for training ...")
tf.compat.v1.logging.info("Building the model ...")
model = get_model(
input_shape=params["input_shape"],
num_classes=params["output_classes"],
lambda_loss=params["lambda_loss"],
additional_loss=params["loss_function"],
prune=params["prune"],
prune_val=params["prune_val"],
technique=params["technique"],
)
# print(model.summary())
""" Model Compilation """
tf.compat.v1.logging.info("Compiling the model ...")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=params["learning_rate"]),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
else:
# Create model
tf.compat.v1.logging.info("Building the model ...")
model = None
model = get_model(
input_shape=params["input_shape"],
num_classes=params["output_classes"],
lambda_loss=params["lambda_loss"],
additional_loss=params["loss_function"],
prune=params["prune"],
prune_val=params["prune_val"],
technique=params["technique"],
)
# print(model.summary())
""" Model Compilation """
tf.compat.v1.logging.info("Compiling the model ...")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=params["learning_rate"]),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
""" Create callbacks """
tf.compat.v1.logging.info("Creating the callbacks ...")
callbacks, log_dir = utils.callbacks_fn(params, my_dir)
tf.compat.v1.logging.info("Calling data preprocessing pipeline...")
ds_train, ds_test = fetch_data(params)
""" Model Training """
tf.compat.v1.logging.info("Training Started....")
hist = None
hist = model.fit(
ds_train,
validation_data=ds_test,
epochs=params["epochs"],
verbose=1,
callbacks=callbacks,
)
tf.compat.v1.logging.info("Training finished....")
""" Plotting """
tf.compat.v1.logging.info("Creating accuracy & loss plots...")
utils.loss_accuracy_plots(
hist=hist,
log_dir=log_dir,
params=params,
)
""" Evaluate on Target Dataset"""
results = model.evaluate(ds_test)
tf.compat.v1.logging.info(
f"Test Set evaluation results for run {Path(log_dir).name} : Accuracy: {results[1]}, Loss: {results[0]}"
)
""" Model Saving """
if params["save_model"]:
tf.compat.v1.logging.info("Saving the model...")
model_path = os.path.join(
cn.MODEL_PATH, (Path(log_dir).parent).name, Path(log_dir).name
)
Path(model_path).mkdir(parents=True, exist_ok=True)
model.save(os.path.join(model_path, "model"))
tf.compat.v1.logging.info(f"Model successfully saved at: {model_path}")
""" Pruned Model Saving """
if params["prune"]:
model_for_export = tfmot.sparsity.keras.strip_pruning(model)
tf.compat.v1.logging.info(f"Pruned Model summary: {model_for_export.summary()}")
tf.compat.v1.logging.info("Saving Pruned Model...")
model_path = os.path.join(
cn.MODEL_PATH, (Path(log_dir).parent).name, Path(log_dir).name
)
Path(model_path).mkdir(parents=True, exist_ok=True)
model_for_export.save(os.path.join(model_path, "pruned_model"))
tf.compat.v1.logging.info(f"Pruned Model successfully saved at: {model_path}")
tf.compat.v1.logging.info(
"Size of gzipped pruned model without stripping: %.2f bytes"
% (utils.get_gzipped_model_size(model))
)
tf.compat.v1.logging.info(
"Size of gzipped pruned model with stripping: %.2f bytes"
% (utils.get_gzipped_model_size(model_for_export))
)
return model, hist, results
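# Added usage sketch (hypothetical values; the real `params` dict is normally
# built from the project's argparse/config layer):
#   params = {"mode": "train_test", "combination": 1, "architecture": "...",
#             "loss_function": "...", "lambda_loss": 0.75, "technique": True,
#             "prune": False, "prune_val": 0.0, "use_multiGPU": False,
#             "input_shape": (32, 32, 3), "output_classes": 10,
#             "learning_rate": 1e-3, "epochs": 10, "save_model": False}
#   model, hist, results = train_test(params)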
def evaluate(model_path, params, figsize=(20, 15)):
    """[This method generates a confusion-matrix heat-map and produces a
    classification report and AUC score.]
Args:
model_path ([keras.Model]): [path of trained keras model]
params ([dict]): [Argparse dictionary]
figsize (tuple): [Plot figure size]
"""
plt.close("all")
font = {"family": "serif", "weight": "bold", "size": 10}
plt.rc("font", **font)
plt.xticks(rotation=90)
plt.yticks(rotation=90)
files_path = os.path.join(cn.BASE_DIR, (Path(model_path).parent).name)
Path(files_path).mkdir(parents=True, exist_ok=True)
utils.define_logger(os.path.join(files_path, "evaluations.log"))
tf.compat.v1.logging.info("Fetch the test dataset ...")
_, ds_test = fetch_data(params)
true_categories = tf.concat([y for x, y in ds_test], axis=0)
np.save(os.path.join(files_path, "y_true"), true_categories.numpy())
tf.compat.v1.logging.info("Loading the trained model ...")
model = keras.models.load_model(model_path)
tf.compat.v1.logging.info("Recompiling the model ...")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=params["learning_rate"]),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
tf.compat.v1.logging.info("Predict the classes on the test dataset ...")
y_pred = model.predict(ds_test)
np.save(os.path.join(files_path, "y_prob"), y_pred)
predicted_categories = tf.argmax(y_pred, axis=1)
np.save(
os.path.join(files_path, "predicted_categories"), predicted_categories.numpy()
)
tf.compat.v1.logging.info("Generating Classification Report ...")
report = classification_report(
true_categories, predicted_categories, output_dict=True
)
    df = pd.DataFrame(report)
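    # Added note: classification_report(..., output_dict=True) returns a nested
    # dict keyed by class label, so the frame is commonly transposed (e.g.
    # pd.DataFrame(report).transpose()) before saving or plotting.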
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)  # api: pandas.util.testing.assert_series_equal
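# The block above exercises pandas' upcasting on NaN assignment: writing np.nan into an
# int64 Series switches it to float64, and the boolean case is coerced to float, as the
# expected Series shows. A minimal sketch of the same behaviour (illustrative, not part
# of the test suite):
#   s = Series([2, 3, 4]); s[0] = np.nan; s.dtype   # -> float64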
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
from ibis.expr import datatypes as dt
from ibis.expr import schema as sch
pytestmark = pytest.mark.pandas
@pytest.mark.parametrize(
('column', 'expected_dtype'),
[
([True, False, False], dt.boolean),
(np.int8([-3, 9, 17]), dt.int8),
(np.uint8([3, 0, 16]), dt.uint8),
(np.int16([-5, 0, 12]), dt.int16),
(np.uint16([5569, 1, 33]), dt.uint16),
(np.int32([-12, 3, 25000]), dt.int32),
(np.uint32([100, 0, 6]), dt.uint32),
(np.uint64([666, 2, 3]), dt.uint64),
(np.int64([102, 67228734, -0]), dt.int64),
(np.float32([45e-3, -0.4, 99.0]), dt.float),
(np.float64([-3e43, 43.0, 10000000.0]), dt.double),
(['foo', 'bar', 'hello'], dt.string),
(
[
pd.Timestamp('2010-11-01 00:01:00'),
pd.Timestamp('2010-11-01 00:02:00.1000'),
pd.Timestamp('2010-11-01 00:03:00.300000'),
],
dt.timestamp,
),
(
pd.date_range('20130101', periods=3, tz='US/Eastern'),
dt.Timestamp('US/Eastern'),
),
(
[
pd.Timedelta('1 days'),
pd.Timedelta('-1 days 2 min 3us'),
pd.Timedelta('-2 days +23:57:59.999997'),
],
dt.Interval('ns'),
),
(pd.Series(['a', 'b', 'c', 'a']).astype('category'), dt.Category()),
],
)
def test_infer_simple_dataframe(column, expected_dtype):
df = pd.DataFrame({'col': column})
assert sch.infer(df) == ibis.schema([('col', expected_dtype)])
def test_infer_exhaustive_dataframe():
df = pd.DataFrame(
{
'bigint_col': np.array(
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90], dtype='i8'
),
'bool_col': np.array(
[
True,
False,
True,
False,
True,
None,
True,
False,
True,
False,
],
dtype=np.bool_,
),
'bool_obj_col': np.array(
[
True,
False,
np.nan,
False,
True,
np.nan,
True,
np.nan,
True,
False,
],
dtype=np.object_,
),
'date_string_col': [
'11/01/10',
None,
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
],
'double_col': np.array(
[
0.0,
10.1,
np.nan,
30.299999999999997,
40.399999999999999,
50.5,
60.599999999999994,
70.700000000000003,
80.799999999999997,
90.899999999999991,
],
dtype=np.float64,
),
'float_col': np.array(
[
np.nan,
1.1000000238418579,
2.2000000476837158,
3.2999999523162842,
4.4000000953674316,
5.5,
6.5999999046325684,
7.6999998092651367,
8.8000001907348633,
9.8999996185302734,
],
dtype='f4',
),
'int_col': np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i4'),
'month': [11, 11, 11, 11, 2, 11, 11, 11, 11, 11],
'smallint_col': np.array(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i2'
),
'string_col': [
'0',
'1',
None,
'double , whammy',
'4',
'5',
'6',
'7',
'8',
'9',
],
'timestamp_col': [
pd.Timestamp('2010-11-01 00:00:00'),
None,
pd.Timestamp('2010-11-01 00:02:00.100000'),
pd.Timestamp('2010-11-01 00:03:00.300000'),
pd.Timestamp('2010-11-01 00:04:00.600000'),
pd.Timestamp('2010-11-01 00:05:00.100000'),
pd.Timestamp('2010-11-01 00:06:00.150000'),
pd.Timestamp('2010-11-01 00:07:00.210000'),
pd.Timestamp('2010-11-01 00:08:00.280000'),
pd.Timestamp('2010-11-01 00:09:00.360000'),
],
'tinyint_col': np.array(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i1'
),
'year': [
2010,
2010,
2010,
2010,
2010,
2009,
2009,
2009,
2009,
2009,
],
}
)
expected = [
('bigint_col', dt.int64),
('bool_col', dt.boolean),
('bool_obj_col', dt.boolean),
('date_string_col', dt.string),
('double_col', dt.double),
('float_col', dt.float),
('int_col', dt.int32),
('month', dt.int64),
('smallint_col', dt.int16),
('string_col', dt.string),
('timestamp_col', dt.timestamp),
('tinyint_col', dt.int8),
('year', dt.int64),
]
assert sch.infer(df) == ibis.schema(expected)
def test_apply_to_schema_with_timezone():
data = {'time': pd.date_range('2018-01-01', '2018-01-02', freq='H')}
df = pd.DataFrame(data)  # api: pandas.DataFrame
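# Sketch of how the timezone check presumably proceeds (an assumption modelled on the
# test name, not the original source; Schema.apply_to is assumed to exist in this ibis
# version): a schema carrying a tz-aware timestamp is applied to the naive frame and the
# resulting dtype is checked, e.g.
#   desired = ibis.schema([('time', dt.Timestamp('US/Eastern'))])
#   result = desired.apply_to(df.copy())
#   assert str(result['time'].dtype) == 'datetime64[ns, US/Eastern]'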
#!/usr/bin/env python
# coding: utf-8
#all imports (initially required)
import os
import shutil
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import rc
if os.path.exists('vis_results/'):
print('previous vis result found')
shutil.rmtree('vis_results/')
print('vis_results deleted')
else:
print('vis_results not found, will be created')
os.mkdir('vis_results/')
print('vis_results created')
#figure parameter set up
plt.rcParams['figure.dpi'] = 800
plt.rcParams["figure.figsize"] = [15.0, 6.0]
# get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")
# get_ipython().run_line_magic('matplotlib', 'inline')
#total uniprot reviewed sequences
df_uniprot = pd.read_csv('uniprot-reviewed_04_12.tab', sep = '\t')
print('uniprot database loaded')
print('length of Uniprot datasize: ', len(df_uniprot))
#total results
df_result = pd.read_csv('ECPred_result_15_12.csv',sep = '\t', low_memory=False)
df_result = df_result[['Entry','EC_Predicted']]
print('length of result datasize: ', len(df_result))
df = pd.merge(df_uniprot,df_result, on = ['Entry'], how = 'left')
df = df[df['EC_Predicted'].notnull()]
df_EC = df[['Entry','EC number','EC_Predicted','Length','Sequence']]
total_match = df_EC[df_EC['EC_Predicted']== df_EC['EC number']]
total_match.to_csv('vis_results/total_match.csv')
no_prediction = df_EC[df_EC['EC_Predicted'] == 'no Prediction']
no_prediction.to_csv('vis_results/no_prediction.csv')
no_prediction_but_E = no_prediction[no_prediction['EC number'].notnull()]
no_prediction_but_E.to_csv('vis_results/no_prediction_but_enzyme.csv')
non_enzyme_correct = df_EC[(df_EC['EC_Predicted'] == 'non Enzyme') & (df_EC['EC number'].isnull())]
non_enzyme_correct.to_csv('vis_results/non_enzyme_correct.csv')
PNEBE = df_EC[(df_EC['EC_Predicted'] == 'non Enzyme') & (df_EC['EC number'].isnull() == False)]
PNEBE.to_csv('vis_results/predicted_non_enzyme_but_enzyme.csv')
a = ((df_EC['EC_Predicted'] == 'no Prediction') == False)
b = ((df_EC['EC_Predicted'] == 'non Enzyme') == False)
c = (df_EC['EC number'].isnull())
PEBnonE = df_EC[a&b&c]
PEBnonE.to_csv('vis_results/predicted_enzyme_but_non_enzyme.csv')
new_digit = df_EC.copy()
new_digit = new_digit[(new_digit['EC number'].isnull() == False)]
new_digit = new_digit[(new_digit['EC_Predicted'] == 'no Prediction') == False]
new_digit = new_digit[(new_digit['EC_Predicted'] == 'non Enzyme') == False]
new_digit = new_digit[(new_digit['EC_Predicted'] == new_digit['EC number']) == False]
def EC_separator(new_digit):
lst = []
EC_number = list(new_digit['EC number'])
EC_predicted = list(new_digit['EC_Predicted'])
for j in range(len(EC_number)):
kst = []
for i in EC_number[j].split('; '):
a = 'first not match'
if EC_predicted[j].split('.')[0] == i.split('.')[0]:
a = 'first matched'
kst.append(a)
break
kst.append(a)
for i in EC_number[j].split('; '):
b = 'second not match'
if EC_predicted[j].split('.')[1] == '-' :
b = 'second blank'
kst.append(b)
break
elif EC_predicted[j].split('.')[1] == i.split('.')[1]:
b = 'second matched'
kst.append(b)
break
kst.append(b)
for i in EC_number[j].split('; '):
c = 'third not matched'
if EC_predicted[j].split('.')[2] == '-' :
c = 'third blank'
kst.append(c)
break
elif EC_predicted[j].split('.')[2] == i.split('.')[2]:
c = 'third matched'
kst.append(c)
break
kst.append(c)
for i in EC_number[j].split('; '):
d = 'fourth not match'
if EC_predicted[j].split('.')[3] == '-' :
d = 'fourth blank'
kst.append(d)
break
elif EC_predicted[j].split('.')[3] == i.split('.')[3]:
d = 'fourth matched'
kst.append(d)
break
kst.append(d)
lst.append(kst)
return lst
lst = EC_separator(new_digit)
for i in range(4):
new_digit['match'+str(i+1)] = [j[i] for j in lst]
a = new_digit['match1'] == 'first matched'
b = new_digit['match2'] == 'second matched'
c = new_digit['match3'] == 'third matched'
d = new_digit['match4'] == 'fourth matched'
total_correct = new_digit[a & b & c & d]
total_correct.to_csv('vis_results/correct_to_fourth_digit.csv')
third_digit_correct = new_digit[a & b & c & (d == False)]
third_digit_correct.to_csv('vis_results/correct_to_third_digit.csv')
second_digit_correct = new_digit[a & b & (c == False) & (d == False)]
second_digit_correct.to_csv('vis_results/correct_to_second_digit.csv')
first_digit_correct = new_digit[a & (b == False) & (c == False) & (d == False)]
first_digit_correct.to_csv('vis_results/correct_to_first_digit.csv')
first_digit_wrong = new_digit[a == False]
first_digit_wrong.to_csv('vis_results/first_digit_wrong.csv')
dataframes = [no_prediction,
PNEBE,
PEBnonE,
non_enzyme_correct,
total_match,
third_digit_correct,
second_digit_correct,
first_digit_correct,
first_digit_wrong]
lengths = [len(i) for i in dataframes[0:4]]
digits = [len(i) for i in dataframes[4:]]
lengths.append(sum(digits))
percent_lengths = [i/sum(lengths) for i in lengths]
percent_digits = [i/sum(digits) for i in digits]
barwidth = 1
# create data
x = ['ECpred \noverall \nperformance']
label_list_lengths = ['No Prediction',
'Predicted Non-Enzyme but Enzyme',
'Predicted Enzyme but non-Enzyme',
'Non Enzyme Correct',
'Predicted EC number']
y1 = percent_lengths[4]
y2 = percent_lengths[3]
y3 = percent_lengths[2]
y4 = percent_lengths[1]
y5 = percent_lengths[0]
color = ['#54d157']
# plot bars in stack manner
plt.bar(x, y1, color= 'b')#color[0])
plt.bar(x, y2, bottom=y1, color='c')
plt.bar(x, y3, bottom=y1+y2, color='y')
plt.bar(x, y4, bottom=y1+y2+y3, color= 'g')
plt.bar(x, y5, bottom= y1+y2+y3+y4, color = 'r')#'#ff9500')
# plt.legend(label_list_lengths[::-1], bbox_to_anchor=(1.5,1.5), ncol=5,loc='center')
# create data
x = ['ECpred \ndigits']
label_list_digits = ['Total correct',
'third digit correct',
'second digit correct',
'first digit correct',
'first digit wrong']
y1 = percent_digits[0]
y2 = percent_digits[1]
y3 = percent_digits[2]
y4 = percent_digits[3]
y5 = percent_digits[4]
# plot bars in stack manner
colors = ['#377697','#4fa9d9','#72bae0','#a7d4ec','#fdab91']
plt.rcParams["figure.figsize"] = [5,5]
plt.bar(x, y1, color= colors[0])#, width = barwidth)
plt.bar(x, y2, bottom=y1, color = colors[1])#, width = barwidth), width = barwidth)
plt.bar(x, y3, bottom=y1+y2,color = colors[2])#, width = barwidth), width = barwidth)
plt.bar(x, y4, bottom=y1+y2+y3, color = colors[3])#, width = barwidth), width = barwidth)
plt.bar(x, y5, bottom= y1+y2+y3+y4, color = colors[4])#, width = barwidth), width = barwidth)
plt.ylabel("Fraction of the Whole")
# plt.legend(label_list_digits, bbox_to_anchor=(1.5,1.5), ncol=5,loc='center')
plt.savefig('vis_results/overall_performance.png', format='png', dpi=800)
ranges = list(np.linspace(0,1000,21))
ranges.append(int(100000))
# print('ranges', ranges)
def count_ranges(dataframe):
range_ = dataframe.groupby(pd.cut(dataframe.Length, ranges)).count().Length
range_ = pd.DataFrame(range_)
range_ = range_.rename(columns={'Length': 'name'})
range_ = [i for i in range_['name']]
return range_
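# count_ranges bins each Length into the global `ranges` edges via pd.cut and returns the
# per-bin counts as a plain list. A toy illustration of the same idiom (values and edges
# here are made up, not taken from the results files):
# toy = pd.DataFrame({'Length': [50, 120, 480, 2000]})
# toy.groupby(pd.cut(toy.Length, [0, 100, 500, 100000])).count().Length.tolist()  # [1, 2, 1]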
dataframes = [total_match,
third_digit_correct,
second_digit_correct,
first_digit_correct,
first_digit_wrong,
no_prediction]
list_ranges = []
for i in dataframes:
list_ranges.append(count_ranges(i))
# print(list_ranges)
r = [i for i in range(len(ranges)-1)]
# plot
barWidth = 1
# names = [0,100,200,300,400,500,600,700,800,900,1000,100000]
names = [(str(ranges[i])+'-'+str(ranges[i+1])) for i in range(len(ranges)-1)]
raw_data = {'c': list(list_ranges[0]),
'd': list(list_ranges[1]),
'e': list(list_ranges[2]),
'f': list(list_ranges[3]),
'g': list(list_ranges[4]),
'h': list(list_ranges[5])}
df = pd.DataFrame(raw_data)
# From raw value to percentage
totals = [i+j+k+l+m+n for i,j,k,l,m,n in zip(df['c'],df['d'], df['e'], df['f'], df['g'], df['h'])]
greenBars = [i / j * 100 for i,j in zip(df['h'], totals)]
orangeBars = [i / j * 100 for i,j in zip(df['g'], totals)]
blueBars = [i / j * 100 for i,j in zip(df['f'], totals)]
redBars = [i / j * 100 for i,j in zip(df['e'], totals)]
red1Bars = [i / j * 100 for i,j in zip(df['d'], totals)]
red2Bars = [i / j * 100 for i,j in zip(df['c'], totals)]
color = ['#016262','#279292','#06C1C1','#66FFFF','#FFCC99','#ff7f50']
# Create green Bars
plt.bar(r, greenBars, color=color[-1], edgecolor='white', width=barWidth)
# Create orange Bars
plt.bar(r, orangeBars, bottom=greenBars, color=color[-2], edgecolor='white', width=barWidth)
# Create blue Bars
plt.bar(r, blueBars, bottom=[i+j for i,j in zip(greenBars, orangeBars)], color=color[-3], edgecolor='white', width=barWidth)
# Create blue Bars
plt.bar(r, redBars, bottom=[i+j+k for i,j,k in zip(greenBars, orangeBars,blueBars)], color= color[-4], edgecolor='white', width=barWidth)
#
plt.bar(r, red1Bars, bottom=[i+j+k+l for i,j,k,l in zip(greenBars, orangeBars, blueBars, redBars)], color=color[-5], edgecolor='white', width=barWidth)
# Create blue Bars
plt.bar(r, red2Bars, bottom=[i+j+k+l+m for i,j,k,l,m in zip(greenBars, orangeBars,blueBars,redBars,red1Bars)], color=color[-6], edgecolor='white', width=barWidth)
legends = ['Total match','Third digit', 'Second digit', 'First digit', 'Wrong prediction', 'No prediction']
legends = legends[::-1]
plt.ylim(0,100)
# Custom x axis
plt.xticks(r, names, rotation = 90)
# plt.yticks(ticks = None, labels = None)
# plt.legend(legends, bbox_to_anchor=(1.5,1.5), ncol=1,loc='upper right')
# Show graphic
# plt.tick_params(axis='x', which='both', bottom=True,top=False, labelbottom= True)
# plt.xticks(r, names, rotation = 90)
plt.ylabel('Percentage of the Total')
plt.xlabel('bin edges')
plt.savefig('vis_results/performance_wrt_length.png', format='png', dpi=800)
plt.show()
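# The count-to-percentage conversion above is done column by column with zips; an
# equivalent, more compact form (illustrative only) normalises the whole frame at once:
# pct = df.div(df.sum(axis=1), axis=0) * 100   # each row then sums to 100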
dataframes = [total_match,
third_digit_correct,
second_digit_correct,
first_digit_correct,
first_digit_wrong,
no_prediction_but_E]
EC_list = []
for dataframe in dataframes:
EC_list.append([i.split('.')[0] for i in dataframe['EC number']])
EC = ['1','2','3','4','5','6']
diction = {}
vector = {'c':EC_list[0],'d':EC_list[1],'e':EC_list[2],'f':EC_list[3],'g':EC_list[4],'h':EC_list[5]}
for i in vector:
for j in range(len(EC)):
key = i+'EC'+EC[j]
value = vector[i].count(EC[j])
diction[key] = value
keys = []
for i in range(len(EC)):
f = []
for j in vector:
key = j+'EC'+EC[i]
f.append(key)
keys.append(f)
list_EC = []
for i in range(6):
a = []
for j in range(6):
a.append(diction[keys[i][j]])
list_EC.append(a)
list_EC = list(np.array(list_EC).T)
r = [0,1,2,3,4,5]
# plot
barWidth = 1
names = ('EC1','EC2','EC3','EC4','EC5','EC6')
# Data
r = [0,1,2,3,4,5]
raw_data = {'c': list_EC[5],
'd': list_EC[4],
'e': list_EC[3],
'f': list_EC[2],
'g': list_EC[1],
'h': list_EC[0]}
df = pd.DataFrame(raw_data)
# From raw value to percentage
totals = [i+j+k+l+m+n for i,j,k,l,m,n in zip(df['c'],df['d'], df['e'], df['f'],df['g'],df['h'])]
greenBars = [i / j * 100 for i,j in zip(df['c'], totals)]
orangeBars = [i / j * 100 for i,j in zip(df['d'], totals)]
blueBars = [i / j * 100 for i,j in zip(df['e'], totals)]
redBars = [i / j * 100 for i,j in zip(df['f'], totals)]
red1Bars = [i / j * 100 for i,j in zip(df['g'], totals)]
red2Bars = [i / j * 100 for i,j in zip(df['h'], totals)]
color = ['#016262','#279292','#06C1C1','#66FFFF','#FFCC99','#FF0000']
# Create green Bars
plt.bar(r, greenBars, color=color[-1], edgecolor='white', width=barWidth)
# Create orange Bars
plt.bar(r, orangeBars, bottom=greenBars, color=color[-2], edgecolor='white', width=barWidth)
# Create blue Bars
plt.bar(r, blueBars, bottom=[i+j for i,j in zip(greenBars, orangeBars)], color=color[-3], edgecolor='white', width=barWidth)
# Create blue Bars
plt.bar(r, redBars, bottom=[i+j+k for i,j,k in zip(greenBars, orangeBars,blueBars)], color= color[-4], edgecolor='white', width=barWidth)
#
plt.bar(r, red1Bars, bottom=[i+j+k+l for i,j,k,l in zip(greenBars, orangeBars, blueBars, redBars)], color=color[-5], edgecolor='white', width=barWidth)
# Create blue Bars
plt.bar(r, red2Bars, bottom=[i+j+k+l+m for i,j,k,l,m in zip(greenBars, orangeBars,blueBars,redBars,red1Bars)], color=color[-6], edgecolor='white', width=barWidth)
legends = ['Total match', 'Third digit', 'Second digit', 'First digit', 'Wrong prediction', 'No prediction but Enzyme']
legends = legends[::-1]
# Custom x axis
plt.ylim(0,110)
# plt.yticks(color='w')
plt.xticks(r, names)
plt.ylabel('Percentage of the total')
plt.xlabel('\nEC classes')
# plt.tick_params(axis='x', which='both', bottom=False,top=False, labelbottom= False)
# plt.tick_params(axis='y', which='both', bottom=False,top=False, labelbottom= False)
# plt.legend(legends, bbox_to_anchor=(1.5,1.5), ncol=1,loc='upper right')
# Show graphic
plt.savefig('vis_results/Performance_on_EC_number.png', format='png', dpi=800)
plt.show()
plt.rcParams["figure.figsize"] = [25.0, 10.0]
plt.rcParams["savefig.bbox"] = 'tight'
# %matplotlib inline
error_dataframe = [no_prediction_but_E,PNEBE,first_digit_wrong]
def error_count(error_dataframe):
error_name = ['no_prediction','PNEBE','first_digit_wrong']
plt.subplots(1,3)
for i in range(len(error_dataframe)):
print(type(i))
cnt = Counter(error_dataframe[i]['EC number'])
top = 20
sorted_class = cnt.most_common()[:top]
classes = [c[0] for c in sorted_class]
counts = [c[1] for c in sorted_class]
plt.subplot(1,3,i+1)
plt.bar(range(len(classes)),counts)
plt.xticks(range(len(classes)), classes, fontsize = 10, rotation = 'vertical')
plt.title(error_name[i], fontsize = 20)
plt.savefig('vis_results/top_mistakes.png', format = 'png', dpi = 1000)
plt.show()
error_count(error_dataframe)
#figure parameter set up
plt.rcParams['figure.dpi'] = 800
plt.rcParams["figure.figsize"] = [15.0, 6.0]
def frac_of_aa(dataframe,i):
dataframe['Frac_Counter_'+str(i)] = dataframe.apply(lambda row: row['Sequence'].count(str(i))/row['Length'], axis = 1)
return dataframe
list_aa = 'ARNDCQEGHILKMFPSTWYV'
for i in list_aa:
percent_aa = frac_of_aa(df_EC,str(i))
enz = percent_aa[percent_aa['EC number'].isnull() == False]
nonenz = percent_aa[percent_aa['EC number'].isnull()]
import matplotlib.pyplot as ax
dataframes = [enz,nonenz]
def point_finder(dataframe,amino_acid):
bins = 30
list_frac = list(dataframe['Frac_Counter_'+str(amino_acid)])
zero_out = list(filter(lambda a: a != 0, list_frac))
zero_out.sort(reverse=False)
list_points = [-.01]
for i in [zero_out[len(zero_out)*i//bins] for i in range(1,bins)]:
list_points.append(i)
list_points.append(max(zero_out)+.01)
return list_points
def number_of_aa(dataframe,i):
dataframe['Frac_Counter'] = dataframe.apply(lambda row: row['Sequence'].count(str(i))/row['Length'], axis = 1)
return dataframe
def count_ranges_aa(dataframe, percent_aa, amino_acid):
aa_range = point_finder(percent_aa, amino_acid)
range_aa = dataframe.groupby(pd.cut(dataframe.Frac_Counter, aa_range)).count().Frac_Counter
range_aa = pd.DataFrame(range_aa)
range_aa = range_aa.rename(columns={'Frac_Counter': 'name'})
range_aa = [i for i in range_aa['name']]
return range_aa, aa_range
def plot_aa_enz_non_enz(dataframes, amino_acid_list):
barWidth = 1
color = ['#016262','#ff7f50']
a = 0
ax.rcParams["figure.figsize"] = [60.0, 15.0]
ax.subplots(len(amino_acid_list)//5,5)
point_dict = {}
for amino_acid in amino_acid_list:
a += 1
for i in dataframes:
df = number_of_aa(i, amino_acid)
aa_list_ranges = []
for i in dataframes:
range_a, ranges = count_ranges_aa(i, percent_aa, amino_acid)
point_dict[amino_acid] = ranges
aa_list_ranges.append(range_a)
raw_data = {'d': list(aa_list_ranges[0]),
'e': list(aa_list_ranges[1])}
df = pd.DataFrame(raw_data)
# From raw value to percentage
totals = [i+j for i,j in zip(df['d'], df['e'])]
greenBars = [i / j * 100 for i,j in zip(df['d'], totals)]
orangeBars = [i / j * 100 for i,j in zip(df['e'], totals)]
r = [i for i in range(len(greenBars))]
names = [str(round(ranges[i],3))+'-'+str(round(ranges[i+1],3)) for i in range(len(ranges)-1)]
ax.subplot(len(amino_acid_list)//5,5,a)
# ax.tick_params(axis='x', which='both', bottom=False,top=False, labelbottom=False)
# ax.tick_params(axis='y', which='both', bottom=False,top=False, labelbottom=False)
ax.bar(r, greenBars, color= color[0], edgecolor='white', width=barWidth)
ax.bar(r, orangeBars, bottom=greenBars, color=color[1], edgecolor='white', width=barWidth)
ax.ylim(0,100)
ax.yticks([])
ax.ylabel(amino_acid, fontsize = 58, loc= 'center', labelpad = 28, rotation = 0 )
ax.xticks(r, names, fontsize= 7, rotation = 45)
point_dict[amino_acid] = ranges
point_dict_dataframe = pd.DataFrame(point_dict)
point_dict_dataframe.to_csv('vis_results/bin_edge_number.csv')
ax.tight_layout()
ax.savefig('vis_results/fractional_composition_of_enzyme&non-enzyme.png', format = 'png', dpi = 800)
ax.show()
list_aa = 'ARNDCQEGHILKMFPSTWYV'
list_aa = list(list_aa)
# len(list_aa)
plot_aa_enz_non_enz(dataframes,list_aa)
import matplotlib.pyplot as ax
dataframes = [total_match,
third_digit_correct,
second_digit_correct,
first_digit_correct,
first_digit_wrong,
no_prediction]
def point_finder(dataframe,amino_acid):
bins = 12
list_frac = list(dataframe['Frac_Counter_'+str(amino_acid)])
zero_out = list(filter(lambda a: a != 0, list_frac))
zero_out.sort(reverse=False)
list_points = [-.01]
for i in [zero_out[len(zero_out)*i//bins] for i in range(1,bins)]:
list_points.append(i)
list_points.append(max(zero_out)+.01)
return list_points
def number_of_aa(dataframe,i):
dataframe['Frac_Counter'] = dataframe.apply(lambda row: row['Sequence'].count(str(i))/row['Length'], axis = 1)
return dataframe
def count_ranges_aa(dataframe, percent_aa, amino_acid):
aa_range = point_finder(percent_aa, amino_acid)
range_aa = dataframe.groupby(pd.cut(dataframe.Frac_Counter, aa_range)).count().Frac_Counter  # api: pandas.cut
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
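# A minimal sketch of pandas._testing.assert_series_equal, the API named in the
# record above: it returns None when the two Series match on values, dtype and
# index, and raises AssertionError otherwise.
import pandas as pd
import pandas._testing as tm

left = pd.Series([pd.Timestamp("20130101 9:01:00.005")])
right = pd.Series([pd.Timestamp("20130101 9:01") + pd.offsets.Milli(5)])
tm.assert_series_equal(left, right)  # passes silently; a mismatch would raise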
from email.policy import default
import os
import sys
import argparse
import pandas as pd
from datetime import datetime
# disable all debugging logs
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import warnings
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action="ignore", category=DataConversionWarning)
import tensorflow as tf
import numpy as np
import hyperparameters as hp
from autoencoders import (
vanilla_autoencoder,
variational_autoencoder,
convolutional_autoencoder,
)
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_curve, roc_auc_score, plot_roc_curve
import matplotlib.pyplot as plt
import matplotlib
# from utils import *
def parse_args():
""" Perform command-line argument parsing. """
parser = argparse.ArgumentParser(
description="arguments parser for models",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--autoencoder-model",
default="vanilla",
help="types of model to use, vanilla, convolutional, variational",
)
parser.add_argument(
"--classifier-model",
default="all",
help="types of model to use, all, xgb, randomforest, logreg",
)
parser.add_argument("--omics-data", default=None, help="omics data file name")
parser.add_argument("--biomed-data", default=None, help="biomed data file name")
parser.add_argument("--merged-data", default=None, help="merged data file name")
parser.add_argument(
"--load-autoencoder",
default=None,
help="path to model checkpoint file, should be similar to ./output/checkpoints/041721-201121/epoch19",
)
parser.add_argument(
"--train-autoencoder", action="store_true", help="train the autoencoder model"
)
parser.add_argument(
"--no-save",
action="store_true",
help="not save the checkpoints, logs and model. Used only for develop purpose",
)
parser.add_argument(
"--train-classifier", action="store_true", help="train the classifier model"
)
parser.add_argument(
"--classifier-data",
default=None,
help="merged, omics, biomed, encoded_omics. The encoding process will take place during the classification process. So even when choose encoded_omics, only the raw omics data is required as input.",
)
parser.add_argument(
"--save-encoded-omics",
action="store_true",
help="save the encoded omics data features",
)
return parser.parse_args()
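# Illustrative invocation (file and script names are hypothetical; the flags
# match the parser defined above):
#   python run_models.py --autoencoder-model vanilla --omics-data ./data/omics.csv \
#       --train-autoencoder --train-classifier --classifier-data merged \
#       --biomed-data ./data/biomed.csv --merged-data ./data/merged.csv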
class CustomModelSaver(tf.keras.callbacks.Callback):
def __init__(self, checkpoint_dir):
super(CustomModelSaver, self).__init__()
self.checkpoint_dir = checkpoint_dir
def on_epoch_end(self, epoch, logs=None):
save_name = "epoch_{}".format(epoch)
tf.keras.models.save_model(
self.model, self.checkpoint_dir + os.sep + save_name, save_format="tf"
)
def autoencoder_loss_fn(model, input_features):
decode_error = tf.losses.mean_squared_error(model(input_features), input_features)
return decode_error
def autoencoder_train(loss_fn, model, optimizer, input_features, train_loss):
with tf.GradientTape() as tape:
loss = loss_fn(model, input_features)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
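# autoencoder_train above performs a single optimization step: it computes the
# reconstruction loss under tf.GradientTape, backpropagates through the
# autoencoder's trainable variables, applies the gradients, and folds the batch
# loss into the running train_loss metric.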
def main():
print("===== starting =====")
time_now = datetime.now()
timestamp = time_now.strftime("%m%d%y-%H%M%S")
gpus = tf.config.list_physical_devices("GPU")
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
print("===== loading omics data =====")
omics_data = pd.read_csv(ARGS.omics_data, index_col=0).T.astype("float32")
(num_patients, num_features) = omics_data.shape
print(
"{} contains {} patients with {} features".format(
ARGS.omics_data.split("/")[-1], num_patients, num_features
)
)
checkpoint_path = (
"./output"
+ os.sep
+ "{}_{}_{}".format(timestamp, ARGS.autoencoder_model, ARGS.omics_data.split("/")[-1][:-4])
+ os.sep
+ "checkpoints"
)
logs_path = (
"./output"
+ os.sep
+ "{}_{}_{}".format(timestamp, ARGS.autoencoder_model, ARGS.omics_data.split("/")[-1][:-4])
+ os.sep
+ "logs"
)
if not ARGS.no_save:
print("checkpoint file saved at {}".format(checkpoint_path))
print("log file save as {}".format(logs_path))
logs_path = os.path.abspath(logs_path)
checkpoint_path = os.path.abspath(checkpoint_path)
if (
not os.path.exists(checkpoint_path)
and not os.path.exists(logs_path)
and ARGS.train_autoencoder
):
os.makedirs(checkpoint_path)
os.makedirs(logs_path)
if ARGS.autoencoder_model == "vanilla":
autoencoder = vanilla_autoencoder(
latent_dim=hp.latent_dim,
intermediate_dim=hp.intermediate_dim,
original_dim=num_features,
)
elif ARGS.autoencoder_model == "convolutional":
autoencoder = convolutional_autoencoder(
latent_dim=hp.latent_dim, original_dim=num_features,
)
elif ARGS.autoencoder_model == "variational":
autoencoder = variational_autoencoder(
original_dim=num_features,
intermediate_dim=hp.intermediate_dim,
latent_dim=hp.latent_dim,
)
else:
sys.exit("Wrong model for autoencoder!")
if ARGS.load_autoencoder is not None:
print("===== Loading pretrained autoencoder =====")
autoencoder.load_weights(ARGS.load_autoencoder).expect_partial()
print("loading pretrained model at {}".format(ARGS.load_autoencoder))
if ARGS.train_autoencoder:
print("===== Train autoencoder =====")
        omics_data = tf.convert_to_tensor(omics_data)  # assign back; convert_to_tensor does not modify in place
omics_data = tf.expand_dims(omics_data, axis=1)
training_dataset = tf.data.Dataset.from_tensor_slices(omics_data)
training_dataset = training_dataset.batch(hp.batch_size)
training_dataset = training_dataset.shuffle(num_patients)
training_dataset = training_dataset.prefetch(hp.batch_size * 4)
optimizer = tf.keras.optimizers.Adam(
(
tf.keras.optimizers.schedules.InverseTimeDecay(
hp.learning_rate, decay_steps=1, decay_rate=5e-5
)
)
)
train_loss = tf.keras.metrics.Mean("train_loss", dtype=tf.float32)
for epoch in range(hp.num_epochs):
for step, batch_features in enumerate(training_dataset):
autoencoder_train(
autoencoder_loss_fn,
autoencoder,
optimizer,
batch_features,
train_loss,
)
tf.summary.scalar("loss", train_loss.result(), step=epoch)
if not ARGS.no_save:
save_name = "epoch_{}".format(epoch)
autoencoder.save_weights(
filepath=checkpoint_path + os.sep + save_name, save_format="tf"
)
template = "Epoch {}, Loss {:.8f}"
tf.print(
template.format(epoch + 1, train_loss.result()),
output_stream="file://{}/loss.log".format(logs_path),
)
print(template.format(epoch + 1, train_loss.result()))
train_loss.reset_states()
if ARGS.train_classifier:
print("===== train classifier =====")
print("classifier data: {}".format(ARGS.classifier_data))
print("===== classifier preprocess =====")
if ARGS.classifier_data == "merged":
biomed_df = pd.read_csv(ARGS.biomed_data, index_col=0)
num_biomed_features = biomed_df.shape[1] - 1
merged_df = pd.read_csv(ARGS.merged_data, index_col=0).astype("float32")
print(
"{} contains {} patients with {} features".format(
ARGS.merged_data.split("/")[-1],
merged_df.shape[0],
merged_df.shape[1] - 1,
)
)
X, Y = merged_df.iloc[:, :-1], merged_df.iloc[:, -1]
            X = tf.convert_to_tensor(X)  # assign back; convert_to_tensor does not modify in place
            Y = tf.convert_to_tensor(Y)
X = tf.expand_dims(X, axis=1)
Y = tf.expand_dims(Y, axis=1)
X_omics = X[:, :, num_biomed_features:]
X_biomed = X[:, :, :num_biomed_features]
if ARGS.autoencoder_model == "variational":
X_omics = autoencoder.encoder(X_omics)[-1]
else:
X_omics = autoencoder.encoder(X_omics)
X_encoded = X_omics.numpy().reshape(-1, hp.latent_dim)
# TODO: save as .csv file with barcode as index
if ARGS.save_encoded_omics:
df = pd.DataFrame(X_encoded, index=merged_df.index)
text = "latent_features_{}_{}".format(
ARGS.autoencoder_model, ARGS.omics_data.split("/")[-1],
)
df.to_csv(text)
print("save encoded omics features in {}".format(text))
print("===== finish omics encoding =====")
X = tf.concat([X_omics, X_biomed], axis=2)
X, Y = (
X.numpy().reshape(-1, hp.latent_dim + num_biomed_features),
Y.numpy().reshape(-1,),
)
elif ARGS.classifier_data == "biomed":
biomed_df = pd.read_csv(ARGS.biomed_data, index_col=0)
num_biomed_features = biomed_df.shape[1] - 1
merged_df = pd.read_csv(ARGS.merged_data, index_col=0).astype("float32")
print(
"{} contains biomed data for {} patients with {} features".format(
ARGS.merged_data.split("/")[-1],
merged_df.shape[0],
num_biomed_features,
)
)
X, Y = merged_df.iloc[:, :-1], merged_df.iloc[:, -1]
            X = tf.convert_to_tensor(X)  # assign back; convert_to_tensor does not modify in place
            Y = tf.convert_to_tensor(Y)
X = tf.expand_dims(X, axis=1)
Y = tf.expand_dims(Y, axis=1)
# X_biomed = X[:, :, -num_biomed_features:]
X_biomed = X[:, :, :num_biomed_features]
X, Y = (
X_biomed.numpy().reshape(-1, num_biomed_features),
Y.numpy().reshape(-1,),
)
elif ARGS.classifier_data == "omics":
biomed_df = pd.read_csv(ARGS.biomed_data, index_col=0)
num_biomed_features = biomed_df.shape[1] - 1
merged_df = pd.read_csv(ARGS.merged_data, index_col=0).astype("float32")
print(
"{} contains omics data for {} patients with {} features".format(
ARGS.merged_data.split("/")[-1],
merged_df.shape[0],
merged_df.shape[1] - 1 - num_biomed_features,
)
)
X, Y = (
merged_df.iloc[:, : -1 - num_biomed_features].to_numpy(),
merged_df.iloc[:, -1].to_numpy(),
)
elif ARGS.classifier_data == "encoded_omics":
biomed_df = pd.read_csv(ARGS.biomed_data, index_col=0)
num_biomed_features = biomed_df.shape[1] - 1
merged_df =
|
pd.read_csv(ARGS.merged_data, index_col=0)
|
pandas.read_csv
|
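# A minimal, self-contained sketch of the pandas.read_csv usage named in the
# record above: read a CSV with its first column as the index, then cast to
# float32 as the script does with ARGS.merged_data. The inline CSV and its
# column names are hypothetical.
import io
import pandas as pd

csv_text = "barcode,feat_1,feat_2,label\np1,0.1,0.2,1\np2,0.3,0.4,0\n"
merged_df = pd.read_csv(io.StringIO(csv_text), index_col=0).astype("float32")
print(merged_df.shape)  # (2, 3)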
from typing import Optional
from tqdm import tqdm
import numpy as np
import pandas as pd
# Feature selections
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold
# ROC
from sklearn.metrics import confusion_matrix, auc, plot_roc_curve
# Scores
from sklearn.metrics import f1_score, balanced_accuracy_score, precision_score, recall_score, make_scorer
# Models
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
# Resampling
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from matplotlib import gridspec
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def chooseSampler(sampleMethod: Optional[str]):
if sampleMethod == "under":
return ("underSampler", RandomUnderSampler(sampling_strategy="majority"))
elif sampleMethod == "over":
return ("overSampler", SMOTE(sampling_strategy="minority"))
elif sampleMethod == "both":
return "overSampler", SMOTE(sampling_strategy="minority"),\
"underSampler", RandomUnderSampler(sampling_strategy="majority")
else:
return None
def getPipe(model, sampleMethod: Optional[str]):
sampler = chooseSampler(sampleMethod)
if not (sampler):
return Pipeline([
('scale', StandardScaler()),
("pca", PCA()),
('model', model)
])
if len(sampler)==2:
return Pipeline([
('scale', StandardScaler()),
("pca", PCA()),
sampler,
('model', model)
])
elif len(sampler)==4:
return Pipeline([
('scale', StandardScaler()),
("pca", PCA()),
sampler[0:2],
sampler[2:4],
('model', model)
])
else:
raise ValueError("Wrong number of samplers: len(sampler)={}".format(len(sampler)))
def findParamGrid(model, numFeatures, searchPC):
typeModel = type(model)
if typeModel == type(RandomForestClassifier()):
return {#"model__n_estimators": [10, 100, 1000],
"model__max_features": ['auto'],#, 'sqrt', 'log2'],#[1, 25,50, 75, 100], #
"model__max_depth" : np.arange(1,8),
#"model__criterion" :['gini', 'entropy'],
"pca__n_components": range(1,numFeatures+1, 2) if (searchPC) else [numFeatures]
}
elif typeModel == type(GradientBoostingClassifier()):
return {#"model__loss":["deviance", "exponential"],
#"model__learning_rate": [0.01, 0.025, 0.1, 0.2],
"model__max_depth":np.arange(1,8),
"model__max_features":['auto'],#, 'sqrt', 'log2'],#[25,50, 75, 100], #['auto', 'sqrt', 'log2'],
#"model__criterion": ["friedman_mse", "mse"],
#"model__subsample":[0.5, 0.75, 1],
#"model__n_estimators":[10,100,1000],
"pca__n_components": range(1,numFeatures+1, 2) if (searchPC) else [numFeatures]
}
elif typeModel == type(DecisionTreeClassifier()):
return {"model__max_features": ['sqrt'],# 'log2'],
#"model__min_samples_split": np.linspace(0.1, 0.5, 2),
#"model__min_samples_leaf": np.linspace(0.1, 0.5, 2),
"model__max_depth" : np.arange(1,8),
#"model__ccp_alpha" : np.arange(0, 1, 0.05)
#"model__criterion" :['gini'],#, 'entropy'],
"pca__n_components": range(1,numFeatures+1, 2) if (searchPC) else [numFeatures]
}
elif typeModel == type(LogisticRegression()):#penalty{‘l1’, ‘l2’, ‘elasticnet’, ‘none’}
return {"model__penalty":["l2"],# "l2", "elasticnet", "none"],
"model__C": np.logspace(-3,5,7),
"model__max_iter":[200, 400],
"pca__n_components": range(1,numFeatures+1, 2) if (searchPC) else [numFeatures]
}
else:
raise TypeError("No model has been specified: type(model):{}".format(typeModel))
def applyGridSearch(X: pd.DataFrame, y, model, cv, numPC: int, sampleMethod="None", searchPC=False):
param_grid = findParamGrid(model, numFeatures=numPC, searchPC=searchPC)
## TODO: Insert these somehow in gridsearch (scoring=scoring,refit=False)
scoring = {'accuracy': make_scorer(balanced_accuracy_score),
'precision': make_scorer(precision_score),
'recall': make_scorer(recall_score),
'f1': make_scorer(f1_score),
}
# Making a pipeline
pipe = getPipe(model, sampleMethod)
# Do a gridSearch
grid = GridSearchCV(pipe, param_grid, scoring=scoring, refit="f1",
cv=cv,verbose=2,return_train_score=True, n_jobs=-1)
grid.fit(X, y)
print(grid.best_estimator_)
return grid.best_estimator_, grid
def fitAlgorithm(classifier, trainingData, trainingTarget):
"""
Fits a given classifier / pipeline
"""
#train the model
return classifier.fit(trainingData, trainingTarget)
import click
@click.command()
@click.option('--numberOfPrincipalComponents', prompt="Number of principal components", default=1, help='Number of principal components.')
@click.option('--InsertApproach', prompt='Approach',
              help='Name of the approach directory to use, e.g. 01-naive-approach')
def optimalize_algorithms(InsertApproach,numberOfPrincipalComponents):
data = pd.read_pickle(data_dir / "processed" / "processedData.pkl")
trainingData = pd.read_pickle(data_dir / InsertApproach / "processed" / "trainingData.pkl")
trainingTarget= pd.read_pickle(data_dir / InsertApproach / "processed" / "trainingTarget.pkl")
testSet = pd.read_pickle(data_dir / InsertApproach / "processed" / "testSet.pkl")
trainingData
InsertAlgorithms = [LogisticRegression (random_state = random_state, max_iter=200),
DecisionTreeClassifier (random_state = random_state, max_features = "auto"),
RandomForestClassifier (random_state = random_state, max_features = "auto", max_depth=6),\
GradientBoostingClassifier(random_state = random_state, max_features = "auto")]
InsertAbbreviations = ["LOG", "DT", "RF", "GB"]
InsertprettyNames = ["Logistic regression", "Decision Tree", "Random Forest", "Gradient Boost"]
includeSampleMethods = [""]#, "under", "over", "both"]
numberRuns = 5
numberSplits = 5
rskfold = RepeatedStratifiedKFold(n_splits=numberSplits, n_repeats=numberRuns, random_state=random_state)
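# Note added for clarity: with n_splits=5 and n_repeats=5, RepeatedStratifiedKFold
# fits each grid-search candidate 25 times, so any search using this cv object
# multiplies its runtime accordingly.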
ModelsBestParams = pd.Series({}, dtype="string")  # completion API: pandas.Series
#!/usr/bin/env python3
#encoding: utf-8
import pandas as pd
import numpy as np
DATAFILELOC = './data/'
OPFILELOC = './output/'
RSEGROUPS = 'rse_groups.csv'
UKRSE = 'association-members.csv'
UKRESEARCHERS = 'hesa_number_of_researchers_uk.csv'
JOBS = 'rse_like_jobs.csv'
RSPENDING = 'research_spending.csv'
SALARY = 'salary.csv'
GLOBALRESEARCHERS = 'global_researchers.csv'
POPULATION = 'population.csv'
COUNTRYCODES = 'oecd_country_codes.csv'
def import_csv_to_df(location, filename):
"""
Imports a csv file into a Pandas dataframe
:params: an xls file and a sheetname from that file
:return: a df
"""
return pd.read_csv(location + filename, low_memory=False)
def export_to_csv(df, location, filename, index_write):
"""
Exports a df to a csv file
:params: a df and a location in which to save it
:return: nothing, saves a csv
"""
return df.to_csv(location + filename + '.csv', index=index_write)
def rse_group_average(DATAFILELOC, RSEGROUPS,num_of_groups_uk):
"""
Takes the data collected from UK RSE Groups, calculates the median group size, uses that data to make up for
the missing groups (I got data from 25 of the 29 of them) and then calculates the total number of people in UK RSE
Groups.
:param DATAFILELOC: location of data files
:param RSEGROUPS: csv with data on size of RSE Groups
:param num_of_groups_uk: the number of RSE Groups in the UK
:return: the total number of RSEs in UK RSE Groups
"""
# Get data on RSE Groups
df_rse_groups = import_csv_to_df(DATAFILELOC, RSEGROUPS)
column_names = df_rse_groups.columns
# Median group size in data and number of groups in data
median_group_size = round(df_rse_groups['No. of RSEs Jan 2020'].median(),0)
num_groups_in_data = len(df_rse_groups)
# Find missing groups
missing_groups = num_of_groups_uk - num_groups_in_data
# Add dummy data to make up for RSE groups not in original data
df_extra = pd.DataFrame([[np.NaN, np.NaN, np.NaN, median_group_size, np.NaN]], columns=column_names)
for i in range(missing_groups):
df_rse_groups = df_rse_groups.append(df_extra, ignore_index=True)
rses_in_groups = df_rse_groups['No. of RSEs Jan 2020'].sum()
return rses_in_groups
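# Illustrative check of the logic above (hypothetical numbers, not the survey data):
# if 25 groups report 250 RSEs in total and the median group size is 10, the 4
# missing groups are assumed to hold 10 RSEs each, giving 250 + 4 * 10 = 290.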
def rses_in_association(DATAFILELOC, UKRSE):
"""
Takes all the post-@-part of the email addresses of people signed up to the UKRSE Association, drops all the
obviously non-UK email addresses, drops half of the .com and .org ones too. Then counts the people who are left to
say how many UK RSEs are in the UK RSE Association.
:param DATAFILELOC: location of data files
:param UKRSE: csv of last parts of email addresses of people signed up to UKRSE Association
:return: the total number of RSEs in the UKRSE Association
"""
# Get data on UKRSE Association
df_ukrse = import_csv_to_df(DATAFILELOC, UKRSE)
# Get last part of email address in new col
df_ukrse['endings'] = df_ukrse['Email'].str.rsplit('.', n=1).str[1]
# This was used in presentation, not needed for analysis
#list_uks = df_ukrse[df_ukrse['endings']=='uk']['Email'].tolist()
#print(set(list_uks))
# Find all the .uk and .scot
df_uks = df_ukrse[df_ukrse['endings'].str.contains('uk|scot')]
uks = len(df_uks)
# Find all the .com and .org
df_coms_orgs = df_ukrse[df_ukrse['endings'].str.contains('com|org')]
coms_orgs = len(df_coms_orgs)
# Calculate how many members were in the UK by keeping all the .uk and .scot, but only
# half of the .com and .org
uk_rses_in_ukrse = uks + (coms_orgs/2)
return uk_rses_in_ukrse
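# Illustrative check of the counting rule above (hypothetical numbers): with 800
# ".uk"/".scot" members and 200 ".com"/".org" members, the estimate is
# 800 + 200 / 2 = 900 UK-based members.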
def researchers_in_uk(DATAFILELOC, UKRESEARCHERS):
"""
    Takes data from HESA and does a load of cleaning to work out how many researchers are employed in the UK.
:param DATAFILELOC: location of data files
:param UKRESEARCHERS: csv of researchers in UK from HESA website
:return: the total number of researchers in the UK
"""
# Get data on UK researchers
df_uk_research = import_csv_to_df(DATAFILELOC, UKRESEARCHERS)
# First 28 rows of the csv are metadata! No, no, it's fine HESA. I've got tons of free time, don't you worry.
# Tidydata, please. Tidydata!
df_uk_research.columns = df_uk_research.iloc[27]
df_uk_research = df_uk_research.iloc[28:]
df_uk_research.reset_index(drop=True, inplace=True)
# Cut to latest year
df_uk_research = df_uk_research[df_uk_research['Academic Year']=='2018/19']
# Cut to just the academics
# Working with HESA data is like working with angry sharks. Given any freedom, you would choose not to, but sometimes
# you're forced into it. They've encoded the data they need for filtering on the website into their datasets, so
# there's massive duplication which the following five lines of code are needed to remove. Sigh.
df_uk_research = df_uk_research[df_uk_research['Activity standard occupational classification'] == 'Total academic staff']
df_uk_research = df_uk_research[df_uk_research['Mode of employment'] == 'All']
df_uk_research = df_uk_research[df_uk_research['Contract marker'] == 'Academic']
df_uk_research = df_uk_research[df_uk_research['Country of HE provider'] == 'All']
df_uk_research = df_uk_research[df_uk_research['Region of HE provider'] == 'All']
df_uk_research = df_uk_research[df_uk_research['HE Provider'] != 'Total']
df_uk_research['Number'] = df_uk_research['Number'].astype(int)
num_uk_academics = df_uk_research['Number'].sum()
return num_uk_academics
def get_mean_rse_like_jobs(DATAFILELOC, JOBS):
"""
Very simple function to calculate the mean of a few numbers related to
RSE like jobs
:param DATAFILELOC: location of data files
:param JOBS: data on the mean fraction of jobs advertised on jobs.ac.uk that are RSE like
:return: the mean of a list of fractions
"""
# Get the annual mean data
df_annuals = import_csv_to_df(DATAFILELOC, JOBS)
mean_annuals = round(df_annuals['fraction rse-like'].mean(),2)
return mean_annuals
def we_are_not_that_big(DATAFILELOC, RSPENDING, SALARY, GLOBALRESEARCHERS, POPULATION, num_rses_uk, OPFILELOC, COUNTRYCODES):
"""
    Calculates the number of RSEs worldwide. It compares research spend and average salary to the UK, then
    compares the number of researchers employed in each country to the UK, calculates the fractional difference between
    the UK and each country, and multiplies this by the (pretty well) understood number of RSEs in the UK.
:param DATAFILELOC: location of data files
:param RSPENDING: csv of research spending per country
:param SALARY: csv of average salary per country
:param GLOBALRESEARCHERS: csv of number of researchers per country (as percentage of total population)
:param POPULATION: csv of population per country
:param num_rses_uk: known number of RSEs in the UK
:param OPFILELOC: location of output files
:param COUNTRYCODES: csv of short country codes and full country name
:return: a dict containing two values, each the number of RSEs in the world as calculated by one of the two methods
"""
#Get data
df_spending = import_csv_to_df(DATAFILELOC, RSPENDING)
df_salary = import_csv_to_df(DATAFILELOC, SALARY)
df_researchers = import_csv_to_df(DATAFILELOC, GLOBALRESEARCHERS)
df_pop = import_csv_to_df(DATAFILELOC, POPULATION)
df_countries = import_csv_to_df(DATAFILELOC, COUNTRYCODES)
df_countries.columns = ['country', 'LOCATION']
#Cut data to 2017 (the most recent year with the most data) and drop OECD and EU28 rows
# Set the year of interest
year_int = 2017
df_spending = df_spending[df_spending['TIME']==year_int]
df_spending = df_spending[df_spending['MEASURE']=='MLN_USD']
df_spending = df_spending[df_spending['LOCATION']!='OECD']
df_spending = df_spending[df_spending['LOCATION'] != 'EU28']
df_salary = df_salary[df_salary['TIME']==year_int]
df_researchers = df_researchers[df_researchers['TIME'] == year_int]
df_researchers = df_researchers[df_researchers['SUBJECT'] == 'TOT']
df_researchers = df_researchers[df_researchers['MEASURE'] == '1000EMPLOYED']
df_researchers = df_researchers[df_researchers['LOCATION']!='OECD']
df_researchers = df_researchers[df_researchers['LOCATION'] != 'EU28']
df_pop = df_pop[df_pop['TIME']==year_int]
df_pop = df_pop[df_pop['SUBJECT']=='TOT']
df_pop = df_pop[df_pop['MEASURE'] == 'MLN_PER']
# No salary data for China in OECD data, so have to add it (pinch of salt needed here)
# Average salary in China in 2017 (https://www.statista.com/statistics/278349/average-annual-salary-of-an-employee-in-china/#:~:text=In%202018%2C%20an%20employee%20in,yuan%20on%20average%20in%202017.)
av_salary = 74318
    # CNY to USD exchange rate on 31 December 2017 (https://www.xe.com/currencytables/?from=USD&date=2017-12-31)
exg_rate = 0.1537053666
av_salary = av_salary * exg_rate
# Create dataframe
salary_columns = df_salary.columns
df_china = pd.DataFrame(columns=salary_columns)
df_china.loc[0] = ['CHN','AVWAGE','TOT','USD',np.NaN,'2017',av_salary,np.NaN]
# Add China data
df_salary = df_salary.append(df_china, ignore_index=True)
# Assume we're only half right about the number of RSEs in the UK
num_rses_uk = num_rses_uk/2
# Keep only countries for which I have spending and salary data
    df_spends = pd.merge(df_spending, df_salary, on='LOCATION', how='inner', suffixes=('_spend', '_salary'))  # completion API: pandas.merge
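# A worked sketch of the scaling described in the docstring above (country "X" and
# all numbers are hypothetical): if X spends 40% of what the UK spends on research
# while average salaries are 80% of the UK's, the spend/salary route implies
# 0.4 / 0.8 = 0.5 times the UK researcher headcount, so the estimate for X is
# 0.5 * num_rses_uk.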
"""
Quantilization functions and related stuff
"""
from functools import partial
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
ensure_int64, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_integer,
is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.missing import isna
from pandas import (
Categorical, Index, Interval, IntervalIndex, Series, Timedelta, Timestamp,
to_datetime, to_timedelta)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False, duplicates='raise'):
"""
Bin values into discrete intervals.
Use `cut` when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable. For example, `cut` could convert ages to groups of
age ranges. Supports binning into an equal number of bins, or a
pre-specified array of bins.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or pandas.IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used.
right : bool, default True
Indicates whether `bins` includes the rightmost edge or not. If
``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
indicate (1,2], (2,3], (3,4]. This argument is ignored when
`bins` is an IntervalIndex.
labels : array or bool, optional
Specifies the labels for the returned bins. Must be the same length as
the resulting bins. If False, returns only integer indicators of the
bins. This affects the type of the output container (see below).
This argument is ignored when `bins` is an IntervalIndex.
retbins : bool, default False
Whether to return the bins or not. Useful when bins is provided
as a scalar.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.23.0
Returns
-------
out : pandas.Categorical, Series, or ndarray
An array-like object representing the respective bin for each value
of `x`. The type depends on the value of `labels`.
* True (default) : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are Interval dtype.
* sequence of scalars : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are whatever the type in the sequence is.
* False : returns an ndarray of integers.
bins : numpy.ndarray or IntervalIndex.
The computed or specified bins. Only returned when `retbins=True`.
For scalar or sequence `bins`, this is an ndarray with the computed
bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For
an IntervalIndex `bins`, this is equal to `bins`.
See Also
--------
qcut : Discretize variable into equal-sized buckets based on rank
or based on sample quantiles.
pandas.Categorical : Array type for storing data that come from a
fixed set of values.
Series : One-dimensional array with axis labels (including time series).
pandas.IntervalIndex : Immutable Index implementing an ordered,
sliceable set.
Notes
-----
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Series or pandas.Categorical object.
Examples
--------
Discretize into three equal-sized bins.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
... # doctest: +ELLIPSIS
[(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
array([0.994, 3. , 5. , 7. ]))
Discovers the same bins, but assign them specific labels. Notice that
the returned Categorical's categories are `labels` and is ordered.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
... 3, labels=["bad", "medium", "good"])
[bad, good, medium, medium, good, bad]
Categories (3, object): [bad < medium < good]
``labels=False`` implies you just want the bins back.
>>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
array([0, 1, 1, 3])
Passing a Series as an input returns a Series with categorical dtype:
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, 3)
... # doctest: +ELLIPSIS
a (1.992, 4.667]
b (1.992, 4.667]
c (4.667, 7.333]
d (7.333, 10.0]
e (7.333, 10.0]
dtype: category
Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...
Passing a Series as an input returns a Series with mapping value.
It is used to map numerically to intervals based on bins.
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 4.0
dtype: float64, array([0, 2, 4, 6, 8]))
Use `drop` optional when bins is not unique
>>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
... right=False, duplicates='drop')
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 3.0
dtype: float64, array([0, 2, 4, 6, 8]))
Passing an IntervalIndex for `bins` results in those categories exactly.
Notice that values not covered by the IntervalIndex are set to NaN. 0
is to the left of the first bin (which is closed on the right), and 1.5
falls between two bins.
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
>>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
[NaN, (0, 1], NaN, (2, 3], (4, 5]]
Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype,
duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : 1d ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to numeric so that cut method can
handle it
"""
dtype = None
if is_datetime64tz_dtype(x):
dtype = x.dtype
elif is_datetime64_dtype(x):
x = to_datetime(x)
dtype = np.datetime64
elif is_timedelta64_dtype(x):
x = to_timedelta(x)
dtype = np.timedelta64
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
x = np.where(x.notna(), x.view(np.int64), np.nan)
return x, dtype
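# Small illustration of the coercion above (values shown are illustrative):
# to_datetime(["2019-01-01", "2019-01-02"]).view("i8") yields nanosecond integers
# (1546300800000000000, 1546387200000000000), which the binning code can treat
# like any other numeric array before labels are converted back to datelike bins.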
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _convert_bin_to_datelike_type(bins, dtype):
"""
    Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike
"""
    if is_datetime64tz_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype):  # completion API: pandas.core.dtypes.common.is_datetime_or_timedelta_dtype
#!/usr/bin/env python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
from itertools import product
# 3rd party:
from pandas import (
DataFrame, to_datetime, date_range,
unique, MultiIndex, concat
)
# Internal:
try:
from __app__.utilities import func_logger
except ImportError:
from utilities import func_logger
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'homogenise_dates',
'homogenise_demographics_dates'
]
@func_logger("homogenisation")
def homogenise_dates(d: DataFrame):
"""
Parameters
----------
    d : DataFrame
        Input data with a ``date`` column (parsed as ``%Y-%m-%d`` below).
Returns
-------
"""
d.date = to_datetime(d.date, format="%Y-%m-%d")
col_names = d.columns
date = date_range(
        start=to_datetime(d.date)  # completion API: pandas.to_datetime
# Goal: pull job metrics data from stratus and assemble it into
# a pandas dataframe.
# Then save the dataframe to work directory.
import boto3
import pandas as pd
import logging
url_name = 'https://stratus.ucar.edu'
bucket_name = 'rda-data'
file_prefix = 'web/jobMetrics/mem_metrics'
df_path = '/glade/work/jdubeau/job-metrics-data.json'
logging.basicConfig(filename='pull-jobmetrics-log.txt', level=logging.INFO)
def grab_bucket_contents(url, bucket, prefix):
""" Connects to the given url and grabs the all the files in the
given bucket which match the given prefix. Returns a list of strings
(the filenames) and a list of botocore.response.StreamingBody objects
which can later be read to get the actual contents of each file.
"""
# Note: creating a boto3 client uses credentials file from ~/.aws
client = boto3.client(endpoint_url=url, service_name='s3')
bucket_contents = client.list_objects_v2(Bucket=bucket,
Prefix=prefix)['Contents']
obj_keys = [c['Key'] for c in bucket_contents]
# Here bucket_contents is a list of dictionaries, one for each file in
# the bucket matching the prefix. For example, one element of
# bucket_contents could be:
# {'Key': 'web/jobMetrics/mem_metrics_20-09-16:1600.json',
# 'LastModified': ...,
# 'ETag': ...,
# 'Size': ...,
# 'StorageClass': ...}
# Therefore object_keys is the list of filenames we're interested in.
# We use these filenames to retrieve the files themselves.
objects = [client.get_object(Bucket=bucket, Key=k) for k in obj_keys]
obj_bodies = [ob['Body'] for ob in objects]
# Here is an example of a member of objects:
# {'ResponseMetadata': ...,
# 'AcceptRanges': ...,
# 'LastModified': ...,
# 'ContentLength': ...,
# 'ETag': ...,
# 'ContentType': ...,
# 'Metadata': ...,
# 'Body': <botocore.response.StreamingBody object at 0x7fa9f81b6be0>}
return obj_keys, obj_bodies
def read_and_store(obj_keys, obj_bodies):
""" Reads the StreamingBody objects contained in object_bodies and
saves each of them to a pandas dataframe. Returns a list of all the
dataframes created.
"""
all_data = [body.read() for body in obj_bodies]
all_dfs = []
for i in range(len(all_data)):
current_data = all_data[i]
try:
all_dfs.append(pd.read_json(current_data, orient='index'))
        except Exception:  # avoid a bare except; any parse failure is still logged below
logging.warning(f"Could not parse JSON data from {obj_keys[i]}")
    logging.info(f"Finished parsing JSON data "
                 f"({len(all_dfs)}/{len(all_data)} successful)")
return all_dfs
def combine_and_save(dfs_list):
"""
Combines the dataframes in dfs_list into one, removes any duplicate
rows, and saves the combined dataframe to a file (using the path
given by df_path).
"""
    merged_df = pd.concat(dfs_list, axis=0)  # completion API: pandas.concat
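# The snippet is truncated here. Per the docstring, the remaining steps would look
# roughly like this (a sketch, not the original code):
# merged_df = merged_df.drop_duplicates()
# merged_df.to_json(df_path)
# A hypothetical driver chaining the helpers above:
# keys, bodies = grab_bucket_contents(url_name, bucket_name, file_prefix)
# dfs = read_and_store(keys, bodies)
# combine_and_save(dfs)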
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os, sys
os.getcwd()
# #!pip install azure-storage-blob --user
# #!pip install storefact --user
# + {"active": "ipynb"}
# import os, sys
# import configparser
# sys.path.append('/home/jovyan/.local/lib/python3.6/site-packages/')
# print(sys.path)
#
# os.path.abspath("AzureDownload/config.txt")
# os.getcwd()
# config = configparser.ConfigParser()
# config.read("/home/jovyan/AzureDownload/config.txt")
# config.sections()
# -
# ### Credentials setup, read the WoS jounral name mapped table from Azure
# + {"active": "ipynb"}
# import time
# from azure.storage.blob import BlockBlobService
#
# CONTAINERNAME = "mag-2019-01-25"
# BLOBNAME= "MAGwosJournalMatch/OpenSci3Journal.csv/part-00000-tid-8679026268804875386-7586e989-d017-4b12-9d5a-53fc6497ec02-1116-c000.csv"
# LOCALFILENAME= "/home/jovyan/openScience/code-data/OpenSci3Journal.csv"
#
# block_blob_service=BlockBlobService(account_name=config.get("configuration","account"),account_key=config.get("configuration","password"))
# #download from blob
# t1=time.time()
# block_blob_service.get_blob_to_path(CONTAINERNAME,BLOBNAME,LOCALFILENAME)
# t2=time.time()
# print(("It takes %s seconds to download "+BLOBNAME) % (t2 - t1))
# +
import pandas as pd
openJ = pd.read_csv('input/OpenSci3Journal.csv', escapechar='\\', encoding='utf-8')
openJ.count()
# -
# ### To verify that the Spark output is consistent, we compare the pandas dataframes before and after the WoS jounral mapping
open0 = pd.read_csv('output/OpenSci3.csv', escapechar='\\', encoding='utf-8')
open0.count()
# ### Compare matched MAG journal names and WoS journal names
openJ['Journal'] = openJ.Journal.str.lower()
openJ['WoSjournal'] = openJ.WoSjournal.str.lower()
matched = openJ[openJ['Journal'] == openJ['WoSjournal']]
matched.count()
# ### Matching with UCSD map of science journal names
journalMap = pd.read_csv('WoSmatch/journalName.csv')
journalMap['journal_name'] = journalMap.journal_name.str.lower()
JwosMap = journalMap[journalMap['source_type']=="Thomson"]
MAGmatched = pd.merge(openJ, JwosMap, left_on=['Journal'], right_on=['journal_name'], how='left')
MAGmatched.count()
WoSmatched = pd.merge(openJ, JwosMap, left_on=['WoSjournal'], right_on=['journal_name'], how='left')
WoSmatched.count()
# ### Combining matched journal names from WoS and MAG to the UCSD map of science
MAGmatched.update(WoSmatched)
MAGmatched.count()
# ### Mapping from matched jounrals to subdisciplines
JsubMap = pd.read_csv('WoSmatch/jounral-subdiscipline.csv')
JsubMap.journ_id = JsubMap.journ_id.astype('float64')
subMatched = pd.merge(MAGmatched, JsubMap, left_on=['journ_id'], right_on=['journ_id'], how='left').drop(columns='formal_name')
subMatched.count()
#subMatched.dtypes
subTable = pd.read_csv('WoSmatch/subdiscipline.csv')
subTable.subd_id = subTable.subd_id.astype('float64')
subNameMatched = pd.merge(subMatched, subTable, left_on=['subd_id'], right_on=['subd_id'], how='left').drop(columns=['size','x','y'])
subNameMatched.count()
# ### Since each journal has a distribution of corresponding disciplines, we will collect the disipline vectors into new columns
import numpy as np
majTable = pd.read_csv('WoSmatch/discipline.csv')
majTable.disc_id = majTable.disc_id.astype('float64')
discMatched = pd.merge(subNameMatched, majTable, left_on=['disc_id'], right_on=['disc_id'], how='left').drop(columns=['color','x','y'])
discMatched.jfraction = discMatched.jfraction.astype('str')
discMatched.subd_name = discMatched.subd_name.astype('str')
discMatched.disc_name = discMatched.disc_name.astype('str')
temp = discMatched[['PaperId','WoSID','WoSjournal','jfraction','subd_name','disc_name']].copy()
temp['jfraction'] = discMatched.groupby(['PaperId'])['jfraction'].transform(lambda x: ';'.join(x)).replace('nan', np.nan)
temp['subd_name'] = discMatched.groupby(['PaperId'])['subd_name'].transform(lambda x: ';'.join(x)).replace('nan', np.nan)
temp['disc_name'] = discMatched.groupby(['PaperId'])['disc_name'].transform(lambda x: ';'.join(x)).replace('nan', np.nan)
temp2 = temp.drop_duplicates()
temp2.count()
# ### Groupby number matches and we merge the mapped discipline data back to the OpenSci3.csv
OpenSci3Disc = pd.merge(open0, temp2, left_on=['PaperId'], right_on=['PaperId'], how='left')  # completion API: pandas.merge
from orbit.models.ktrlite import KTRLite
import pandas as pd
import numpy as np
import math
from scipy.stats import nct
from enum import Enum
import torch
import matplotlib.pyplot as plt
from copy import deepcopy
from ..constants.constants import (
KTRTimePointPriorKeys,
PredictMethod,
TrainingMetaKeys,
PredictionMetaKeys
)
from ..exceptions import IllegalArgument, ModelException, PredictionException
from ..utils.general import is_ordered_datetime
from ..utils.kernels import gauss_kernel, sandwich_kernel
from ..utils.features import make_seasonal_regressors
from .model_template import ModelTemplate
from ..estimators.pyro_estimator import PyroEstimatorSVI
from ..models import KTRLite
from orbit.constants.palette import OrbitPalette
from ..utils.knots import get_knot_idx, get_knot_dates
from ..utils.plot import orbit_style_decorator
class DataInputMapper(Enum):
"""
mapping from object input to pyro input
"""
# All of the following have default defined in DEFAULT_SLGT_FIT_ATTRIBUTES
# ---------- Data Input ---------- #
# observation related
NUM_OF_VALID_RESPONSE = 'N_VALID_RES'
WHICH_VALID_RESPONSE = 'WHICH_VALID_RES'
RESPONSE_OFFSET = 'MEAN_Y'
DEGREE_OF_FREEDOM = 'DOF'
_RESIDUALS_SCALE_UPPER = 'RESID_SCALE_UB'
# ---------- Level ---------- #
_NUM_KNOTS_LEVEL = 'N_KNOTS_LEV'
LEVEL_KNOT_SCALE = 'LEV_KNOT_SCALE'
_KERNEL_LEVEL = 'K_LEV'
# ---------- Regression ---------- #
_NUM_KNOTS_COEFFICIENTS = 'N_KNOTS_COEF'
_KERNEL_COEFFICIENTS = 'K_COEF'
_NUM_OF_REGULAR_REGRESSORS = 'N_RR'
_NUM_OF_POSITIVE_REGRESSORS = 'N_PR'
_NUM_OF_NEGATIVE_REGRESSORS = 'N_NR'
_REGULAR_REGRESSOR_MATRIX = 'RR'
_POSITIVE_REGRESSOR_MATRIX = 'PR'
_NEGATIVE_REGRESSOR_MATRIX = 'NR'
_REGULAR_REGRESSOR_INIT_KNOT_LOC = 'RR_INIT_KNOT_LOC'
_REGULAR_REGRESSOR_INIT_KNOT_SCALE = 'RR_INIT_KNOT_SCALE'
_REGULAR_REGRESSOR_KNOT_SCALE = 'RR_KNOT_SCALE'
_POSITIVE_REGRESSOR_INIT_KNOT_LOC = 'PR_INIT_KNOT_LOC'
_POSITIVE_REGRESSOR_INIT_KNOT_SCALE = 'PR_INIT_KNOT_SCALE'
_POSITIVE_REGRESSOR_KNOT_SCALE = 'PR_KNOT_SCALE'
_NEGATIVE_REGRESSOR_INIT_KNOT_LOC = 'NR_INIT_KNOT_LOC'
_NEGATIVE_REGRESSOR_INIT_KNOT_SCALE = 'NR_INIT_KNOT_SCALE'
_NEGATIVE_REGRESSOR_KNOT_SCALE = 'NR_KNOT_SCALE'
# ---------- Prior Specification ---------- #
_COEF_PRIOR_LIST = 'COEF_PRIOR_LIST'
_LEVEL_KNOTS = 'LEV_KNOT_LOC'
_SEAS_TERM = 'SEAS_TERM'
class BaseSamplingParameters(Enum):
"""
The output sampling parameters related with the base model
"""
LEVEL_KNOT = 'lev_knot'
LEVEL = 'lev'
YHAT = 'yhat'
OBS_SCALE = 'obs_scale'
class RegressionSamplingParameters(Enum):
"""
The output sampling parameters related with regression component.
"""
COEFFICIENTS_KNOT = 'coef_knot'
COEFFICIENTS_INIT_KNOT = 'coef_init_knot'
COEFFICIENTS = 'coef'
# Defaults Values
DEFAULT_REGRESSOR_SIGN = '='
DEFAULT_COEFFICIENTS_INIT_KNOT_SCALE = 1.0
DEFAULT_COEFFICIENTS_INIT_KNOT_LOC = 0
DEFAULT_COEFFICIENTS_KNOT_SCALE = 0.1
DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER = 0.01
DEFAULT_UPPER_BOUND_SCALE_MULTIPLIER = 1.0
class KTRModel(ModelTemplate):
"""Base KTR model object with shared functionality for PyroVI method
Parameters
----------
level_knot_scale : float
sigma for level; default to be .1
level_segments : int
the number of segments partitioned by the knots of level (trend)
level_knot_distance : int
the distance between every two knots of level (trend)
level_knot_dates : array like
list of pre-specified dates for the level knots
seasonality : int, or list of int
multiple seasonality
seasonality_fs_order : int, or list of int
fourier series order for seasonality
seasonality_segments : int
the number of segments partitioned by the knots of seasonality
seasonal_initial_knot_scale : float
scale parameter for seasonal regressors initial coefficient knots; default to be 1
seasonal_knot_scale : float
scale parameter for seasonal regressors drift of coefficient knots; default to be 0.1.
regressor_col : array-like strings
regressor columns
regressor_sign : list
list of signs with '=' for regular regressor, '+' for positive regressor, and '-' for negative regressor.
regressor_init_knot_loc : list
list of regressor knot pooling mean priors, default to be 0's
regressor_init_knot_scale : list
list of regressor knot pooling sigma's to control the pooling strength towards the grand mean of regressors;
default to be 1.
regressor_knot_scale : list
list of regressor knot sigma priors; default to be 0.1.
regression_segments : int
the number of segments partitioned by the knots of regression
regression_knot_distance : int
the distance between every two knots of regression
regression_knot_dates : array-like
list of pre-specified dates for regression knots
regression_rho : float
sigma in the Gaussian kernel for the regression term
    degree_of_freedom : int
degree of freedom for error t-distribution
date_freq : str
date frequency; if not supplied, the minimum timestamp difference in the date would be used.
coef_prior_list : list of dicts
each dict in the list should have keys as
        KTRTimePointPriorKeys.NAME.value, KTRTimePointPriorKeys.PRIOR_START_TP_IDX.value (inclusive), KTRTimePointPriorKeys.PRIOR_END_TP_IDX.value (not inclusive),
KTRTimePointPriorKeys.PRIOR_MEAN.value, KTRTimePointPriorKeys.PRIOR_SD.value, and KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value
residuals_scale_upper : float
flat_multiplier : bool
        Default True. If False, the knot scale is adjusted with a multiplier based on the regressor volume
        around each knot; if True, all multipliers are set to 1.
ktrlite_optim_args : dict
the optimizing config for the ktrlite model (to fit level/seasonality). Default to be dict().
"""
_data_input_mapper = DataInputMapper
# stan or pyro model name (e.g. name of `*.stan` file in package)
_model_name = 'ktr'
_supported_estimator_types = [PyroEstimatorSVI]
def __init__(self,
# level
level_knot_scale=0.1,
level_segments=10,
level_knot_distance=None,
level_knot_dates=None,
# seasonality
seasonality=None,
seasonality_fs_order=None,
seasonality_segments=2,
seasonal_initial_knot_scale=1.0,
seasonal_knot_scale=0.1,
# regression
regressor_col=None,
regressor_sign=None,
regressor_init_knot_loc=None,
regressor_init_knot_scale=None,
regressor_knot_scale=None,
regression_segments=5,
regression_knot_distance=None,
regression_knot_dates=None,
regression_rho=0.15,
# shared
degree_of_freedom=30,
date_freq=None,
# time-based coefficient priors
coef_prior_list=None,
flat_multiplier=True,
residuals_scale_upper=None,
ktrlite_optim_args=dict(),
**kwargs):
super().__init__(**kwargs) # create estimator in base class
# level configurations
self.level_knot_scale = level_knot_scale
self.level_segments = level_segments
self.level_knot_distance = level_knot_distance
self.level_knot_dates = level_knot_dates
self._level_knot_dates = self.level_knot_dates
self.level_knots = None
self._level_knots = None
self._kernel_level = None
self._num_knots_level = None
self.knots_tp_level = None
# seasonality configurations
self.seasonality = seasonality
self.seasonality_fs_order = seasonality_fs_order
self._seasonality = self.seasonality
# used to name different seasonal components in prediction
self._seasonality_labels = list()
self._seasonality_fs_order = self.seasonality_fs_order
self.seasonal_initial_knot_scale = seasonal_initial_knot_scale
self.seasonal_knot_scale = seasonal_knot_scale
self.seasonality_segments = seasonality_segments
self._seas_term = 0
self._seasonality_coef_knot_dates = None
self._seasonality_coef_knots = None
# regression configurations
self.regressor_col = regressor_col
self.regressor_sign = regressor_sign
self.regressor_init_knot_loc = regressor_init_knot_loc
self.regressor_init_knot_scale = regressor_init_knot_scale
self.regressor_knot_scale = regressor_knot_scale
self.regression_knot_distance = regression_knot_distance
self.regression_segments = regression_segments
self._regression_knot_dates = regression_knot_dates
self.regression_rho = regression_rho
self.flat_multiplier = flat_multiplier
# set private var to arg value
# if None set default in _set_default_args()
self._regressor_sign = self.regressor_sign
self._regressor_init_knot_loc = self.regressor_init_knot_loc
self._regressor_init_knot_scale = self.regressor_init_knot_scale
self._regressor_knot_scale = self.regressor_knot_scale
self.coef_prior_list = coef_prior_list
self._coef_prior_list = []
self._regression_knots_idx = None
self._num_of_regressors = 0
# positive regressors
self._num_of_positive_regressors = 0
self._positive_regressor_col = list()
self._positive_regressor_init_knot_loc = list()
self._positive_regressor_init_knot_scale = list()
self._positive_regressor_knot_scale_1d = list()
self._positive_regressor_knot_scale = list()
# negative regressors
self._num_of_negative_regressors = 0
self._negative_regressor_col = list()
self._negative_regressor_init_knot_loc = list()
self._negative_regressor_init_knot_scale = list()
self._negative_regressor_knot_scale_1d = list()
self._negative_regressor_knot_scale = list()
# regular regressors
self._num_of_regular_regressors = 0
self._regular_regressor_col = list()
self._regular_regressor_init_knot_loc = list()
self._regular_regressor_init_knot_scale = list()
self._regular_regressor_knot_scale_1d = list()
self._regular_regressor_knot_scale = list()
self._regressor_col = list()
# init dynamic data attributes
# the following are set by `_set_dynamic_attributes()` and generally set during fit()
# from input df
# response data
self._is_valid_response = None
self._which_valid_response = None
self._num_of_valid_response = 0
# regression data
self._knots_tp_coefficients = None
self._positive_regressor_matrix = None
self._negative_regressor_matrix = None
self._regular_regressor_matrix = None
# other configurations
self.date_freq = date_freq
self.degree_of_freedom = degree_of_freedom
self.residuals_scale_upper = residuals_scale_upper
self._residuals_scale_upper = residuals_scale_upper
self.ktrlite_optim_args = ktrlite_optim_args
self._set_static_attributes()
self._set_model_param_names()
def _set_model_param_names(self):
"""Overriding base template functions. Model parameters to extract"""
self._model_param_names += [param.value for param in BaseSamplingParameters]
if self._num_of_regressors > 0:
self._model_param_names += [param.value for param in RegressionSamplingParameters]
def _set_default_args(self):
"""Set default attributes for None"""
# default checks for seasonality and seasonality_fs_order will be conducted
# in ktrlite model and we will extract them from ktrlite model directly later
if self.coef_prior_list is not None:
self._coef_prior_list = deepcopy(self.coef_prior_list)
# if no regressors, end here #
if self.regressor_col is None:
# regardless of what args are set for these, if regressor_col is None
# these should all be empty lists
self._regressor_sign = list()
self._regressor_init_knot_loc = list()
self._regressor_init_knot_scale = list()
self._regressor_knot_scale = list()
return
def _validate_params_len(params, valid_length):
for p in params:
if p is not None and len(p) != valid_length:
raise IllegalArgument('Wrong dimension length in Regression Param Input')
# regressor defaults
num_of_regressors = len(self.regressor_col)
_validate_params_len([
self.regressor_sign, self.regressor_init_knot_loc,
self.regressor_init_knot_scale, self.regressor_knot_scale],
num_of_regressors
)
if self.regressor_sign is None:
self._regressor_sign = [DEFAULT_REGRESSOR_SIGN] * num_of_regressors
if self.regressor_init_knot_loc is None:
self._regressor_init_knot_loc = [DEFAULT_COEFFICIENTS_INIT_KNOT_LOC] * num_of_regressors
if self.regressor_init_knot_scale is None:
self._regressor_init_knot_scale = [DEFAULT_COEFFICIENTS_INIT_KNOT_SCALE] * num_of_regressors
if self.regressor_knot_scale is None:
self._regressor_knot_scale = [DEFAULT_COEFFICIENTS_KNOT_SCALE] * num_of_regressors
self._num_of_regressors = num_of_regressors
def _set_static_regression_attributes(self):
# if no regressors, end here
if self._num_of_regressors == 0:
return
for index, reg_sign in enumerate(self._regressor_sign):
if reg_sign == '+':
self._num_of_positive_regressors += 1
self._positive_regressor_col.append(self.regressor_col[index])
# used for 'pr_knot_loc' sampling in pyro
self._positive_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._positive_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'pr_knot' sampling in pyro
self._positive_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
elif reg_sign == '-':
self._num_of_negative_regressors += 1
self._negative_regressor_col.append(self.regressor_col[index])
# used for 'nr_knot_loc' sampling in pyro
self._negative_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._negative_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'nr_knot' sampling in pyro
self._negative_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
else:
self._num_of_regular_regressors += 1
self._regular_regressor_col.append(self.regressor_col[index])
# used for 'rr_knot_loc' sampling in pyro
self._regular_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._regular_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'rr_knot' sampling in pyro
self._regular_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
# regular first, then positive, then negative
self._regressor_col = self._regular_regressor_col + self._positive_regressor_col + self._negative_regressor_col
# numpy conversion
self._positive_regressor_init_knot_loc = np.array(self._positive_regressor_init_knot_loc)
self._positive_regressor_init_knot_scale = np.array(self._positive_regressor_init_knot_scale)
self._positive_regressor_knot_scale_1d = np.array(self._positive_regressor_knot_scale_1d)
self._negative_regressor_init_knot_loc = np.array(self._negative_regressor_init_knot_loc)
self._negative_regressor_init_knot_scale = np.array(self._negative_regressor_init_knot_scale)
self._negative_regressor_knot_scale_1d = np.array(self._negative_regressor_knot_scale_1d)
self._regular_regressor_init_knot_loc = np.array(self._regular_regressor_init_knot_loc)
self._regular_regressor_init_knot_scale = np.array(self._regular_regressor_init_knot_scale)
self._regular_regressor_knot_scale_1d = np.array(self._regular_regressor_knot_scale_1d)
@staticmethod
def _validate_coef_prior(coef_prior_list):
for test_dict in coef_prior_list:
if set(test_dict.keys()) != set([
KTRTimePointPriorKeys.NAME.value,
KTRTimePointPriorKeys.PRIOR_START_TP_IDX.value,
KTRTimePointPriorKeys.PRIOR_END_TP_IDX.value,
KTRTimePointPriorKeys.PRIOR_MEAN.value,
KTRTimePointPriorKeys.PRIOR_SD.value,
KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value
]):
raise IllegalArgument('wrong key name in inserted prior dict')
len_insert_prior = list()
for key, val in test_dict.items():
if key in [
KTRTimePointPriorKeys.PRIOR_MEAN.value,
KTRTimePointPriorKeys.PRIOR_SD.value,
KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value,
]:
len_insert_prior.append(len(val))
if not all(len_insert == len_insert_prior[0] for len_insert in len_insert_prior):
raise IllegalArgument('wrong dimension length in inserted prior dict')
# @staticmethod
# def _validate_level_knot_inputs(level_knot_dates, level_knots):
# if len(level_knots) != len(level_knot_dates):
# raise IllegalArgument('level_knots and level_knot_dates should have the same length')
def _set_coef_prior_idx(self):
if self._coef_prior_list and len(self._regressor_col) > 0:
for x in self._coef_prior_list:
prior_regressor_col_idx = [
np.where(np.array(self._regressor_col) == col)[0][0]
for col in x[KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value]
]
x.update({'prior_regressor_col_idx': prior_regressor_col_idx})
def _set_static_attributes(self):
"""model data input based on args at instantiation or computed from args at instantiation"""
self._set_default_args()
self._set_static_regression_attributes()
# self._validate_level_knot_inputs(self.level_knot_dates, self.level_knots)
if self._coef_prior_list:
self._validate_coef_prior(self._coef_prior_list)
self._set_coef_prior_idx()
def _set_valid_response_attributes(self, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
response = training_meta[TrainingMetaKeys.RESPONSE.value]
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
if num_of_observations < max_seasonality:
raise ModelException(
"Number of observations {} is less than max seasonality {}".format(
num_of_observations, max_seasonality))
# get some reasonable offset to regularize response to make default priors scale-insensitive
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
self.response_offset = np.nanmean(response[:max_seasonality])
else:
self.response_offset = np.nanmean(response)
self.is_valid_response = ~np.isnan(response)
# [0] to convert tuple back to array
self.which_valid_response = np.where(self.is_valid_response)[0]
self.num_of_valid_response = len(self.which_valid_response)
def _set_regressor_matrix(self, df, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
# validate regression columns
if self.regressor_col is not None and \
not set(self.regressor_col).issubset(df.columns):
raise ModelException(
"DataFrame does not contain specified regressor column(s)."
)
# init of regression matrix depends on length of response vector
self._positive_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
self._negative_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
self._regular_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
# update regression matrices
if self._num_of_positive_regressors > 0:
self._positive_regressor_matrix = df.filter(
items=self._positive_regressor_col, ).values
if self._num_of_negative_regressors > 0:
self._negative_regressor_matrix = df.filter(
items=self._negative_regressor_col, ).values
if self._num_of_regular_regressors > 0:
self._regular_regressor_matrix = df.filter(
items=self._regular_regressor_col, ).values
def _set_coefficients_kernel_matrix(self, df, training_meta):
"""Derive knots position and kernel matrix and other related meta data"""
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
# date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
# placeholder
self._kernel_coefficients = np.zeros((num_of_observations, 0), dtype=np.double)
self._num_knots_coefficients = 0
if self._num_of_regressors > 0:
self._regression_knots_idx = get_knot_idx(
date_array=date_array,
num_of_obs=num_of_observations,
knot_dates=self._regression_knot_dates,
knot_distance=self.regression_knot_distance,
num_of_segments=self.regression_segments,
date_freq=self.date_freq,
)
tp = np.arange(1, num_of_observations + 1) / num_of_observations
self._knots_tp_coefficients = (1 + self._regression_knots_idx) / num_of_observations
self._kernel_coefficients = gauss_kernel(tp, self._knots_tp_coefficients, rho=self.regression_rho)
self._num_knots_coefficients = len(self._knots_tp_coefficients)
if self.date_freq is None:
self.date_freq = date_array.diff().min()
self._regression_knot_dates = get_knot_dates(date_array[0], self._regression_knots_idx, self.date_freq)
def _set_knots_scale_matrix(self, df, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
if self._num_of_positive_regressors > 0:
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_positive_regressors, self._num_knots_coefficients))
if self.flat_multiplier:
multiplier = np.ones(local_val.shape)
else:
multiplier = np.ones(local_val.shape)
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._positive_regressor_matrix[str_idx:end_idx]), axis=0)
global_mean = np.expand_dims(np.mean(np.fabs(self._positive_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
            # adjust knot scale with the multiplier derived from the average value and shift by 0.001 to avoid zeros in
# scale parameters
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.1 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
# self._positive_regressor_knot_scale has shape num_of_pr x num_of_knot
self._positive_regressor_knot_scale = (
multiplier * np.expand_dims(self._positive_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._positive_regressor_knot_scale[self._positive_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._positive_regressor_init_knot_scale = np.array(self._positive_regressor_init_knot_scale)
self._positive_regressor_init_knot_scale[self._positive_regressor_init_knot_scale < 1e-4] = 1e-4
if self._num_of_negative_regressors > 0:
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_negative_regressors, self._num_knots_coefficients))
if self.flat_multiplier:
multiplier = np.ones(local_val.shape)
else:
multiplier = np.ones(local_val.shape)
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._negative_regressor_matrix[str_idx:end_idx]), axis=0)
global_mean = np.expand_dims(np.mean(np.fabs(self._negative_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
            # adjust knot scale with the multiplier derived from the average value and shift by 0.001 to avoid zeros in
            # scale parameters
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.1 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
self._negative_regressor_knot_scale = (
multiplier * np.expand_dims(self._negative_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._negative_regressor_knot_scale[self._negative_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._negative_regressor_init_knot_scale = np.array(self._negative_regressor_init_knot_scale)
self._negative_regressor_init_knot_scale[self._negative_regressor_init_knot_scale < 1e-4] = 1e-4
if self._num_of_regular_regressors > 0:
# do the same for regular regressor
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_regular_regressors, self._num_knots_coefficients))
if self.flat_multiplier:
multiplier = np.ones(local_val.shape)
else:
multiplier = np.ones(local_val.shape)
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._regular_regressor_matrix[str_idx:end_idx]), axis=0)
            # adjust knot scale with the multiplier derived from the average value and shift by 0.001 to avoid zeros in
            # scale parameters
global_mean = np.expand_dims(np.mean(np.fabs(self._regular_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.1 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
# self._regular_regressor_knot_scale has shape num_of_pr x num_of_knot
self._regular_regressor_knot_scale = (
multiplier * np.expand_dims(self._regular_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._regular_regressor_knot_scale[self._regular_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._regular_regressor_init_knot_scale = np.array(self._regular_regressor_init_knot_scale)
self._regular_regressor_init_knot_scale[self._regular_regressor_init_knot_scale < 1e-4] = 1e-4
def _generate_tp(self, training_meta, prediction_date_array):
"""Used in _generate_seas"""
training_end = training_meta[TrainingMetaKeys.END.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
prediction_start = prediction_date_array[0]
output_len = len(prediction_date_array)
if prediction_start > training_end:
start = num_of_observations
else:
start = pd.Index(date_array).get_loc(prediction_start)
new_tp = np.arange(start + 1, start + output_len + 1) / num_of_observations
return new_tp
def _generate_insample_tp(self, training_meta, date_array):
"""Used in _generate_seas"""
train_date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
idx = np.nonzero(np.in1d(train_date_array, date_array))[0]
tp = (idx + 1) / num_of_observations
return tp
# def _generate_coefs(self, training_meta, prediction_date_array, coef_knot_dates, coef_knot):
# """Used in _generate_seas"""
# new_tp = self._generate_tp(training_meta, prediction_date_array)
# knots_tp_coef = self._generate_insample_tp(training_meta, coef_knot_dates)
# kernel_coef = sandwich_kernel(new_tp, knots_tp_coef)
# coefs = np.squeeze(np.matmul(coef_knot, kernel_coef.transpose(1, 0)), axis=0).transpose(1, 0)
# return coefs
def _generate_seas(self, df, training_meta, coef_knot_dates, coef_knots,
seasonality, seasonality_fs_order, seasonality_labels):
"""To calculate the seasonality term based on the _seasonal_knots_input.
Parameters
----------
df : pd.DataFrame
input df
training_meta: dict
meta dictionary for the training input
coef_knot_dates : 1-D array like
dates for seasonality coefficient knots
coef_knots : dict
dict of seasonal coefficient knots from each seasonality
seasonality : list
seasonality input; list of float
seasonality_fs_order : list
seasonality_fs_order input list of int
Returns
-----------
dict :
a dictionary contains seasonal regression components mapped by each seasonality
"""
df = df.copy()
# store each component as a dictionary
seas_decomp = dict()
if seasonality is not None and len(seasonality) > 0:
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
training_end = training_meta[TrainingMetaKeys.END.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
prediction_date_array = df[date_col].values
prediction_start = prediction_date_array[0]
if prediction_start > training_end:
# time index for prediction start
start = num_of_observations
else:
# time index for prediction start
start = pd.Index(date_array).get_loc(prediction_start)
# dictionary
seas_regressors = make_seasonal_regressors(
n=df.shape[0],
periods=seasonality,
orders=seasonality_fs_order,
labels=seasonality_labels,
shift=start,
)
new_tp = self._generate_tp(training_meta, prediction_date_array)
knots_tp_coef = self._generate_insample_tp(training_meta, coef_knot_dates)
coef_kernel = sandwich_kernel(new_tp, knots_tp_coef)
# init of regression matrix depends on length of response vector
total_seas_regression = np.zeros((1, df.shape[0]), dtype=np.double)
for k in seasonality_labels:
seas_regresor_matrix = seas_regressors[k]
coef_knot = coef_knots[k]
# time-step x coefficients
seas_coef = np.squeeze(np.matmul(coef_knot, coef_kernel.transpose(1, 0)), axis=0).transpose(1, 0)
seas_regression = np.sum(seas_coef * seas_regresor_matrix, axis=-1)
seas_decomp[k] = np.expand_dims(seas_regression, 0)
total_seas_regression += seas_regression
else:
total_seas_regression = np.zeros((1, df.shape[0]), dtype=np.double)
return total_seas_regression, seas_decomp
def _set_levs_and_seas(self, df, training_meta):
response_col = training_meta['response_col']
date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
# use ktrlite to derive levs and seas
ktrlite = KTRLite(
response_col=response_col,
date_col=date_col,
level_knot_scale=self.level_knot_scale,
level_segments=self.level_segments,
level_knot_dates=self.level_knot_dates,
level_knot_distance=self.level_knot_distance,
seasonality=self.seasonality,
seasonality_fs_order=self.seasonality_fs_order,
seasonal_initial_knot_scale=self.seasonal_initial_knot_scale,
seasonal_knot_scale=self.seasonal_knot_scale,
seasonality_segments=self.seasonality_segments,
degree_of_freedom=self.degree_of_freedom,
date_freq=self.date_freq,
estimator='stan-map',
**self.ktrlite_optim_args
)
ktrlite.fit(df=df)
# self._ktrlite_model = ktrlite
ktrlite_pt_posteriors = ktrlite.get_point_posteriors()
ktrlite_obs_scale = ktrlite_pt_posteriors['map']['obs_scale']
# load _seasonality and _seasonality_fs_order
self._seasonality = ktrlite._model._seasonality
self._seasonality_fs_order = ktrlite._model._seasonality_fs_order
for seas in self._seasonality:
self._seasonality_labels.append('seasonality_{}'.format(seas))
# if input None for upper bound of residuals scale, use data-driven input
if self.residuals_scale_upper is None:
# make it 5 times to have some buffer in case we over-fit in KTRLite
self._residuals_scale_upper = min(ktrlite_obs_scale * 5, training_meta['response_sd'])
# this part is to extract level and seasonality result from KTRLite
self._level_knots = np.squeeze(ktrlite_pt_posteriors['map']['lev_knot'])
self._level_knot_dates = ktrlite._model._level_knot_dates
tp = np.arange(1, num_of_observations + 1) / num_of_observations
# # trim level knots dates when they are beyond training dates
# lev_knot_dates = list()
# lev_knots = list()
# for i, x in enumerate(self.level_knot_dates):
# if (x <= df[date_col].max()) and (x >= df[date_col].min()):
# lev_knot_dates.append(x)
# lev_knots.append(self._level_knots[i])
# self._level_knot_dates = pd.to_datetime(lev_knot_dates)
# self._level_knots = np.array(lev_knots)
self._level_knots_idx = get_knot_idx(
date_array=date_array,
num_of_obs=None,
knot_dates=self._level_knot_dates,
knot_distance=None,
num_of_segments=None,
date_freq=self.date_freq,
)
self.knots_tp_level = (1 + self._level_knots_idx) / num_of_observations
self._kernel_level = sandwich_kernel(tp, self.knots_tp_level)
self._num_knots_level = len(self._level_knot_dates)
if self._seasonality:
self._seasonality_coef_knot_dates = ktrlite._model._coef_knot_dates
coef_knots_flatten = ktrlite_pt_posteriors['map']['coef_knot']
coef_knots = dict()
pos = 0
for idx, label in enumerate(self._seasonality_labels):
order = self._seasonality_fs_order[idx]
coef_knots[label] = coef_knots_flatten[..., pos:(pos + 2 * order), :]
pos += 2 * order
self._seasonality_coef_knots = coef_knots
            # we only need the total seasonal term here; the per-component decomposition is discarded
self._seas_term, _ = self._generate_seas(
df,
training_meta,
self._seasonality_coef_knot_dates,
self._seasonality_coef_knots,
self._seasonality,
self._seasonality_fs_order,
self._seasonality_labels)
# remove batch size as an input for models
self._seas_term = np.squeeze(self._seas_term, 0)
def _filter_coef_prior(self, df):
if self._coef_prior_list and len(self._regressor_col) > 0:
# iterate over a copy due to the removal operation
for test_dict in self._coef_prior_list[:]:
prior_regressor_col = test_dict[KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value]
m = test_dict[KTRTimePointPriorKeys.PRIOR_MEAN.value]
sd = test_dict[KTRTimePointPriorKeys.PRIOR_SD.value]
end_tp_idx = min(test_dict[KTRTimePointPriorKeys.PRIOR_END_TP_IDX.value], df.shape[0])
start_tp_idx = min(test_dict[KTRTimePointPriorKeys.PRIOR_START_TP_IDX.value], df.shape[0])
if start_tp_idx < end_tp_idx:
expected_shape = (end_tp_idx - start_tp_idx, len(prior_regressor_col))
test_dict.update({KTRTimePointPriorKeys.PRIOR_END_TP_IDX.value: end_tp_idx})
test_dict.update({KTRTimePointPriorKeys.PRIOR_START_TP_IDX.value: start_tp_idx})
# mean/sd expanding
test_dict.update({KTRTimePointPriorKeys.PRIOR_MEAN.value: np.full(expected_shape, m)})
test_dict.update({KTRTimePointPriorKeys.PRIOR_SD.value: np.full(expected_shape, sd)})
else:
# removing invalid prior
self._coef_prior_list.remove(test_dict)
def set_dynamic_attributes(self, df, training_meta):
"""Overriding: func: `~orbit.models.BaseETS._set_dynamic_attributes"""
self._set_regressor_matrix(df, training_meta)
self._set_coefficients_kernel_matrix(df, training_meta)
self._set_knots_scale_matrix(df, training_meta)
self._set_levs_and_seas(df, training_meta)
self._filter_coef_prior(df)
self._set_valid_response_attributes(training_meta)
@staticmethod
def _concat_regression_coefs(pr_beta=None, rr_beta=None):
"""Concatenates regression posterior matrix
In the case that `pr_beta` or `rr_beta` is a 1d tensor, transform to 2d tensor and
concatenate.
Args
----
pr_beta : array like
            positive-value constrained regression betas
rr_beta : array like
regular regression betas
Returns
-------
array like
concatenated 2d array of shape (1, len(rr_beta) + len(pr_beta))
"""
regressor_beta = None
if pr_beta is not None and rr_beta is not None:
pr_beta = pr_beta if len(pr_beta.shape) == 2 else pr_beta.reshape(1, -1)
rr_beta = rr_beta if len(rr_beta.shape) == 2 else rr_beta.reshape(1, -1)
regressor_beta = torch.cat((rr_beta, pr_beta), dim=1)
elif pr_beta is not None:
regressor_beta = pr_beta
elif rr_beta is not None:
regressor_beta = rr_beta
return regressor_beta
def predict(self, posterior_estimates, df, training_meta, prediction_meta,
coefficient_method="smooth",
include_error=False, store_prediction_array=False, **kwargs):
"""Vectorized version of prediction math
Parameters
----
coefficient_method : str
either "smooth" or "empirical". when "empirical" is used, curves are sampled/aggregated directly
from beta posteriors; when "smooth" is used, first extract sampled/aggregated posteriors of knots
then beta.
this mainly impacts the aggregated estimation method; full bayesian should not be impacted
include_error : bool
if generating the noise samples
store_prediction_array : bool
if storing the prediction array
"""
################################################################
# Model Attributes
################################################################
# FIXME: do we still need this?
model = deepcopy(posterior_estimates)
arbitrary_posterior_value = list(model.values())[0]
num_sample = arbitrary_posterior_value.shape[0]
################################################################
# Prediction Attributes
################################################################
output_len = prediction_meta[PredictionMetaKeys.PREDICTION_DF_LEN.value]
prediction_start = prediction_meta[PredictionMetaKeys.START.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
training_end = training_meta[TrainingMetaKeys.END.value]
# Here assume dates are ordered and consecutive
# if prediction_meta[PredictionMetaKeys.START.value] > self.training_end,
# assume prediction starts right after train end
if prediction_start > training_end:
# time index for prediction start
start = num_of_observations
else:
start = pd.Index(date_array).get_loc(prediction_start)
new_tp = np.arange(start + 1, start + output_len + 1) / num_of_observations
if include_error:
# in-sample knots
lev_knot_in = model.get(BaseSamplingParameters.LEVEL_KNOT.value)
# TODO: hacky way; let's just assume last two knot distance is knots distance for all knots
lev_knot_width = self.knots_tp_level[-1] - self.knots_tp_level[-2]
# check whether we need to put new knots for simulation
if new_tp[-1] >= self.knots_tp_level[-1] + lev_knot_width:
# derive knots tp
knots_tp_level_out = np.arange(self.knots_tp_level[-1] + lev_knot_width, new_tp[-1], lev_knot_width)
new_knots_tp_level = np.concatenate([self.knots_tp_level, knots_tp_level_out])
lev_knot_out = np.random.laplace(0, self.level_knot_scale,
size=(lev_knot_in.shape[0], len(knots_tp_level_out)))
lev_knot_out = np.cumsum(np.concatenate([lev_knot_in[:, -1].reshape(-1, 1), lev_knot_out],
axis=1), axis=1)[:, 1:]
lev_knot = np.concatenate([lev_knot_in, lev_knot_out], axis=1)
else:
new_knots_tp_level = self.knots_tp_level
lev_knot = lev_knot_in
kernel_level = sandwich_kernel(new_tp, new_knots_tp_level)
else:
lev_knot = model.get(BaseSamplingParameters.LEVEL_KNOT.value)
kernel_level = sandwich_kernel(new_tp, self.knots_tp_level)
obs_scale = model.get(BaseSamplingParameters.OBS_SCALE.value)
obs_scale = obs_scale.reshape(-1, 1)
# if self._seasonality is not None:
# condition of seasonality is checked inside
total_seas, seas_decomp = self._generate_seas(df, training_meta,
self._seasonality_coef_knot_dates,
self._seasonality_coef_knots,
self._seasonality,
self._seasonality_fs_order,
self._seasonality_labels)
# # seas is 1-d array, add the batch size back
# seas = np.expand_dims(seas, 0)
# else:
# # follow component shapes
# seas = np.zeros((1, output_len))
trend = np.matmul(lev_knot, kernel_level.transpose((1, 0)))
regression = np.zeros(trend.shape)
if self._num_of_regressors > 0:
regressor_matrix = df.filter(items=self._regressor_col, ).values
regressor_betas = self._get_regression_coefs_matrix(
training_meta,
posterior_estimates,
coefficient_method,
date_array=prediction_meta[TrainingMetaKeys.DATE_ARRAY.value]
)
regression = np.sum(regressor_betas * regressor_matrix, axis=-1)
if include_error:
epsilon = nct.rvs(self.degree_of_freedom, nc=0, loc=0,
scale=obs_scale, size=(num_sample, len(new_tp)))
trend += epsilon
pred_array = trend + total_seas + regression
# if decompose output dictionary of components
decomp_dict = {
'prediction': pred_array,
'trend': trend,
'regression': regression
}
# this is an input from ktrlite
decomp_dict.update(seas_decomp)
if store_prediction_array:
self.pred_array = pred_array
else:
self.pred_array = None
return decomp_dict
def _get_regression_coefs_matrix(self, training_meta, posteriors, coefficient_method='smooth', date_array=None):
"""internal function to provide coefficient matrix given a date array
Args
----
posteriors : dict
posterior samples
date_array : array like
array of date stamp
coefficient_method : str
either "smooth" or "empirical". when "empirical" is used, curves are sampled/aggregated directly
from beta posteriors; when "smooth" is used, first extract sampled/aggregated posteriors of knots
then beta.
this mainly impacts the aggregated estimation method; full bayesian should not be impacted.
"""
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
training_start = training_meta[TrainingMetaKeys.START.value]
training_end = training_meta[TrainingMetaKeys.END.value]
train_date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
if self._num_of_regular_regressors + self._num_of_positive_regressors + self._num_of_negative_regressors == 0:
return None
# if date_array not specified, coefficients in the training period will be retrieved
if date_array is None:
if coefficient_method == 'smooth':
coef_knots = posteriors.get(RegressionSamplingParameters.COEFFICIENTS_KNOT.value)
# only 1 knot for 0 segments
if self.regression_segments == 0:
coef_knots = np.expand_dims(coef_knots, -1)
if len(self._regressor_col) == 1:
coef_knots = np.expand_dims(coef_knots, 1)
# result in batch x time step x regressor size shape
regressor_betas = np.matmul(coef_knots, self._kernel_coefficients.transpose((1, 0)))
# if len(self._regressor_col) == 1:
# regressor_betas = np.expand_dims(regressor_betas, 0)
regressor_betas = regressor_betas.transpose((0, 2, 1))
elif coefficient_method == 'empirical':
regressor_betas = posteriors.get(RegressionSamplingParameters.COEFFICIENTS.value)
else:
raise IllegalArgument('Wrong coefficient_method:{}'.format(coefficient_method))
else:
            date_array = pd.to_datetime(date_array)  # api: pandas.to_datetime
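# --- Illustrative sketch (not part of the library code above) ----------------
# A minimal, self-contained example of the idea behind the kernel matrices
# built above: a handful of knot-level coefficients are smoothed into a
# per-time-step coefficient curve by kernel weights. `_demo_gauss_kernel`, the
# knot positions and the coefficient values below are all assumptions made for
# illustration; the library's own gauss_kernel/sandwich_kernel helpers may
# normalise differently.
import numpy as np

def _demo_gauss_kernel(tp, knots_tp, rho=0.15):
    # one row per time point, one column per knot; each row normalised to sum to 1
    dist = (tp[:, None] - knots_tp[None, :]) ** 2
    weights = np.exp(-dist / (2.0 * rho ** 2))
    return weights / weights.sum(axis=1, keepdims=True)

num_obs = 100
tp = np.arange(1, num_obs + 1) / num_obs                 # scaled time steps, as in the code above
knots_idx = np.array([0, 24, 49, 74, 99])                # assumed knot positions
knots_tp = (1 + knots_idx) / num_obs
kernel = _demo_gauss_kernel(tp, knots_tp)

coef_knots = np.array([0.5, 0.8, 0.2, -0.1, 0.4])        # assumed knot-level coefficients
coef_curve = kernel @ coef_knots                         # smooth time-varying coefficient, shape (num_obs,)
print(coef_curve.shape)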
|
"""
Calculate the fragment size of PE reads,
Available tools: CollectInsertSizeMetrics (Picard), bamPEFragmentSize (deeptools)
Here is the samtools-based method:
Statistics:
1. length counts
2. mean/median/quartiles
"""
import os
import sys
import pathlib
import numpy as np
import pandas as pd
import pysam
import tempfile
import logging
import hiseq
from multiprocessing import Pool
logging.basicConfig(
format='[%(asctime)s %(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stdout)
log = logging.getLogger(__name__)
log.setLevel('INFO')
class BamFragSize(object):
"""Calculate the read size of SE
single BAM file
sample size = 1000 (SE or PE)
> Table.csv
length strand count
"""
def __init__(self, bam, labels=None, asPE=False, maxRecords=0,
strandness=False, csv_file=None):
self.bam = bam
self.asPE = asPE # fragment sizes
self.maxRecords = maxRecords
self.strandness = strandness
self.csv_file = csv_file
# only for single bam file
if not isinstance(bam, str):
raise Exception('str expected, {} got'.format(type(bam).__name__))
if not self.isBam(bam):
raise Exception('bam={}, is not bam file'.format(bam))
# asPE: paired end only
if asPE and not self.isBamPE(bam):
raise Exception('asPE=True, but bam is not paired')
bam_name = os.path.splitext(os.path.basename(bam))[0]
self.labels = labels if labels else bam_name
self.freqTable = self.calFragSize(bam) # dataframe
def is_empty(self, bam):
"""Check input file is empty or not
pysam.Samfile().count
pysam.Samfile().mapped
"""
pysam.index(bam)
sam = pysam.Samfile(bam)
return sam.count() == 0 and sam.mapped == 0
def isBam(self, bam):
"""Check input is BAM file
update: self.bam
string
*.bam
"""
if bam is None:
bam = self.bam
flag = False
if isinstance(self.bam, str):
flag = self.bam.endswith('.bam') and os.path.exists(self.bam)
return flag
def isBamPE(self, bam, topn=1000):
"""Check input bam is Paired end alignment"""
flag = False
if self.isBam(bam):
samfile = pysam.AlignmentFile(bam)
flag = [read.is_paired for read in samfile.head(topn)]
samfile.close()
return all(flag)
def calFreq(self, x, return_dataframe=True):
"""Calculate the frequency of list
['length', 'strand']
return dataframe
"""
header = ['length', 'strand', 'count']
if isinstance(x, list):
df = pd.DataFrame(x, columns=header).groupby(['length', 'strand']).count().reset_index()
else:
df = pd.DataFrame(columns=header)
if not self.strandness:
df['strand'] = '*'
return df
def calFragSize(self, bam=None, chunk=1000000):
"""Extract the read length
length count id
"""
if bam is None:
bam = self.bam
# for SE or PE
if self.isBamPE(bam):
pass
else:
pass
# empty check
if self.is_empty(bam):
log.error('bam is empty: {}'.format(bam))
return pd.DataFrame(columns=['length','strand','count','id']) #!!
# read sam/bam file
sam = pysam.AlignmentFile(bam)
counter = 0
fragSizes = []
frames = []
for read in sam:
if self.asPE:
# fragment sizes
if read.is_proper_pair \
and not read.is_unmapped \
and not read.mate_is_unmapped \
and not read.is_read1 \
and not read.is_duplicate \
and read.template_length > 0:
                    counter += 1
                    strand = '-' if read.is_reverse else '+'
                    fragSizes.append([read.template_length, strand, 1])
else:
# reads sizes
if not read.is_unmapped \
and not read.is_duplicate > 0:
counter += 1
strand = '-' if read.is_reverse else '+'
fragSizes.append([read.infer_query_length(), strand, 1])
# sample size
if self.maxRecords > 0 and counter > self.maxRecords:
log.info('stop at: {}'.format(counter ))
break # stop
# chunk
if counter > 0 and counter%chunk == 0:
frames.append(self.calFreq(fragSizes))
fragSizes = [] # empty
log.info('{} : {} {}'.format('Processed', counter , self.labels))
# last chunk
if len(fragSizes) > 0:
frames.append(self.calFreq(fragSizes))
fragSizes = [] # empty
log.info('{} : {} {}'.format('Processed', counter , self.labels))
# overall
df = pd.concat(frames, axis=0).groupby(['length', 'strand']).sum().reset_index()
df['id'] = self.labels
return df
def distribution(self):
"""Basic statistics values
value + freq
mean, medium, mode, std, min, max, Q1, Q2, Q3
"""
if self.freqTable.shape[0] == 0:
out = pd.DataFrame(
columns=['mean', 'median', 'mode', 'std', 'min', 'max', 'Q1',
'Q2', 'Q3'])
else:
val = self.freqTable['length']
freq = self.freqTable['count']
inserts = np.repeat(val, freq)
# statistics
q_mean = np.mean(inserts)
q_median = np.median(inserts)
q_median_dev = np.median(np.absolute(inserts - q_median))
q_mode = val[np.argmax(freq)]
q_std = np.std(inserts)
q_min = np.min(inserts)
q_max = np.max(inserts)
q_qual = np.quantile(inserts, [0.25, 0.5, 0.75], axis=0)
# core distribution
s = np.array([q_mean, q_median, q_mode, q_std, q_min, q_max]).round(2)
s = np.append(s, q_qual)
# DataFrame
            out = pd.DataFrame(s)  # api: pandas.DataFrame
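# --- Illustrative sketch (not part of the class above) -----------------------
# Standalone example of the bookkeeping done by calFreq()/distribution(), using
# made-up read lengths instead of a BAM file: build a (length, strand, count)
# frequency table, then recover summary statistics from the value/frequency
# pairs via np.repeat. All values below are assumptions.
import numpy as np
import pandas as pd

records = [[150, '+', 1], [150, '-', 1], [75, '+', 1], [150, '+', 1]]
freq = (pd.DataFrame(records, columns=['length', 'strand', 'count'])
        .groupby(['length', 'strand']).count().reset_index())
inserts = np.repeat(freq['length'].values, freq['count'].values)
summary = pd.DataFrame([{'mean': inserts.mean(), 'median': np.median(inserts),
                         'min': inserts.min(), 'max': inserts.max()}])
print(freq)
print(summary)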
|
import os
from multiprocessing import cpu_count
from pathlib import Path
import pickle
import re
import plaidml.keras
plaidml.keras.install_backend()
from PIL import Image
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, normalize, scale, minmax_scale, robust_scale, LabelEncoder
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import linear_model, ensemble
from scipy.special import softmax
from sklearn import metrics
from sklearn.externals import joblib
from nltk import word_tokenize
from tqdm import tqdm
import keras
from keras import layers
from keras_preprocessing.image import ImageDataGenerator, img_to_array
from keras import backend as K
import tensorflow as tf
import xgboost
from catboost import CatBoostClassifier, Pool
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # workaround for macOS mkl issue
"""Stacked Ensemble using probabilties predicted on validation and test"""
psychic_learners_dir = Path.cwd().parent
BIG_CATEGORY = 'fashion'
print(BIG_CATEGORY)
ROOT_PROBA_FOLDER = str(psychic_learners_dir / 'data' / 'probabilities')
TRAIN_CSV = str(psychic_learners_dir / 'data' / f'{BIG_CATEGORY}_train_split.csv')
VALID_CSV = str(psychic_learners_dir / 'data' / f'{BIG_CATEGORY}_valid_split.csv')
TEST_CSV = str(psychic_learners_dir / 'data' / f'{BIG_CATEGORY}_test_split.csv')
N_CLASSES_FOR_CATEGORIES = {'beauty': 17, 'fashion': 14, 'mobile': 27}
N_CLASSES = N_CLASSES_FOR_CATEGORIES[BIG_CATEGORY]
BATCH_SIZE = 128
# list of models to include in stack
model_names = [
'char_cnn',
'extractions_fasttext',
'image_model',
'title_fasttext',
'word_cnn',
'word_rnn',
'rcnn',
'bert_v1',
'nb_ngrams_2',
'adv_abblstm',
'atten_bilstm',
'ind_rnn',
'multi_head',
'log_reg_tfidf',
'knn_itemid_400_50', # fashion
#'KNN_itemid', # non-fashion
'knn5_tfidf',
'knn10_tfidf',
'knn40_tfidf',
#'rf_itemid', # non-fashion
]
unwanted_models = [
'log_reg',
'capsule_net',
'rf',
'rf_tfidf',
'nb_ngrams',
'xgb_itemid_index',
'meta',
'knn5',
'knn10',
'knn20_tfidf',
'xgb',
'xgb_tfidf',
'bert_large',
'knn80_tfidf',
'knn160_tfidf',
'nb_extractions',
]
N_MODELS = len(model_names)
print(f'Number Models: {N_MODELS}')
meta_model_names = []
def read_probabilties(proba_folder, subset='valid',
model_names=model_names):
"""Reads saved .npy validation and test predicted probabilities from PsychicLearners/data/probabilities"""
proba_folder = Path(proba_folder)
all_probabilities = []
for folder in proba_folder.iterdir():
if not folder.is_dir():
continue
elif model_names and folder.name not in model_names:
if folder.name not in unwanted_models:
print(folder.name, 'not included')
continue
for npy in folder.glob(f'*{subset}.npy'):
prob = np.load(str(npy))
if not (prob >= 0).all():
prob = softmax(prob, axis=1)
prob = normalize(prob, axis=1)
#prob = scale(prob, axis=1)
all_probabilities.append(prob)
all_probabilities = np.concatenate([prob for prob in all_probabilities], axis=1)
print(all_probabilities.shape)
print(N_MODELS * N_CLASSES)
return all_probabilities
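# --- Illustrative sketch ------------------------------------------------------
# What read_probabilties() above produces: each base model contributes an
# (n_samples, N_CLASSES) matrix; logits are pushed through a softmax and every
# row is L2-normalised before the matrices are concatenated column-wise into
# the stacking features. Random arrays stand in for the saved *.npy files and
# all sizes below are arbitrary assumptions.
import numpy as np
from scipy.special import softmax
from sklearn.preprocessing import normalize

_rng = np.random.RandomState(0)
_n_samples, _n_classes, _n_models = 8, 14, 3
_fake_outputs = [_rng.randn(_n_samples, _n_classes) for _ in range(_n_models)]

_stack = []
for _prob in _fake_outputs:
    if not (_prob >= 0).all():            # looks like logits, not probabilities
        _prob = softmax(_prob, axis=1)
    _stack.append(normalize(_prob, axis=1))
_stack = np.concatenate(_stack, axis=1)   # shape (n_samples, n_models * n_classes)
print(_stack.shape)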
MODEL_INPUT_SHAPE = (N_CLASSES * N_MODELS,)
def ensemble_model(dense1=None, dense2=None, n_layers=4, dropout=0.25, k_reg=0):
"""Creates NN ensemble model"""
k_regularizer = keras.regularizers.l2(k_reg)
input_tensor = keras.layers.Input(shape=MODEL_INPUT_SHAPE)
if dense1:
x = layers.Dense(dense1, activation=None, kernel_initializer='he_uniform',
kernel_regularizer=k_regularizer)(input_tensor)
x = layers.PReLU()(x)
x = layers.Dropout(dropout)(x)
x = layers.BatchNormalization()(x)
if dense2:
for n in range(n_layers-1):
x = layers.Dense(dense2, activation=None, kernel_initializer='he_uniform',
kernel_regularizer=k_regularizer)(x)
x = layers.PReLU()(x)
if not n == n_layers-2: # Don't want dropout and BN on last layer
x = layers.Dropout(dropout)(x)
x = layers.BatchNormalization()(x)
if dense1:
predictions = layers.Dense(N_CLASSES, activation='softmax', kernel_regularizer=k_regularizer)(x)
else:
predictions = layers.Dense(
N_CLASSES, activation='softmax', kernel_regularizer=k_regularizer)(input_tensor)
model = keras.models.Model(inputs=input_tensor, outputs=predictions)
return model
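# Possible usage of ensemble_model() above; the layer sizes are arbitrary
# examples and actual training happens in train_nn() below. Building the model
# here only checks that the architecture wires up for the current
# MODEL_INPUT_SHAPE / N_CLASSES.
_demo_meta_model = ensemble_model(dense1=150, dense2=32, n_layers=4, dropout=0.2, k_reg=1e-8)
_demo_meta_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
_demo_meta_model.summary()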
TWO_HEAD_SHAPE = int(N_CLASSES * N_MODELS / 2)
def two_head_ensemble_model(dense1=None, dense2=None, dropout=0.25, k_reg=0.00000001):
"""Not used due to no performance gains"""
k_regularizer = keras.regularizers.l2(k_reg)
input_tensor = keras.layers.Input(shape=(TWO_HEAD_SHAPE,))
x = layers.Dense(dense1, activation=None, kernel_initializer='he_uniform',
kernel_regularizer=k_regularizer)(input_tensor)
x = layers.PReLU()(x)
out = layers.Dropout(dropout)(x)
half_model = keras.models.Model(inputs=input_tensor, outputs=out)
inp_a = keras.layers.Input(shape=(TWO_HEAD_SHAPE,))
inp_b = keras.layers.Input(shape=(TWO_HEAD_SHAPE,))
out_a = half_model(inp_a)
out_b = half_model(inp_b)
concatenated = keras.layers.concatenate([out_a, out_b])
x = layers.Dense(dense2, activation=None, kernel_initializer='he_uniform',
kernel_regularizer=k_regularizer)(concatenated)
x= layers.PReLU()(x)
predictions = layers.Dense(N_CLASSES, activation='softmax', kernel_regularizer=k_regularizer)(x)
model = keras.models.Model(inputs=[inp_a, inp_b], outputs=predictions)
return model
def train_nn(dense1=150, dense2=32, n_layers=4, dropout=0.2, k_reg=0.00000001,
lr_base=0.01, epochs=50, lr_decay_factor=1,
checkpoint_dir=str(psychic_learners_dir / 'data' / 'keras_checkpoints' / BIG_CATEGORY / 'combined_nn'),
model_name='1', extract_probs=False):
"""Train NN ensemble, extract_probs=False will perform 4 fold CV. extract_probs=True will save probabilities against validation fold and save model"""
train_x = read_probabilties(proba_folder=os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='valid')
train_y = pd.read_csv(VALID_CSV)['Category'].values
kfold = StratifiedKFold(n_splits=4, random_state=7, shuffle=True)
cvscores = []
encoder = OneHotEncoder(sparse=False)
if not extract_probs:
for train, test in kfold.split(train_x, train_y):
print(len(train))
print(len(test))
model = ensemble_model(dense1=dense1, dense2=dense2, n_layers=n_layers,
dropout=dropout, k_reg=k_reg)
decay = lr_base/(epochs * lr_decay_factor)
sgd = keras.optimizers.SGD(lr=lr_base, decay=decay, momentum=0.9, nesterov=True)
# callbacks
early_stopping = keras.callbacks.EarlyStopping(
monitor='val_acc', min_delta=0.0001, patience=7)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=5,
verbose=1, mode='auto', min_delta=0.00001,
cooldown=0, min_lr=0)
model.compile(optimizer=sgd,
loss='categorical_crossentropy',
metrics=['accuracy'])
y_train = encoder.fit_transform(train_y.reshape(-1, 1))
model.fit(x=train_x[train], y=y_train[train], batch_size=BATCH_SIZE, epochs=1000, verbose=2,
callbacks=[early_stopping, reduce_lr], validation_data=(train_x[test], y_train[test]),
shuffle=True, class_weight=None, steps_per_epoch=None, validation_steps=None)
scores = model.evaluate(train_x[test], y_train[test], verbose=0)
print("%s: %.4f%%" % (model.metrics_names[1], scores[1]*100))
cvscores.append(scores[1] * 100)
print("CV ACC %.4f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
if extract_probs:
X_train, X_valid, y_train, y_valid = train_test_split(train_x, train_y,
stratify=train_y,
test_size=0.25, random_state=42)
y_train = encoder.fit_transform(y_train.reshape(-1, 1))
y_valid = encoder.fit_transform(y_valid.reshape(-1, 1))
test_x = read_probabilties(proba_folder=os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='test')
model = ensemble_model(dense1=dense1, dense2=dense2, n_layers=n_layers,
dropout=dropout, k_reg=k_reg)
decay = lr_base/(epochs * lr_decay_factor)
sgd = keras.optimizers.SGD(lr=lr_base, decay=decay, momentum=0.9, nesterov=True)
# callbacks
checkpoint_path = os.path.join(
checkpoint_dir, '{}_checkpoints'.format(model_name))
os.makedirs(checkpoint_path, exist_ok=True)
early_stopping = keras.callbacks.EarlyStopping(
monitor='val_acc', min_delta=0.00001, patience=10)
ckpt = keras.callbacks.ModelCheckpoint(os.path.join(checkpoint_path, 'model.{epoch:02d}-{val_acc:.4f}.h5'),
monitor='val_acc', verbose=1, save_best_only=True)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=5,
verbose=1, mode='auto', min_delta=0.00001,
cooldown=0, min_lr=0)
model.compile(optimizer=sgd,
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x=X_train, y=y_train, batch_size=BATCH_SIZE, epochs=1000, verbose=2,
callbacks=[ckpt, reduce_lr, early_stopping], validation_data=(
X_valid, y_valid),
shuffle=True, class_weight=None, steps_per_epoch=None, validation_steps=None)
val_preds = model.predict(X_valid)
test_preds = model.predict(test_x)
print(test_preds.shape)
os.makedirs(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY, 'meta', model_name + '_nn'), exist_ok=True)
np.save(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY, 'meta', model_name + '_nn', 'valid.npy'), val_preds)
np.save(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY, 'meta', model_name + '_nn', 'test.npy'), test_preds)
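# --- Illustrative sketch ------------------------------------------------------
# The cross-validation loop in train_nn() above follows the standard pattern
# sketched here, with a toy sklearn classifier standing in for the Keras
# meta-learner; the data and classifier are assumptions, only the fold logic
# mirrors the function above.
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression

_rng = np.random.RandomState(7)
_X_toy = _rng.rand(40, 6)                        # stand-in for stacked probability features
_y_toy = _rng.randint(0, 3, size=40)             # stand-in for the 'Category' labels
_cv_scores = []
for _tr, _te in StratifiedKFold(n_splits=4, random_state=7, shuffle=True).split(_X_toy, _y_toy):
    _clf = LogisticRegression(max_iter=200).fit(_X_toy[_tr], _y_toy[_tr])
    _cv_scores.append(_clf.score(_X_toy[_te], _y_toy[_te]))
print("CV ACC %.4f (+/- %.4f)" % (np.mean(_cv_scores), np.std(_cv_scores)))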
def train_two_head_model(lr_base=0.01, epochs=50, lr_decay_factor=1,
checkpoint_dir=str(psychic_learners_dir / 'data' / 'keras_checkpoints' / BIG_CATEGORY / 'combined_2_head'),
model_name='1'):
"""Not used"""
model = two_head_ensemble_model(dense1=200, dense2=200, # mobile may need more dropout
dropout=0.3, k_reg=0.00000001)
decay = lr_base/(epochs * lr_decay_factor)
sgd = keras.optimizers.SGD(lr=lr_base, decay=decay, momentum=0.9, nesterov=True)
# callbacks
checkpoint_path = os.path.join(checkpoint_dir, '{}_checkpoints'.format(model_name))
os.makedirs(checkpoint_path, exist_ok=True)
ckpt = keras.callbacks.ModelCheckpoint(os.path.join(checkpoint_path, 'model.{epoch:02d}-{val_acc:.2f}.h5'),
monitor='val_acc', verbose=1, save_best_only=True)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=5,
verbose=1, mode='auto',
cooldown=0, min_lr=0)
model.compile(optimizer=sgd,
loss='categorical_crossentropy',
metrics=['accuracy'])
train_x = read_probabilties(proba_folder=os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='valid')
train_y = pd.read_csv(VALID_CSV)['Category'].values
X_train, X_valid, y_train, y_valid = train_test_split(train_x, train_y,
stratify=train_y,
test_size=0.25, random_state=42)
encoder = OneHotEncoder(sparse=False)
y_train = encoder.fit_transform(y_train.reshape(-1, 1))
y_valid = encoder.fit_transform(y_valid.reshape(-1, 1))
model.fit([X_train[:, :TWO_HEAD_SHAPE], X_train[:, TWO_HEAD_SHAPE:]], y=y_train, batch_size=BATCH_SIZE, epochs=1000, verbose=2,
callbacks=[ckpt, reduce_lr], validation_data=([X_valid[:, :TWO_HEAD_SHAPE], X_valid[:, TWO_HEAD_SHAPE:]], y_valid),
shuffle=True, class_weight=None, steps_per_epoch=None, validation_steps=None)
with open("word_dict.pickle", "rb") as f:
word_dict = pickle.load(f)
def build_word_dataset(titles, word_dict, document_max_len):
df = pd.DataFrame(data={'title': titles})
x = list(map(lambda d: word_tokenize(clean_str(d)), df["title"]))
x = list(
map(lambda d: list(map(lambda w: word_dict.get(w, word_dict["<unk>"]), d)), x))
x = list(map(lambda d: d + [word_dict["<eos>"]], x))
x = list(map(lambda d: d[:document_max_len], x))
x = list(map(lambda d: d + (document_max_len - len(d))
* [word_dict["<pad>"]], x))
return x
def clean_str(text):
text = re.sub(r"[^A-Za-z0-9]", " ", text)
text = re.sub(r"\s{2,}", " ", text)
text = text.strip().lower()
return text
def batch_iter(inputs, batch_size):
inputs = np.array(inputs)
num_batches_per_epoch = (len(inputs) - 1) // batch_size + 1
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, len(inputs))
yield inputs[start_index:end_index]
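# --- Illustrative sketch ------------------------------------------------------
# What build_word_dataset()/batch_iter() above do to a raw title, shown with a
# toy vocabulary and plain whitespace splitting instead of NLTK's word_tokenize
# (an assumption to keep the snippet dependency-free).
_toy_word_dict = {"<pad>": 0, "<unk>": 1, "<eos>": 2, "blue": 3, "dress": 4}
_doc_max_len = 6

def _demo_encode(title):
    ids = [_toy_word_dict.get(w, _toy_word_dict["<unk>"]) for w in clean_str(title).split()]
    ids = (ids + [_toy_word_dict["<eos>"]])[:_doc_max_len]
    return ids + (_doc_max_len - len(ids)) * [_toy_word_dict["<pad>"]]

_encoded = [_demo_encode(t) for t in ["Blue DRESS size M!!", "red shoes"]]
for _batch in batch_iter(_encoded, batch_size=2):
    print(_batch)   # rows of word ids, <eos>-terminated and padded to _doc_max_len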
WORD_MAX_LEN = 15
TEXT_MODEL_PATH = str(psychic_learners_dir / 'data' / 'keras_checkpoints' /
BIG_CATEGORY / 'word_cnn' / '0.8223667828685259.ckpt-686000')
def extract_text_features(titles, subset):
"""titles: array of titles, not used"""
test_x = build_word_dataset(titles, word_dict, WORD_MAX_LEN)
graph = tf.Graph()
all_text_features = []
with graph.as_default():
with tf.Session() as sess:
saver = tf.train.import_meta_graph(
"{}.meta".format(TEXT_MODEL_PATH))
saver.restore(sess, TEXT_MODEL_PATH)
x = graph.get_operation_by_name("x").outputs[0]
y = graph.get_operation_by_name("Reshape").outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
batches = batch_iter(test_x, BATCH_SIZE)
for batch_x in batches:
feed_dict = {
x: batch_x,
is_training: False
}
text_features = sess.run(y, feed_dict=feed_dict)
for text_feature in text_features:
all_text_features.append(text_feature)
all_text_features = np.array(all_text_features)
os.makedirs(str(psychic_learners_dir / 'data' / 'features' / BIG_CATEGORY / 'word_cnn'), exist_ok=True)
np.save(str(psychic_learners_dir / 'data' / 'features' / BIG_CATEGORY / 'word_cnn' / f'{subset}.npy'), all_text_features)
return all_text_features
def train_catboost(model_name, extract_probs=False, save_model=False):
"""Not used"""
train_x = read_probabilties(proba_folder=os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='valid')
train_y = pd.read_csv(VALID_CSV)['Category'].values
X_train, X_valid, y_train, y_valid = train_test_split(train_x, train_y,
stratify=train_y,
test_size=0.25, random_state=42)
classifier = CatBoostClassifier(
iterations=150, learning_rate=0.03, depth=9, l2_leaf_reg=2,
loss_function='MultiClass', border_count=32)
train_data = Pool(X_train, y_train)
valid_data = Pool(X_valid, y_valid)
classifier.fit(train_data)
# predict the labels on validation dataset
predictions = classifier.predict(train_data)
print('Train Acc: {}'.format(metrics.accuracy_score(predictions, y_train)))
predictions = classifier.predict(valid_data)
print('Valid accuracy: ', metrics.accuracy_score(predictions, y_valid))
if save_model:
checkpoint_path = psychic_learners_dir / 'data' / 'keras_checkpoints' / BIG_CATEGORY / 'combined_catboost' / '{}_saved_model'.format(model_name)
os.makedirs(str(checkpoint_path), exist_ok=True)
classifier.save_model(str(checkpoint_path / 'catboost_model'))
def train_adaboost_extra_trees(model_name, extract_probs=False, save_model=False, stratified=False, param_dict=None):
if BIG_CATEGORY == 'fashion' and 'KNN_itemid' in model_names:
raise Exception('Warning KNN itemid in fashion')
train_probs = read_probabilties(proba_folder=os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='valid')
valid_df = pd.read_csv(VALID_CSV)
train_y = valid_df['Category'].values
encoder = LabelEncoder()
train_y = encoder.fit_transform(train_y)
if param_dict:
print(param_dict)
classifier = xgboost.XGBClassifier(**param_dict)
else:
base_estim = ensemble.ExtraTreesClassifier(n_estimators=110, criterion='gini', max_depth=None, min_samples_split=2, # 80.8766%
min_samples_leaf=1, max_features='auto')
classifier = ensemble.AdaBoostClassifier(base_estimator=base_estim, n_estimators=60, learning_rate=1.0,
algorithm='SAMME.R')
if stratified:
kfold = StratifiedKFold(n_splits=4, random_state=7, shuffle=True)
results = cross_val_score(
classifier, train_probs, train_y, cv=kfold, n_jobs=-1, )
print("Accuracy: %.4f%% (%.2f%%)" %
(results.mean()*100, results.std()*100))
elif not stratified:
X_train, X_valid, y_train, y_valid = train_test_split(train_probs, train_y,
stratify=train_y,
test_size=0.25, random_state=42)
classifier.fit(X_train, y_train)
# predict the labels on validation dataset
predictions = classifier.predict(X_train)
print('Train Acc: {}'.format(metrics.accuracy_score(predictions, y_train)))
predictions = classifier.predict(X_valid)
print('Valid accuracy: ', metrics.accuracy_score(predictions, y_valid))
if save_model:
assert not stratified
checkpoint_path = psychic_learners_dir / 'data' / 'keras_checkpoints' / \
BIG_CATEGORY / 'combined_ada' / '{}_saved_model'.format(model_name)
os.makedirs(str(checkpoint_path), exist_ok=True)
joblib.dump(classifier, str(checkpoint_path / "adaboost.joblib"))
if extract_probs:
assert not stratified
test_x = read_probabilties(proba_folder=os.path.join(
ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='test')
val_preds = classifier.predict_proba(X_valid)
test_preds = classifier.predict_proba(test_x)
print(test_preds.shape)
os.makedirs(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY,
'meta', model_name + '_ada'), exist_ok=True)
np.save(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY,
'meta', model_name + '_ada', 'valid.npy'), val_preds)
np.save(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY,
'meta', model_name + '_ada', 'test.npy'), test_preds)
def change_wrong_category():
"""Not used due to lack of submissions"""
valid_df = pd.read_csv(VALID_CSV)
with open('/Users/sunyitao/Downloads/all_corrected_wrongs.txt') as f:
checked_itemids = f.readlines()
checked_itemids = [itemid.replace('\n', '') for itemid in checked_itemids]
suspected_wrong = pd.read_csv('/Users/sunyitao/Documents/Projects/GitHub/PsychicLearners/data/suspected_wrong_valid.csv')
confirmed_wrong = suspected_wrong[suspected_wrong['itemid'].isin(checked_itemids)]
categories = []
for itemid, category in tqdm(valid_df[['itemid', 'Category']].values):
if itemid in checked_itemids:
category = confirmed_wrong['expected_category'][confirmed_wrong['itemid'] == itemid].values
categories.append(category)
valid_df['Category'] = categories #TODO this does not work
valid_df.to_csv(str(psychic_learners_dir / 'data' / f'corrected_{BIG_CATEGORY}_valid_split.csv'))
def train_xgb(model_name, extract_probs=False, save_model=False, stratified=False, param_dict=None):
    """KNN itemid gives good performance on validation but very poor performance on the public leaderboard due to different itemid distributions.
    extract_probs=False, save_model=False, stratified=True to perform 4 fold CV
    extract_probs=True, save_model=True, stratified=False to save out-of-fold probabilities and model"""
    if BIG_CATEGORY == 'fashion' and 'KNN_itemid' in model_names:
        raise Exception('Warning KNN itemid in fashion')
train_probs = read_probabilties(proba_folder=os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='valid')
#train_elmo = np.load(str(psychic_learners_dir / 'data' / 'features' / BIG_CATEGORY / 'elmo' / 'valid_flat.npy'))
#train_probs = np.concatenate([train_probs, train_elmo], axis=1)
valid_df = pd.read_csv(VALID_CSV)
train_y = valid_df['Category'].values
encoder = LabelEncoder()
train_y = encoder.fit_transform(train_y)
if param_dict:
print(param_dict)
classifier = xgboost.XGBClassifier(**param_dict)
else:
classifier = xgboost.XGBClassifier(
max_depth=5, learning_rate=0.05, n_estimators=150, silent=True,
objective='binary:logistic', booster='gbtree', n_jobs=-1, nthread=None,
gamma=0, min_child_weight=2, max_delta_step=0, subsample=1.0, colsample_bytree=1.0,
colsample_bylevel=1, reg_alpha=0.01, reg_lambda=1, scale_pos_weight=1,
base_score=0.5, random_state=0, seed=None, missing=None)
if stratified:
kfold = StratifiedKFold(n_splits=4, random_state=7, shuffle=True)
results = cross_val_score(classifier, train_probs, train_y, cv=kfold, n_jobs=-1, )
print("Accuracy: %.4f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
elif not stratified:
X_train, X_valid, y_train, y_valid = train_test_split(train_probs, train_y,
stratify=train_y,
test_size=0.25, random_state=42)
classifier.fit(X_train, y_train)
# predict the labels on validation dataset
predictions = classifier.predict(X_train)
print('Train Acc: {}'.format(metrics.accuracy_score(predictions, y_train)))
predictions = classifier.predict(X_valid)
print('Valid accuracy: ', metrics.accuracy_score(predictions, y_valid))
if save_model:
assert not stratified
checkpoint_path = psychic_learners_dir / 'data' / 'keras_checkpoints' / \
BIG_CATEGORY / 'combined_xgb' / '{}_saved_model'.format(model_name)
os.makedirs(str(checkpoint_path), exist_ok=True)
joblib.dump(classifier, str(checkpoint_path / "xgb.joblib.dat"))
if extract_probs:
assert not stratified
test_x = read_probabilties(proba_folder=os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='test')
val_preds = classifier.predict_proba(X_valid)
test_preds = classifier.predict_proba(test_x)
print(test_preds.shape)
os.makedirs(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY, 'meta', model_name + '_xgb'), exist_ok=True)
np.save(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY, 'meta', model_name + '_xgb', 'valid.npy'), val_preds)
np.save(os.path.join(ROOT_PROBA_FOLDER, BIG_CATEGORY, 'meta', model_name + '_xgb', 'test.npy'), test_preds)
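# --- Illustrative sketch ------------------------------------------------------
# The non-stratified branch of train_xgb() above boils down to the pattern
# below: fit an XGBClassifier on a stratified train/valid split of the stacked
# features and report accuracy. Toy random data replaces the real probability
# features; the hyperparameters are arbitrary examples.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
import xgboost

_rng = np.random.RandomState(42)
_X_toy, _y_toy = _rng.rand(60, 8), _rng.randint(0, 3, size=60)
_Xtr, _Xva, _ytr, _yva = train_test_split(_X_toy, _y_toy, stratify=_y_toy, test_size=0.25, random_state=42)
_toy_xgb = xgboost.XGBClassifier(max_depth=3, n_estimators=20, learning_rate=0.1)
_toy_xgb.fit(_Xtr, _ytr)
print('Train Acc:', metrics.accuracy_score(_toy_xgb.predict(_Xtr), _ytr))
print('Valid Acc:', metrics.accuracy_score(_toy_xgb.predict(_Xva), _yva))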
def bayes_search_xgb(param_dict):
"""Bayesian optimisation of xgb parameters"""
    from skopt import BayesSearchCV  # assumption: BayesSearchCV comes from scikit-optimize; it is not imported at module level in this snippet
    train_probs = read_probabilties(proba_folder=os.path.join(
        ROOT_PROBA_FOLDER, BIG_CATEGORY), subset='valid')
valid_df = pd.read_csv(VALID_CSV)
train_y = valid_df['Category'].values
encoder = LabelEncoder()
train_y = encoder.fit_transform(train_y)
bayes_cv_tuner = BayesSearchCV(
estimator=xgboost.XGBClassifier(**param_dict),
search_spaces={
'learning_rate': (0.01, 1.0, 'log-uniform'),
'min_child_weight': (0, 4),
'max_depth': (6, 9),
'max_delta_step': (0, 20),
'subsample': (0.7, 1.0, 'uniform'),
'colsample_bytree': (0.7, 1.0, 'uniform'),
'colsample_bylevel': (0.7, 1.0, 'uniform'),
'reg_lambda': (1e-9, 1000, 'log-uniform'),
'reg_alpha': (1e-9, 1.0, 'log-uniform'),
'gamma': (1e-9, 0.5, 'log-uniform'),
'n_estimators': (50, 300),
'scale_pos_weight': (1e-6, 500, 'log-uniform')
},
cv=StratifiedKFold(n_splits=4, random_state=7, shuffle=True),
scoring='accuracy',
n_jobs=-1,
n_iter=100,
verbose=1,
refit=True,
random_state=7
)
def status_print(optim_result):
"""Status callback durring bayesian hyperparameter search"""
# Get all the models tested so far in DataFrame format
        all_models = pd.DataFrame(bayes_cv_tuner.cv_results_)  # api: pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 14:50
Desc: Zhenqi (zq12369.com) - air quality
https://www.zq12369.com/environment.php
Air quality data from the online air quality monitoring and analysis platform
https://www.aqistudy.cn/
"""
import json
import os
import re
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
from akshare.utils import demjson
def _get_js_path(name: str = None, module_file: str = None) -> str:
"""
    Get the path of the JS file (looked up from the module's directory)
    :param name: file name
    :type name: str
    :param module_file: module path
    :type module_file: str
    :return: file path
:rtype: str
"""
module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))
module_json_path = os.path.join(module_folder, "air", name)
return module_json_path
def _get_file_content(file_name: str = "crypto.js") -> str:
"""
    Get the content of the JS file
    :param file_name: JS file name
    :type file_name: str
    :return: file content
:rtype: str
"""
setting_file_name = file_name
setting_file_path = _get_js_path(setting_file_name, __file__)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def has_month_data(href):
"""
Deal with href node
:param href: href
:type href: str
:return: href result
:rtype: str
"""
return href and re.compile("monthdata.php").search(href)
def air_city_table() -> pd.DataFrame:
"""
    Zhenqi (zq12369.com) - historical air quality query - list of all cities
    https://www.zq12369.com/environment.php?date=2019-06-05&tab=rank&order=DESC&type=DAY#rank
    :return: city mapping table
:rtype: pandas.DataFrame
"""
url = "https://www.zq12369.com/environment.php"
date = "2020-05-01"
if len(date.split("-")) == 3:
params = {
"date": date,
"tab": "rank",
"order": "DESC",
"type": "DAY",
}
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text)[1].iloc[1:, :]
del temp_df['降序']
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = ['序号', '省份', '城市', 'AQI', '空气质量', 'PM2.5浓度', '首要污染物']
temp_df['AQI'] = pd.to_numeric(temp_df['AQI'])
return temp_df
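# Possible usage of air_city_table() above (a live connection to zq12369.com is
# assumed); the AQI column is already numeric, so the table can be sorted directly.
if __name__ == "__main__":
    city_table_df = air_city_table()
    print(city_table_df.sort_values("AQI", ascending=False).head())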
def air_quality_watch_point(
city: str = "杭州", start_date: str = "20220408", end_date: str = "20220409"
) -> pd.DataFrame:
"""
    Zhenqi (zq12369.com) - air quality at monitoring points, down to each monitoring point of a given city
    Air quality data for the specified time range
    https://www.zq12369.com/
    :param city: obtain valid city names via the ak.air_city_table() interface
    :type city: str
    :param start_date: e.g., "20190327"
    :type start_date: str
    :param end_date: e.g., "20200327"
    :type end_date: str
    :return: air quality at the monitoring points of the specified city over the specified date range
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://www.zq12369.com/api/zhenqiapi.php"
file_data = _get_file_content(file_name="crypto.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
method = "GETCITYPOINTAVG"
ctx.call("encode_param", method)
ctx.call("encode_param", start_date)
ctx.call("encode_param", end_date)
city_param = ctx.call("encode_param", city)
ctx.call("encode_secret", method, city_param, start_date, end_date)
payload = {
"appId": "a01901d3caba1f362d69474674ce477f",
"method": ctx.call("encode_param", method),
"city": city_param,
"startTime": ctx.call("encode_param", start_date),
"endTime": ctx.call("encode_param", end_date),
"secret": ctx.call("encode_secret", method, city_param, start_date, end_date),
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
}
r = requests.post(url, data=payload, headers=headers)
data_text = r.text
data_json = demjson.decode(ctx.call("decode_result", data_text))
temp_df = pd.DataFrame(data_json["rows"])
return temp_df
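# Possible usage of air_quality_watch_point() above; the city name and dates are
# arbitrary examples, and a live connection to zq12369.com is assumed.
if __name__ == "__main__":
    watch_point_df = air_quality_watch_point(city="杭州", start_date="20220408", end_date="20220409")
    print(watch_point_df.head())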
def air_quality_hist(
city: str = "杭州",
period: str = "day",
start_date: str = "20190327",
end_date: str = "20200427",
) -> pd.DataFrame:
"""
    Zhenqi (zq12369.com) - historical air quality data
    https://www.zq12369.com/
    :param city: obtain the full city list via the ak.air_city_table() interface
    :type city: str
    :param period: "hour": one record per hour (large volume, slow to download); "day": one record per day; "month": one record per month
    :type period: str
    :param start_date: e.g., "20190327"
    :type start_date: str
    :param end_date: e.g., "20200327"
    :type end_date: str
    :return: air quality data for the specified city and frequency over the specified time range
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "https://www.zq12369.com/api/newzhenqiapi.php"
file_data = _get_file_content(file_name="outcrypto.js")
ctx = py_mini_racer.MiniRacer()
ctx.eval(file_data)
appId = "4f0e3a273d547ce6b7147bfa7ceb4b6e"
method = "CETCITYPERIOD"
timestamp = ctx.eval("timestamp = new Date().getTime()")
p_text = json.dumps(
{
"city": city,
"endTime": f"{end_date} 23:45:39",
"startTime": f"{start_date} 00:00:00",
"type": period.upper(),
},
ensure_ascii=False,
indent=None,
).replace(' "', '"')
secret = ctx.call("hex_md5", appId + method + str(timestamp) + "WEB" + p_text)
payload = {
"appId": "4f0e3a273d547ce6b7147bfa7ceb4b6e",
"method": "CETCITYPERIOD",
"timestamp": int(timestamp),
"clienttype": "WEB",
"object": {
"city": city,
"type": period.upper(),
"startTime": f"{start_date} 00:00:00",
"endTime": f"{end_date} 23:45:39",
},
"secret": secret,
}
need = (
json.dumps(payload, ensure_ascii=False, indent=None, sort_keys=False)
.replace(' "', '"')
.replace("\\", "")
.replace('p": ', 'p":')
.replace('t": ', 't":')
)
headers = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# 'Cache-Control': 'no-cache',
# 'Connection': 'keep-alive',
# 'Content-Length': '1174',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Cookie': 'UM_distinctid=1800e5142c5b85-04b8f11aa852f3-1a343370-1fa400-1800e5142c6b7e; CNZZDATA1254317176=1502593570-1649496979-%7C1649507817; city=%E6%9D%AD%E5%B7%9E; SECKEY_ABVK=eSrbUhd28Mjo7jf8Rfh+uY5E9C+tAhQ8mOfYJHSjSfY%3D; BMAP_SECKEY=N5fGcwdWpeJW46eZ<KEY>',
# 'Host': 'www.zq12369.com',
# 'Origin': 'https://www.zq12369.com',
# 'Pragma': 'no-cache',
# 'Referer': 'https://www.zq12369.com/environment.php?catid=4',
# 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
# 'sec-ch-ua-mobile': '?0',
# 'sec-ch-ua-platform': '"Windows"',
# 'Sec-Fetch-Dest': 'empty',
# 'Sec-Fetch-Mode': 'cors',
# 'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
}
params = {"param": ctx.call("AES.encrypt", need)}
params = {"param": ctx.call("encode_param", need)}
r = requests.post(url, data=params, headers=headers)
temp_text = ctx.call("decryptData", r.text)
data_json = demjson.decode(ctx.call("b.decode", temp_text))
temp_df = pd.DataFrame(data_json["result"]["data"]["rows"])
temp_df.index = temp_df["time"]
del temp_df["time"]
temp_df = temp_df.astype(float, errors="ignore")
return temp_df
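# Possible usage of air_quality_hist() above; period can be "hour", "day" or
# "month" (hourly data is large and slow to download), and the city/dates are
# examples only. A live connection to zq12369.com is assumed.
if __name__ == "__main__":
    hist_df = air_quality_hist(city="杭州", period="day", start_date="20190327", end_date="20200427")
    print(hist_df.head())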
def air_quality_rank(date: str = "") -> pd.DataFrame:
"""
    Zhenqi (zq12369.com) - AQI ranking of 168 cities
    https://www.zq12369.com/environment.php?date=2020-03-12&tab=rank&order=DESC&type=DAY#rank
    :param date: "": real-time air quality ranking; "20200312": ranking for that day; "202003": ranking for that month; "2019": ranking for that year
    :type date: str
    :return: air quality ranking data for the given type of date
:rtype: pandas.DataFrame
"""
if len(date) == 4:
date = date
elif len(date) == 6:
date = "-".join([date[:4], date[4:6]])
elif date == '':
date = '实时'
else:
date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://www.zq12369.com/environment.php"
if len(date.split("-")) == 3:
params = {
"date": date,
"tab": "rank",
"order": "DESC",
"type": "DAY",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[1].iloc[1:, :]
elif len(date.split("-")) == 2:
params = {
"month": date,
"tab": "rank",
"order": "DESC",
"type": "MONTH",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[2].iloc[1:, :]
elif len(date.split("-")) == 1 and date != "实时":
params = {
"year": date,
"tab": "rank",
"order": "DESC",
"type": "YEAR",
}
r = requests.get(url, params=params)
return pd.read_html(r.text)[3].iloc[1:, :]
if date == "实时":
params = {
"tab": "rank",
"order": "DESC",
"type": "MONTH",
}
r = requests.get(url, params=params)
        return pd.read_html(r.text)  # api: pandas.read_html
|
import pandas as pd
import numpy as np
import math
import ipysheet
import ipywidgets as w
import functools
import os
import pyperclip
import warnings
from IPython.display import display
from . import gui
warnings.filterwarnings("ignore", category=DeprecationWarning)
class Assignment:
def __init__(self, from_file=False):
self.config = pd.DataFrame()
self.student_list = pd.DataFrame()
self.var_config = pd.DataFrame()
self.variables = pd.DataFrame()
self.solutions = pd.DataFrame()
self.answers = pd.DataFrame()
self.grading_config = pd.DataFrame()
self.grades = pd.DataFrame()
self.email_template = 'email_template.html'
self.grades_email_template = 'grade_email_template.html'
self._sheets = {
'config': 'configuration',
'student_list': 'students',
'var_config': 'var_config',
'variables': 'variables',
'solutions': 'solutions',
'answers': 'answers',
'grading_config': "grading_config",
'grades': 'grades'
}
if from_file:
self.load_from_file()
os.makedirs('gen', exist_ok=True)
os.makedirs('sheets', exist_ok=True)
def load_from_file(self):
""" Loads attributes from XSLX file
"""
print('------')
try:
fe = open('gen/data.xlsx', 'rb')
except FileNotFoundError:
print('Data file not found')
else:
print('Loading data')
for key in self._sheets.keys():
key_str = self._sheets[key]
try:
data = pd.read_excel(fe, sheet_name=self._sheets[key])
except ValueError as ve:
print(f'** {key_str} ... Data not loaded')
print(f'**** Error: {ve}')
else:
if not data.empty:
sheet_name = self._sheets[key]
setattr(self,
key,
pd.read_excel(fe, sheet_name=sheet_name))
print(f'-- {key_str} ... Data loaded')
else:
print(f'-- {key_str} ... There is no data in the file')
def configure(self):
""" Creates Jupyter notebook interface to populate self.config
Returns:
ipywidget: ipywigtes layout with congiguration GUI.
"""
columns = ['Variable', 'Value']
variable_names = ['Greeting',
'Assignment name',
'Assignment code',
'Course name',
'Course code',
'Professor name',
'Number of questions',
'Number of sheets',
'Password']
rows = len(variable_names)
config_table = ipysheet.sheet(rows=rows,
columns=2,
column_headers=columns,
row_headers=False)
ipysheet.column(0, variable_names, read_only=True)
if self.config.empty:
ipysheet.column(1, [''] * rows)
else:
ipysheet.column(1, self.config['Value'].to_list())
config_table.column_width = [5, 10]
config_table.layout = w.Layout(width='500px',
height='100%')
# Creates buttons
s_c_b_d = 'Save config' # Save config button description
# Save config button layout
s_c_b_l = w.Layout(width='150px', margin='10px 0 10px 0')
config_save_button = w.Button(description=s_c_b_d,
layout=s_c_b_l)
config_save_button.on_click(functools.partial(self.save_config,
config_table))
# Creates interface
gui_lay = w.Layout(margin='10px 10px 10px 10px')
conf_gui = w.VBox([config_save_button, config_table],
layout=gui_lay)
return conf_gui
def save_config(self, config, _):
""" Function to handle Save button in self.configure()
Args:
config (ipysheet table): ipysheet table with config data
_ (): Dummy variable
"""
self.config = ipysheet.to_dataframe(config)
save = []
try:
int(self.config['Value'][6])
save.append(True)
except ValueError:
print('Number of questions has to be an integer')
save.append(False)
try:
int(self.config['Value'][7])
save.append(True)
except ValueError:
            print('Number of sheets has to be an integer')
save.append(False)
if all(save):
self.save_file()
print('------')
print('Configuration saved')
else:
print('------')
print('Configuration not saved')
def save_data(self, table, _):
""" Saves variable configuration data
Args:
table (ipysheet table): ipysheet table with variable config data
_ (): Dummy variable
"""
self.var_config = ipysheet.to_dataframe(table)
# Deletes lines with no variable name
self.var_config = self.var_config[self.var_config['Variable'] != '']
min_value = self.var_config['Min value'].astype(float)
self.var_config['Min value'] = min_value
max_value = self.var_config['Max value'].astype(float)
self.var_config['Max value'] = max_value
self.var_config['Step'] = self.var_config['Step'].astype(float)
self.var_config['Decimals'] = self.var_config['Decimals'].astype(int)
print('------')
print('Variable generation configuration saved')
self.save_file()
def save_file(self):
""" Saves atttribute values to XLSX file
"""
try:
writer = pd.ExcelWriter("gen/data.xlsx", engine='openpyxl')
except FileNotFoundError:
print('gen folder not found')
else:
print('------')
print('Saving data file')
for key in self._sheets.keys():
try:
getattr(self, key).to_excel(writer,
self._sheets[key],
index=False)
except ValueError as ve:
print(f"** {self._sheets[key]} ... couldn't be saved")
print(f'**** Error: {ve}')
else:
print(f'-- {self._sheets[key]} ... Saved')
writer.save()
print('------')
print('Data saved in file')
def load_students(self, csv=False, sep=";", auto_save=True):
""" Loads student list from external file
Args:
csv (bool, optional): True if file is CSV. Defaults to False.
sep (str, optional): Separator for CSV files. Defaults to ";".
auto_save (bool, optional): True for save changes automatically to
XLSX file. Defaults to True.
"""
if csv:
data_file = gui.csv_file()
self.student_list = pd.read_csv(data_file, sep=sep)
else:
data_file = gui.excel_file()
self.student_list = pd.read_excel(data_file)
print('------')
print("Data loaded")
try:
self.add_filename()
except KeyError:
print('id column not found on file')
if auto_save:
self.save_file()
else:
print("Data not saved to file")
def add_filename(self):
""" Creates filename column in student_list DataFrame
"""
name = self.config['Value'][2]
student_list_str = self.student_list["id"].astype(str)
files = "sheets/" + student_list_str + "_" + name + ".pdf"
self.student_list['file'] = files
def config_variables(self):
""" Creates GUI for variable configuration (Jupyter)
Returns:
ipywidget: ipywidget layout
"""
# Creates tables with legible names
columns = ['Variable',
'Min value',
'Max value',
'Step',
'Decimals',
'Unit']
if self.var_config.empty:
table = ipysheet.sheet(rows=1,
columns=6,
column_headers=columns,
row_headers=False)
values = ipysheet.row(0, ['V1', 0, 0, 0, 0, ''])
else:
rows = len(self.var_config['Variable'])
table = ipysheet.sheet(rows=rows,
columns=6,
column_headers=columns,
row_headers=False)
values = ipysheet.cell_range(self.var_config.values.tolist(),
row_start=0,
column_start=0)
# Creates buttons
add_button = w.Button(description='Add row')
add_button.on_click(functools.partial(self.add_row, table))
save_button = w.Button(description='Save')
save_button.on_click(functools.partial(self.save_data, table))
buttons_lay = w.Layout(margin='10px 0 10px 0')
buttons = w.HBox([add_button, save_button], layout=buttons_lay)
# Creates gui
gui_lay = w.Layout(margin='10px 10px 10px 10px')
var_config_table = w.VBox([buttons, table],
layout=gui_lay)
return var_config_table
def add_row(self, table, _):
""" Function to handle Add button in self.config_variables()
Args:
table (ipysheet table): ipysheet table with config data
_ (): Dummy variable
"""
out = w.Output()
with out:
table.rows += 1
rows_str = str(table.rows)
ipysheet.row(table.rows - 1, ['V' + rows_str, 0, 0, 0, 0, ''])
def generate_variable(self, low, up, step, size, decimals):
""" Generate value for single variable
Args:
low (float): Minimum value of the variable.
up (float): Maximum value of the variable.
step (float): Difference between two consecutive generated
variables.
size (int): Number of values to generate (one per student).
decimals (int): Number of decimal positions in the generated values.
Returns:
float: variable value
"""
n = math.floor(((up - low) / step) + 1)
variable = np.random.randint(0, high=n, size=size)
variable = variable * step
variable = variable + low
return np.round(variable, decimals=decimals)
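# Example (hypothetical numbers): generate_variable(10.0, 20.0, 0.5, size=3, decimals=2)
# draws 3 values from the grid {10.0, 10.5, ..., 20.0}, e.g. array([12.5, 17.0, 10.5]);
# here n = floor((20 - 10)/0.5) + 1 = 21 grid points are available to sample from.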
def generate_variables(self):
""" Function to generate all random variables.
"""
# Student data for sheet generation
self.variables = pd.DataFrame(self.student_list['number'])
self.variables['name'] = self.student_list['name']
for i in range(len(self.var_config)):
self.variables[self.var_config['Variable'][i]] = \
self.generate_variable(self.var_config['Min value'][i],
self.var_config['Max value'][i],
self.var_config['Step'][i],
len(self.variables),
self.var_config['Decimals'][i])
print('------')
print('Variables generated')
self.save_file()
def generate_solutions(self, solver):
""" Uses the solver() function to generate the solution list
Args:
solver (function): Function to solve an individual assignment.
"""
na = int(self.config['Value'][6])
self.initialize_solutions(na)
for i in range(len(self.variables)):
solver(self, i)
print('------')
print('Solutions obtained')
def initialize_solutions(self, na):
""" Initializes the DataFrame to store solutions
Args:
na (int): number for answers for each student
"""
self.solutions = pd.DataFrame(self.variables['number'])
# Initialize the DataFrame with NaN
n = len(self.variables)
s = np.empty([n])
s[:] = np.nan
for i in range(na):
self.solutions["ap" + str(i + 1)] = s
print('------')
print('Solutions DataFrame initialized')
def load_answers(self, date_format, sep=",", dec=".", auto=True):
""" Loads students answers in a CSV format.
Args:
date_format (str): Date format of the CSV
sep (str, optional): Element separator. Defaults to ",".
dec (str, optional): Decimal separator. Defaults to ".".
auto (bool, optional): True for automatic cleaning of answers.
Defaults to True.
"""
answers_file = gui.csv_file()
self.answers = pd.read_csv(answers_file, sep=sep, decimal=dec)
if auto:
self.clean_answers_auto(date_format)
errors = self.check_answers()
if errors.empty:
print('Answers loaded with no errors')
self.save_file()
else:
print('Found the following errors')
display(errors)
print('Answers not loaded, please fix errors and try again')
self.answers = pd.DataFrame()
def clean_answers_auto(self, date_format):
""" Function to clean the answers uploaded from the CSV to match the
format required.
Args:
date_format (str): Date format of the CSV
"""
ap = self.solutions.columns.tolist()
del ap[0:1]
columns = self.answers.columns.values.tolist()
self.answers.drop(columns[-1], inplace=True, axis=1)
self.answers.drop(columns[1], inplace=True, axis=1)
columns = self.answers.columns.values.tolist()
columns[1] = 'id'
columns[2] = 'number'
columns[3:] = ap[:]
self.answers.columns = columns
first_column = self.answers.iloc[:, 0]
self.answers['date'] = pd.to_datetime(first_column,
format=date_format)
self.answers.sort_values(['id', 'date'], inplace=True)
self.answers.drop_duplicates(subset=['id'], keep='last', inplace=True)
self.answers.sort_values('number', inplace=True)
self.answers.drop(columns[0], inplace=True, axis=1)
def config_grading(self):
"""Creates GUI for grading configuration (Jupyter)
Returns:
ipywidget: ipywidget layout
"""
ap = self.solutions.columns.tolist()
del ap[0:1]
ap.insert(0, 'Variable')
grading_configuration_table = ipysheet.sheet(rows=2,
columns=len(ap),
column_headers=ap,
row_headers=False)
ipysheet.cell(0, 0, 'Tolerance (%)', read_only=True)
ipysheet.cell(1, 0, 'Points', read_only=True)
if self.grading_config.empty:
for i in range(len(ap) - 1):
ipysheet.column(i + 1, ['', ''])
else:
for i in range(len(ap) - 1):
column_values = self.grading_config.values[:, 1:][:, i]
ipysheet.column(i + 1, column_values)
grading_configuration_table.layout = w.Layout(width='500px',
height='100%')
save_button = w.Button(description='Save config',
layout=w.Layout(width='150px',
margin='10px 0 20px 0'))
save_button.on_click(functools.partial(self.save_grading_conf,
grading_configuration_table))
gui_vBox = [save_button, grading_configuration_table]
gui_lay = w.Layout(margin='10px 10px 10px 10px')
grading_conf_gui = w.VBox(gui_vBox, layout=gui_lay)
return grading_conf_gui
def save_grading_conf(self, grading_config_table, _):
""" Function to handle save_button in config_grading()
Args:
grading_config_table (ipysheet table): ipysheet table with config
data
_ (): Dummy variable
"""
self.grading_config = ipysheet.to_dataframe(grading_config_table)
print('------')
print("Configuration saved")
self.save_file()
def grade(self, min=0, max=10, decimals=2):
""" Function to obtain students' grade
Args:
min (int, optional): Minimum grade on the scale. Defaults to 0.
max (int, optional): Maximum grade on the scale. Defaults to 10.
decimals (int, optional): Number of decimals in the grade.
Defaults to 2.
"""
correct =
|
pd.DataFrame(self.student_list[['id', 'number']])
|
pandas.DataFrame
|
from pathlib import Path
import pandas
from repository.local_storage import LocalStorage
class Csv(LocalStorage):
def __init__(self) -> None:
super().__init__("CSV")
self.root = "./data_dumps/"
def store(self, path, data):
json =
|
pandas.DataFrame.from_dict(data, orient="index")
|
pandas.DataFrame.from_dict
|
"""Script to generate figures for Beltran & Kannan et. al.
Two figures were made by hand. Figure 1 is a pair of blender renderings. The
relevant blend file names are simply mentioned below.
Where data has to be pre-computed, the procedure is mentioned."""
import re
from pathlib import Path
import pickle
import matplotlib.cm as cm
import matplotlib.ticker as tck
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import seaborn as sns
import pandas as pd
from matplotlib import pyplot as plt
import scipy
from scipy import stats
from scipy.optimize import curve_fit
#from sklearn.gaussian_process import GaussianProcessRegressor
#from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from nuc_chain import geometry as ncg
from nuc_chain import linkers as ncl
from nuc_chain import rotations as ncr
from MultiPoint import propagator
from nuc_chain import fluctuations as wlc
from nuc_chain import visualization as vis
from nuc_chain.linkers import convert
# Plotting parameters
#width of one column on ppt slide in inch
col_width = 5.67
full_width = 8.63
aspect_ratio = 2/3
col_height = aspect_ratio*col_width
full_height = aspect_ratio*full_width
# default_* figure sizes are referenced by several plotting functions below but were not
# defined in this excerpt; assumed here to alias the single-column dimensions.
default_width = col_width
default_height = col_height
plot_params = {
'backend': 'pdf',
'savefig.format': 'pdf',
'text.usetex': True,
'font.size': 18,
'figure.figsize': [full_width, full_height],
'figure.facecolor': 'white',
'axes.grid': False,
'axes.edgecolor': 'black',
'axes.facecolor': 'white',
'axes.titlesize': 20,
'axes.labelsize': 20,
'legend.fontsize': 18,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'axes.linewidth': 1,
'xtick.top': False,
'xtick.bottom': True,
'xtick.direction': 'out',
'xtick.minor.size': 3,
'xtick.minor.width': 0.5,
'xtick.major.pad': 5,
'xtick.major.size': 5,
'xtick.major.width': 1,
'ytick.left': True,
'ytick.right': False,
'ytick.direction': 'out',
'ytick.minor.size': 3,
'ytick.minor.width': 0.5,
'ytick.major.pad': 5,
'ytick.major.size': 5,
'ytick.major.width': 1,
'lines.linewidth': 2
}
plt.rcParams.update(plot_params)
teal_flucts = '#387780'
red_geom = '#E83151'
dull_purple = '#755F80'
rich_purple = '#e830e8'
def render_chain(linkers, unwraps=0, **kwargs):
entry_rots, entry_pos = ncg.minimum_energy_no_sterics_linker_only(linkers, unwraps=unwraps)
# on linux, hit ctrl-d in the ipython terminal but don't accept the
# "exit" prompt to get the mayavi interactive mode to work. make sure
# to use "off-screen rendering" and fullscreen your window before
# saving (this is actually required if you're using a tiling window
# manager like e.g. i3 or xmonad).
vis.visualize_chain(entry_rots, entry_pos, linkers, unwraps=unwraps, plot_spheres=True, **kwargs)
def draw_triangle(alpha, x0, width, orientation, base=10,
**kwargs):
"""Draw a triangle showing the best-fit slope on a linear scale.
Parameters
----------
alpha : float
the slope being demonstrated
x0 : (2,) array_like
the "left tip" of the triangle, where the hypotenuse starts
width : float
horizontal size
orientation : string
'up' or 'down', control which way the triangle's right angle "points"
base : float
scale "width" for non-base 10
Returns
-------
corner : (2,) np.array
coordinates of the right-angled corner of the triangle
"""
x0, y0 = x0
x1 = x0 + width
y1 = y0 + alpha*(x1 - x0)
plt.plot([x0, x1], [y0, y1], 'k')
if (alpha >= 0 and orientation == 'up') \
or (alpha < 0 and orientation == 'down'):
plt.plot([x0, x1], [y1, y1], 'k')
plt.plot([x0, x0], [y0, y1], 'k')
# plt.plot lines have nice rounded caps
# plt.hlines(y1, x0, x1, **kwargs)
# plt.vlines(x0, y0, y1, **kwargs)
corner = [x0, y1]
elif (alpha >= 0 and orientation == 'down') \
or (alpha < 0 and orientation == 'up'):
plt.plot([x0, x1], [y0, y0], 'k')
plt.plot([x1, x1], [y0, y1], 'k')
# plt.hlines(y0, x0, x1, **kwargs)
# plt.vlines(x1, y0, y1, **kwargs)
corner = [x1, y0]
else:
raise ValueError(r"Need $\alpha\in\mathbb{R} and orientation\in{'up', 'down'}")
return corner
def draw_power_law_triangle(alpha, x0, width, orientation, base=10,
**kwargs):
"""Draw a triangle showing the best-fit power-law on a log-log scale.
Parameters
----------
alpha : float
the power-law slope being demonstrated
x0 : (2,) array_like
the "left tip" of the power law triangle, where the hypotenuse starts
(in log units, to be consistent with draw_triangle)
width : float
horizontal size in number of major log ticks (default base-10)
orientation : string
'up' or 'down', control which way the triangle's right angle "points"
base : float
scale "width" for non-base 10
Returns
-------
corner : (2,) np.array
coordinates of the right-angled corner of the triangle
"""
x0, y0 = [base**x for x in x0]
x1 = x0*base**width
y1 = y0*(x1/x0)**alpha
plt.plot([x0, x1], [y0, y1], 'k')
if (alpha >= 0 and orientation == 'up') \
or (alpha < 0 and orientation == 'down'):
plt.plot([x0, x1], [y1, y1], 'k')
plt.plot([x0, x0], [y0, y1], 'k')
# plt.plot lines have nice rounded caps
# plt.hlines(y1, x0, x1, **kwargs)
# plt.vlines(x0, y0, y1, **kwargs)
corner = [x0, y1]
elif (alpha >= 0 and orientation == 'down') \
or (alpha < 0 and orientation == 'up'):
plt.plot([x0, x1], [y0, y0], 'k')
plt.plot([x1, x1], [y0, y1], 'k')
# plt.hlines(y0, x0, x1, **kwargs)
# plt.vlines(x1, y0, y1, **kwargs)
corner = [x1, y0]
else:
raise ValueError(r"Need $\alpha\in\mathbb{R} and orientation\in{'up', 'down'}")
return corner
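# Minimal usage sketch (assumed example, not one of the original figures): annotate a
# log-log plot with a slope-1/2 guide triangle spanning one decade in x.
def _demo_power_law_triangle():
    x = np.logspace(0, 4, 50)
    plt.loglog(x, np.sqrt(x))
    # triangle tip at (10^1, 10^0.5), one decade wide, right angle pointing down
    corner = draw_power_law_triangle(1/2, x0=[1, 0.5], width=1, orientation='down')
    plt.text(*corner, '$L^{1/2}$')
    return corner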
# Precompute once before calling plot_fig31_rise_vs_linker_length(); that function relies on `rise`:
#link_ix, unwrap_ix, rise, angle, radius = ncg.tabulate_rise(dp_f=ncg.dp_omega_exit)
def plot_fig31_rise_vs_linker_length():
fig, ax = plt.subplots(figsize=(1.2*default_width, default_height))
links = np.arange(10, 101)
#kuhns1to250 = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')
#calculate the 'phi' angle corresponding to twist due to linker
phis_dp_omega_exit = np.zeros(links.size)
for i, link in enumerate(links):
dP, Onext = ncg.dp_omega_exit(link, unwrap=0)
phi, theta, alpha = ncr.phi_theta_alpha_from_R(Onext)
#record angles in units of pi
phis_dp_omega_exit[i] = phi/np.pi + 1
plt.plot(links, rise[0:91,0], linewidth=0.5)
plt.scatter(links, rise[0:91,0], c=phis_dp_omega_exit, cmap='Spectral', s=3);
plt.xlabel('Linker length (bp)')
plt.ylabel(r'Rise (nm)')
plt.subplots_adjust(left=0.1, bottom=0.19, top=0.95, right=0.97)
cb = plt.colorbar(ticks=[0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2])
cb.set_label(r'$\phi$')
cb.ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g $\pi$'))
#cb.ax.yaxis.set_yticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2],
# [r'$0$', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$', r'$\pi$',
# r'$\frac{5\pi}{4}$', r'$\frac{3\pi}{2}$', r'$\frac{7\pi}{4}$', r'$2\pi$'])
fig.text(0.13, 0.47, r'38 bp', size=10)
fig.text(0.12, 0.57, r'36 bp', size=10)
plt.savefig('plots/thesis/fig3.1_rise-vs-linker-length.pdf')
default_lis = [36]
default_colors = [teal_flucts]
def plot_r2_homo(lis=default_lis, colors=None):
"""The r2 of the 36bp homogenous chain (0 unwrapping) compared to the
wormlike chain with the corresponding Kuhn length."""
if colors is None:
if len(lis) == 2:
colors = default_colors
else:
colors = len(lis) * [teal_flucts]
assert(len(colors) == len(lis))
fig, ax = plt.subplots(figsize=(7.79, 4.43))
x = np.logspace(0, 7, 100)
#plot rigid rod line
plt.plot(x, x, '^', markersize=3, color=red_geom)
hdfs = {}
for i, li in enumerate(lis):
hdfs[li] = pd.read_csv(f'./csvs/r2/r2-fluctuations-mu_{li}-sigma_0_10_0unwraps.csv')
try:
del hdfs[li]['Unnamed: 0']
except:
pass
hdfs[li] = hdfs[li].set_index(['variance', 'chain_id']).loc[0.0, 0.0]
hdfs[li].iloc[0,0:2] = 1 # rmax,r2 == (0,0) ==> (1,1)
plt.plot(hdfs[li]['rmax'], np.sqrt(hdfs[li]['r2']), color=colors[i])
for li in lis:
y = np.sqrt(wlc.r2wlc(x, hdfs[li]['kuhn'].mean()/2))
plt.plot(x, y, '-.', color=[0,0,0], markersize=1)
xmin = 1
ymin = xmin
ymax = 700
xmax = 3_000
# bands representing different regimes of the R^2
plt.fill_between(x, ymin, ymax, where=x<12, color=[0.96, 0.95, 0.95])
plt.fill_between(x, ymin, ymax, where=((x>=12)&(x<250)), color=[0.99, 0.99, 0.99])
plt.fill_between(x, ymin, ymax, where=x>=250, color=[0.9, 0.9, 0.91])
# power law triangle for the two extremal regimes
corner = draw_power_law_triangle(1, [np.log10(2), np.log10(3)], 0.5, 'up')
plt.text(3, 11, '$L^1$')
corner = draw_power_law_triangle(1/2, [np.log10(350), np.log10(30)], 0.8, 'down')
plt.text(700, 16, '$L^{1/2}$')
plt.xlim([xmin, xmax])
plt.ylim([ymin, ymax])
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Total linker length (nm)')
plt.ylabel(r'End-to-end distance (nm)')
legend = ['Rigid rod (0T)'] \
+ ['Fluctuations, ' + r'$L_i = ' + str(li) + r'$ bp' for li in lis] \
+ [r'WLC, best fit']
plt.legend(legend, loc='upper left')
plt.tight_layout()
plt.savefig('./plots/thesis-pres/r2_homogenous_vs_wlc.pdf', bbox_inches='tight')
def plot_kuhns_homo():
kuhns = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')
fig, ax = plt.subplots(figsize=(9, 4.43))
links = np.arange(31, 52)
ax.plot(links, kuhns[links-1, 0], '--o', markersize=8, lw=3.5, color=teal_flucts)
plt.xticks(np.arange(31, 52, 2))
plt.xlim([31, 51])
plt.xlabel('Fixed linker length (bp)')
plt.ylabel('Kuhn length (nm)')
plt.tight_layout()
plt.savefig('plots/thesis-pres/kuhn_length_in_nm_31to51links_0unwraps.pdf')
def render_fig32b_chains(**kwargs):
for li in [36, 38, 41, 47]:
render_chain(14*[li], **kwargs)
def render_fig34_chains(**kwargs):
links = np.tile(38, 20)
colors = [teal_flucts, red_geom, dull_purple]
for i, unwrap in enumerate([0, 21, 42]):
col = colors[i].lstrip('#') #string of the form #hex
#convert hex color to RGB tuple of the form (0.0 <= floating point number <= 1.0, "", "")
col = tuple(int(col[i:i+2], 16)/256 for i in (0, 2, 4))
render_chain(links, unwraps=unwrap, nucleosome_color=col, **kwargs)
def plot_kuhn_hetero(mu=41):
"""use scripts/r2-tabulation.py and wlc.aggregate_existing_kuhns to create
the kuhns_so_far.csv file."""
fig, ax = plt.subplots(figsize=(7.4, 4.31))
# index: variance_type, type, mu, variance, unwrap
# columns: slope, intercept, rvalue, pvalue, stderr, b
all_kuhns = pd.read_csv('./csvs/kuhns_so_far.csv', index_col=np.arange(5))
kg = all_kuhns.loc['box', 'geometrical', mu].reset_index()
kg = kg.sort_values('variance')
ax.plot(kg['variance'].values, kg['b'].values, '--^', markersize=6, label='Zero-temperature',
color=red_geom)
kf = all_kuhns.loc['box', 'fluctuations', mu].reset_index()
kf = kf.sort_values('variance')
ax.plot(kf['variance'].values, kf['b'].values, '-o', markersize=6, label='Fluctuating',
color=teal_flucts)
rdf = pd.read_csv('./csvs/r2/r2-fluctuations-exponential-link-mu_41-0unwraps.csv')
b = rdf['kuhn'].mean()
xlim = plt.xlim()
plt.plot([-10, 50], [b, b], 'k-.', label='Exponential chain')
plt.xlim(xlim)
ax.set_ylim([0, 100])
plt.xlabel('Linker length variability $\pm\sigma$ (bp)')
plt.ylabel('Kuhn length (nm)')
plt.legend()
#fig.text(1.3, 0, r'$\pm 0 bp$', size=9)
#fig.text(1.6, 0, r'$\pm 2 bp$', size=9)
#fig.text(1.9, 0, r'$\pm 6 bp$', size=9)
# plt.subplots_adjust(left=0.07, bottom=0.15, top=0.92, right=0.97)
plt.tight_layout()
plt.savefig('./plots/thesis-pres/kuhn_length_vs_variability_41_sigma0to40.pdf',
bbox_inches='tight')
def render_fig36_chains(mu=41, sigmas=[0, 2, 6], N=20):
# N (number of linkers per rendered chain) was undefined in the original excerpt;
# exposed here as a parameter with an assumed default of 20.
for sigma in sigmas:
sign_bit = 2*np.round(np.random.rand(N)) - 1
render_chain(mu + sign_bit*np.random.randint(sigma+1), size=(N,))
def plot_r2_exponential(mu=36, colors=None):
"""The r2 of the 36bp exponential chain (0 unwrapping) compared to the
wormlike chain with the corresponding Kuhn length."""
fig, ax = plt.subplots(figsize=(4.45, 4.29))
x = np.logspace(0, 7, 100)
#plot exponential chains
rdf = pd.read_csv('./csvs/r2/r2-fluctuations-exponential-link-mu_36-0unwraps.csv')
try:
del rdf['Unnamed: 0']
except:
pass
for i, chain in rdf.groupby(['mu', 'chain_id']):
chain.iloc[0,0] = 1
chain.iloc[0,1] = 1
plt.plot(chain['rmax'], np.sqrt(chain['r2']), color=dull_purple, alpha=0.3, lw=0.5)
break
lp_bestfit = rdf['kuhn'].mean()/2
y = np.sqrt(wlc.r2wlc(x, lp_bestfit))
plt.plot(x, y, '-', color=teal_flucts)
legend = [r'Exponential, $\langle L_i \rangle= 36bp$'] \
+ [r'WLC, $b \approx 30nm$']
plt.legend(legend, bbox_to_anchor=(0, 1.02, 1, .102), loc=3, borderaxespad=0)
for i, chain in rdf.groupby(['mu', 'chain_id']):
chain.iloc[0,0] = 1
chain.iloc[0,1] = 1
plt.plot(chain['rmax'], np.sqrt(chain['r2']), color=dull_purple, alpha=0.3, lw=0.5)
plt.plot(x, y, '-', color=teal_flucts)
plt.xlabel('Total linker length (nm)')
plt.ylabel(r'$\sqrt{\langle R^2 \rangle}$')
xmin = 0.5
ymin = xmin
xmax = 100000
ymax = 10000
# power law triangle for the two extremal regimes
corner = draw_power_law_triangle(1, [np.log10(1.3), np.log10(3)], 0.8, 'up')
plt.text(2, 26, '$L^1$')
corner = draw_power_law_triangle(1/2, [np.log10(2800), np.log10(125)], 1, 'down')
plt.text(5500, 35, '$L^{1/2}$')
plt.xlim([xmin, xmax])
plt.ylim([ymin, ymax])
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Total linker length (nm)')
plt.ylabel(r'$\sqrt{\langle R^2 \rangle}$ (nm)')
plt.subplots_adjust(left=0.19, bottom=0.17, top=0.76, right=0.97)
plt.savefig('plots/thesis-pres/r2-exponential.pdf', bbox_inches='tight')
def plot_old_fig4a(ax=None):
"""The r2 of the 36bp homogenous chain (0 unwrapping) compared to the
wormlike chain with the corresponding Kuhn length."""
fig, ax = plt.subplots(figsize=(default_width, default_height))
rdf = pd.read_csv('./csvs/r2/r2-fluctuations-exponential-link-mu_36-0unwraps.csv')
try:
del rdf['Unnamed: 0']
except:
pass
for i, chain in rdf.groupby(['mu', 'chain_id']):
chain.iloc[0,0] = 1
chain.iloc[0,1] = 1
plt.plot(chain['rmax'], chain['r2'], color=dull_purple, alpha=0.4)
break
x = np.logspace(0, 7, 100)
y = wlc.r2wlc(x, rdf['kuhn'].mean()/2)
plt.plot(x, y, '-', color='k')
plt.legend([r'$\langle L_i \rangle= 36bp$', r'$WLC, l_p \approx 15 nm$'],
bbox_to_anchor=(0, 1.02, 1, .102), loc=3, borderaxespad=0)
for i, chain in rdf.groupby(['mu', 'chain_id']):
chain.iloc[0,0] = 1
chain.iloc[0,1] = 1
plt.plot(chain['rmax'], chain['r2'], color=dull_purple, alpha=0.4)
plt.plot(x, y, '-', color='k')
plt.xscale('log')
plt.yscale('log')
plt.xlim([0.5, 100000])
plt.ylim([0.5, 10000000])
plt.xlabel('Total linker length (nm)')
plt.ylabel(r'$\sqrt{\langle R^2 \rangle}$')
plt.savefig('plots/PRL/fig4a_r2_exp_vs_wlc.pdf', bbox_inches='tight')
def plot_exponential_kuhns():
fig, ax = plt.subplots(figsize=(7.4, 4.31))
kuhns = pd.read_csv('csvs/kuhns_so_far.csv')
kuhns = kuhns.set_index(['variance_type', 'type', 'mu', 'variance'])
mu_max = 100
# dotted line at 100 nm
ax.plot(np.linspace(0, mu_max, 100), np.tile(100, 100), '.',
markersize=5, label='Bare DNA', color=[0,0,0])
def make_plottable(df):
df = df.groupby('mu').mean().reset_index()
df = df[df['mu'] < mu_max].dropna()
return df
exp_fluct = kuhns.loc['exponential', 'fluctuations']
exp_fluct = make_plottable(exp_fluct)
ax.plot(exp_fluct['mu'], exp_fluct['b'], label='Exponential', color=teal_flucts)
homo_fluct = kuhns.loc['homogenous', 'fluctuations']
homo_fluct = make_plottable(homo_fluct)
ax.plot(homo_fluct['mu'], homo_fluct['b'], color=dull_purple, alpha=0.5, lw=0.75, label='Homogeneous')
#lines for yeast, mice, human
yeast = 15
mice = 45
human = 56
linelocs = [yeast, mice, human]
# ax.text(yeast+2, 6, "A")
# ax.text(mice+2, 6, "B")
# ax.text(human+2, 6, "C")
ax.vlines(linelocs, [0, 0, 0], [exp_fluct.loc[exp_fluct['mu'] == loc, 'b'].values for loc in linelocs])
#best fit line for geometrical case
# m, b, rval, pval, stderr = stats.linregress(mug, kuhnsg)
# best_fit = lambda x: m*x + b
# xvals = np.linspace(51, 100, 40)
# ax.plot(xvals, best_fit(xvals), ':', lw=0.75, color=red_geom)
plt.ylim([0, 110])
plt.legend(loc=(0.05, 0.6))
# plt.subplots_adjust(left=0.14, bottom=0.15, top=0.98, right=0.99)
plt.xlabel(r'$\langle L_i \rangle$ (bp)')
plt.ylabel(r'Kuhn length (nm)')
plt.tight_layout()
plt.savefig('plots/thesis-pres/kuhn_exponential.pdf', bbox_inches='tight')
def plot_fig39_homo_loop():
kink41 = np.load(f'csvs/Bprops/0unwraps/41link/kinkedWLC_greens_41link_0unwraps_1000rvals_50nucs.npy')
kink47 = np.load(f'csvs/Bprops/0unwraps/47link/kinkedWLC_greens_47link_0unwraps_1000rvals_50nucs.npy')
bare41 = np.load(f'csvs/Bprops/0unwraps/41link/bareWLC_greens_41link_0unwraps_1000rvals_50nucs.npy')
integrals = [kink47, kink41, bare41]
labels = ['47bp', '41bp', 'Straight chain']
links_list = [np.tile(47, 50), np.tile(41, 50), np.tile(41, 50)]
plot_prob_loop_vs_fragment_length(integrals, labels, links_list, unwrap=0, nucmin=2)
plt.subplots_adjust(left=0.17, bottom=0.20, top=0.96, right=0.97)
plt.savefig('plots/thesis/fig39_looping-homo.pdf')
def plot_prob_loop_vs_fragment_length(integrals, labels, links, unwrap, Nvals=None, nucmin=2, **kwargs):
"""Plot looping probability vs. chain length, where looping probability defined as G(0;L).
Parameters
----------
integrals : (L,) list of (rvals.size, Nvals.size) greens function arrays
list of matrices G(r; N) where columns correspond to Nvals
labels : (L,) array-like
strings corresponding to label for each greens function (printed in legend)
links : (L,) list of (num_linkers,) arrays
list of full set of linkers in each chain, where num_linkers is the total number of
nucleosomes in each chain
unwrap : float
unwrapping amount in bp. Assumes fixed unwrapping.
Nvals : array-like
number of linkers down the chain for which each green's functions in 'integrals' was calculated.
Defaults to one per monomer of the chain. Assumes Nvals is the same for all chains for which
you are plotting looping probabilities.
nucmin : float
minimum number of nucleosomes for which looping probability should be plotted. Defaults to 2,
since first nucleosome is numerically not trusted. For shorter linkers (<42bp), recommended
to set nucmin to 3 since first two points are sketchy.
"""
if Nvals is None:
Nvals = np.arange(1, len(links[0])+1)
fig, ax = plt.subplots(figsize=(default_width, 1.1*default_height))
#ignore first couple nucleosomes because of noise
indmin = nucmin-1
inds = Nvals - 1
inds = inds[inds >= indmin]
color_red = sns.color_palette("hls", 8)[0]
#HARD-CODED COLOR TUPLE: #D9A725 corresponds to
#yellow = (217./255, 167./255, 37./255)
#HARD-CODED COLOR TUPLE: #387780 corresponds to
#teal = (56./255, 119./255, 128./255)
colors = [color_red, '#D9A725', '#387780']
for i in range(len(labels)):
ldna = convert.genomic_length_from_links_unwraps(links[i], unwraps=unwrap)
ploops = integrals[i][0, indmin:]
pldna = ldna[inds]
ax.loglog(pldna, ploops, '-o', markersize=2, linewidth=1,
color=colors[i], label=labels[i], **kwargs)
ax.legend(loc=(0.32, 0.03), frameon=False, fontsize=10)
plt.xlabel('Genomic distance (bp)')
plt.ylabel(r'$P_\mathrm{loop}\;\;\;(\mathrm{bp}^{-3})$')
def render_fig39_chains(**kwargs):
color_red = sns.color_palette("hls", 8)[0]
colors = [color_red, '#D9A725', '#387780']
for i, link in enumerate([47, 41, 41]):
col = colors[i].lstrip('#') #string of the form #hex
#convert hex color to RGB tuple of the form (0.0 <= floating point number <= 1.0, "", "")
col = tuple(int(col[i:i+2], 16)/256 for i in (0, 2, 4))
links = np.tile(link, 10)
render_chain(links, unwraps=0, nucleosome_color=col, **kwargs)
def plot_hetero_looping(df=None, rmax_or_ldna='rmax', named_sim='mu56'):
fig, ax = plt.subplots(figsize=(6.17, 4.13))
n = rmax_or_ldna
# first set sim-specific parameters, draw scaling triangles at manually
# chosen locations
if (named_sim, rmax_or_ldna) == ('mu56', 'ldna'):
draw_power_law_triangle(-3/2, x0=[3.8, -7.1], width=0.4, orientation='up')
plt.text(10**(3.95), 10**(-6.8), '$L^{-3/2}$')
# manually set thresholds to account for numerical instability at low n
min_n = 10**2.6
elif (named_sim, rmax_or_ldna) == ('mu56', 'rmax'):
draw_power_law_triangle(-3/2, x0=[3.0, -7.5], width=0.4, orientation='up')
plt.text(10**3.1, 10**(-7.3), '$L^{-3/2}$')
min_n = 10**2.2
elif (named_sim, rmax_or_ldna) == ('links31-to-52', 'rmax'):
draw_power_law_triangle(-3/2, x0=[3.0, -7.5], width=0.4, orientation='up')
plt.text(10**3.1, 10**(-7.3), '$L^{-3/2}$')
min_n = 10**2.0
elif (named_sim, rmax_or_ldna) == ('links31-to-52', 'ldna'):
draw_power_law_triangle(-3/2, x0=[3.5, -7], width=0.4, orientation='up')
plt.text(10**3.6, 10**(-6.8), '$L^{-3/2}$')
min_n = 10**2.5
if df is None:
df = load_looping_statistics_heterogenous_chains(named_sim=named_sim)
# if the first step is super short, we are numerically unstable
df.loc[df['rmax'] <= 5, 'ploops'] = np.nan
# if the output is obviously bad numerics, ignore it
df.loc[df['ploops'] > 10**(-4), 'ploops'] = np.nan
df.loc[df['ploops'] < 10**(-13), 'ploops'] = np.nan
df = df.dropna()
df = df.sort_values(n)
df_int = df.groupby(['num_nucs', 'chain_id']).apply(interpolated_ploop,
rmax_or_ldna=rmax_or_ldna, n=np.logspace(np.log10(min_n), np.log10(df[n].max()), 1000))
df_int_ave = df_int.groupby(n+'_interp')['ploops_interp'].agg(['mean', 'std', 'count'])
df_int_ave = df_int_ave.reset_index()
xgrid = df_int_ave[n+'_interp'].values
y_pred = df_int_ave['mean'].values
sig = df_int_ave['std'].values/np.sqrt(df_int_ave['count'].values - 1)
# 95% joint-confidence intervals, bonferroni corrected
ste_to_conf = scipy.stats.norm.ppf(1 - (0.05/1000)/2)
# plot all the individual chains, randomly chop some down to make plot look
# nicer
palette = sns.cubehelix_palette(n_colors=len(df.groupby(['num_nucs', 'chain_id'])))
ord = np.random.permutation(len(palette))
for i, (label, chain) in enumerate(df.groupby(['num_nucs', 'chain_id'])):
num_nucs = int(label[0])
max_nuc_to_plot = num_nucs*(1 - 0.2*np.random.rand())
chain = chain[chain['nuc_id'] <= max_nuc_to_plot]
chain = chain[chain[n] >= min_n]
plt.plot(chain[n].values, chain['ploops'].values,
c=palette[ord[i]], alpha=0.15, lw=0.5, label=None)
# bold a couple of the chains
bold_c = palette[int(9*len(palette)/10)]
if named_sim == 'mu56':
chains_to_bold = [(100,1), (50,120), (100,112)]
elif named_sim == 'links31-to-52':
chains_to_bold = [(50, 1), (50, 3), (50, 5)]
min_n = 10**2.7
for chain_id in chains_to_bold:
chain = df.loc[chain_id]
chain = chain[chain[n] >= min_n]
#plt.plot(chain[n].values, chain['ploops'].values, c=bold_c, alpha=0.6,
# label=None)
fill = plt.fill_between(xgrid,
y_pred - ste_to_conf*sig,
y_pred + ste_to_conf*sig,
alpha=.10, color='r')
plt.plot(xgrid, y_pred, 'r-', label='Average $\pm$ 95\%')
# load in the straight chain, in [bp] (b = 100nm/ncg.dna_params['lpb'])
bare_n, bare_ploop = wlc.load_WLC_looping()
# now rescale the straight chain to match average
if named_sim == 'mu56':
b = 40.67 # nm
k = b/100 # scaling between straight and 56bp exponential chain
nn = 146/56 # wrapped amount to linker length ratio
elif named_sim == 'links31-to-52':
b = 2*13.762 # nm
k = b/100 # scaling between straight and uniform chain
nn = 146/41.5
if rmax_or_ldna == 'ldna':
# we use the fact that (e.g. for exp_mu56, 0 unwraps)
# df['ldna'] = df['rmax'] + 146*df['nuc_id']
# (on ave) = df['rmax'] + 146*df['rmax']/56
bare_n = bare_n*(1 + nn)
x, y = bare_n*k, bare_ploop/k**3,
lnormed = plt.plot(x[x >= min_n], y[x >= min_n],
'k-.', label=f'Straight chain, b={b:0.1f}nm')
# also plot just the bare WLC
b = 2*wlc.default_lp
l100 = plt.plot(bare_n[bare_n>=min_n], bare_ploop[bare_n>=min_n], '-.', c=teal_flucts,
label=f'Straight chain, b=100nm')
# plt.plot(bare_n, wlc.sarah_looping(bare_n/2/wlc.default_lp)/(2*wlc.default_lp)**2)
plt.xlim([10**(np.log10(min_n)*1), 10**(np.log10(np.max(df[n]))*0.99)])
if rmax_or_ldna == 'rmax':
plt.ylim([10**(-11), 10**(-6)])
elif rmax_or_ldna == 'ldna':
plt.ylim([10**(-13), 10**(-5)])
plt.tick_params(axis='y', which='minor', left=False)
if rmax_or_ldna == 'rmax':
plt.xlabel('Total linker length (bp)')
elif rmax_or_ldna == 'ldna':
plt.xlabel('Genomic distance (bp)')
plt.ylabel(r'$P_\mathrm{loop}\;\;\;(\mathrm{bp}^{-3})$')
# plt.legend([fill, l100, lnormed], ['Average $\pm$ 95\%',
# 'Straight chain, b=100nm', f'Straight chain, b={b:0.2f}nm'],
plt.legend(loc='lower right')
plt.yscale('log')
plt.xscale('log')
plt.subplots_adjust(left=0.17, bottom=0.17, top=0.96, right=0.97)
#plt.subplots_adjust(left=0.12, bottom=0.13, top=0.96, right=0.99)
plt.tight_layout()
#plt.savefig(f'plots/thesis-pres/looping_{named_sim}_{rmax_or_ldna}.pdf', bbox_inches='tight')
def interpolated_ploop(df, rmax_or_ldna='ldna', n=np.logspace(2, 5, 1000),
ploop_col='ploops'):
"""Function to apply to the looping probabilities of a given chain to
resample it to a fixed set of values."""
n_col = rmax_or_ldna
n = n[(n >= df[n_col].min()) & (n <= df[n_col].max())]
ploop = np.interp(n, df[n_col].values, df[ploop_col].values,
left=df[ploop_col].values[0], right=df[ploop_col].values[-1])
return pd.DataFrame(np.stack([n, ploop]).T, columns=[n_col+'_interp', ploop_col+'_interp'])
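# Illustrative sketch (hypothetical data, not from the simulations): resample one chain's
# looping curve onto a fixed log-spaced grid so different chains can be averaged point-by-point.
def _demo_interpolated_ploop():
    chain = pd.DataFrame({'ldna': [200, 2000, 20000],
                          'ploops': [1e-7, 1e-9, 1e-11]})
    # returns columns 'ldna_interp' and 'ploops_interp' on the requested grid
    return interpolated_ploop(chain, rmax_or_ldna='ldna', n=np.logspace(2.5, 4, 5))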
def load_looping_statistics_heterogenous_chains(*, dir=None, file_re=None, links_fmt=None, greens_fmt=None, named_sim=None):
"""Load in looping probabilities for all example chains of a given type
done so far.
Specify how to find the files via the directory dir, a regex that can
extract the "num_nucs" and "chain_id" from the folder name, a format string that
expands num_nucs, chain_id into the file name holding the linker lengths
for that chain, and another format string that expands into the filename
holding the greens function.
OR: just pass named_sim='mu56' or 'links31-to-52' to load in exponential chains with
mean linker length 56 or uniform linker chain with lengths from 31-52,
resp.
"""
if named_sim is not None:
file_re = re.compile("([0-9]+)nucs_chain([0-9]+)")
links_fmt = 'linker_lengths_{num_nucs}nucs_chain{chain_id}_{num_nucs}nucs.npy'
greens_fmt = 'kinkedWLC_greens_{num_nucs}nucs_chain{chain_id}_{num_nucs}nucs.npy'
if named_sim == 'mu56':
#directory in which all chains are saved
loops_dir = Path('csvs/Bprops/0unwraps/heterogenous/exp_mu56')
elif named_sim == 'links31-to-52':
loops_dir = Path('csvs/Bprops/0unwraps/heterogenous/links31to52')
else:
raise ValueError('Unknown sim type!')
cache_csv = Path(loops_dir/f'looping_probs_heterochains_{named_sim}_0unwraps.csv')
if cache_csv.exists():
df =
|
pd.read_csv(cache_csv)
|
pandas.read_csv
|
from selenium import webdriver
import pandas as pd
import shelve
from auth.login import login
from crawl.page_users import get_page_users_links
from crawl.page_users import get_page_users_data
choice=input('Keep Working Hidden? (y/n)\n').lower()
if choice=='y':
opt=webdriver.ChromeOptions()
opt.add_argument('headless')
elif choice=='n':
opt=None
driver=webdriver.Chrome("./driver/chromedriver.exe",chrome_options=opt)
links_file=shelve.open('./data/links/links')
#Logging into LinkedIn account
driver.get(links_file['login'])
print("login called")
driver=login(driver)
print("""
Scrape:
1. Your Connections details
2. Company details (followed by you)
3. Company Employees data (followed by you)
""")
choice=int(input())
if choice==1:
page_users_links=get_page_users_links(driver,links_file['connections'])
user_data=get_page_users_data(driver,page_users_links)
|
pd.DataFrame(user_data)
|
pandas.DataFrame
|
import json
import pandas as pd
from .. import Client as VanillaClient
from .. import Time
from ..constants import DEFAULT_DECISION_TREE_VERSION
from ..errors import CraftAiBadRequestError
from ..types import GENERATED_TIME_TYPES
from .interpreter import Interpreter
from .utils import format_input, is_valid_property_value, create_timezone_df
def chunker(to_be_chunked_df, chunk_size):
return (
to_be_chunked_df[pos : pos + chunk_size]
for pos in range(0, len(to_be_chunked_df), chunk_size)
)
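# Small illustration (hypothetical frame): chunker() yields consecutive row slices, so a
# 10-row DataFrame split with chunk_size=3 gives batches of 3, 3, 3 and 1 rows.
def _demo_chunker():
    demo_df = pd.DataFrame({"x": range(10)})
    return [len(chunk) for chunk in chunker(demo_df, 3)]  # -> [3, 3, 3, 1]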
class Client(VanillaClient):
"""Client class for craft ai's API using pandas dataframe types"""
def add_agent_operations(self, agent_id, operations, useWebSocket=False):
if isinstance(operations, pd.DataFrame):
if not isinstance(operations.index, pd.DatetimeIndex):
raise CraftAiBadRequestError(
"Invalid dataframe given, it is not time indexed."
)
if operations.index.tz is None:
raise CraftAiBadRequestError(
"""tz-naive DatetimeIndex are not supported,
it must be tz-aware."""
)
agent = super(Client, self).get_agent(agent_id)
operations = operations.copy(deep=True)
tz_col = [
key
for key, value in agent["configuration"]["context"].items()
if value["type"] == "timezone"
]
if tz_col:
tz_col = tz_col[0]
operations[tz_col] = create_timezone_df(operations, tz_col).iloc[:, 0]
chunk_size = self.config["operationsChunksSize"]
for chunk in chunker(operations, chunk_size):
chunk_operations = [
{
"timestamp": row.name.value
// 10 ** 9, # Timestamp.value returns nanoseconds
"context": {
col: format_input(row[col])
for col in chunk.columns
if is_valid_property_value(col, row[col])
},
}
for _, row in chunk.iterrows()
]
super(Client, self).add_agent_operations(
agent_id, chunk_operations, useWebSocket
)
return {
"message": 'Successfully added %i operation(s) to the agent "%s/%s/%s" context.'
% (
len(operations),
self.config["owner"],
self.config["project"],
agent_id,
)
}
else:
return super(Client, self).add_agent_operations(agent_id, operations)
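# Usage sketch (hypothetical agent id and context column): the DataFrame must carry a
# tz-aware DatetimeIndex; each row becomes one context operation for the agent.
#   df = pd.DataFrame({"temperature": [21.5, 22.0]},
#                     index=pd.date_range("2020-01-01", periods=2,
#                                         freq="H", tz="Europe/Paris"))
#   client.add_agent_operations("my_agent", df)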
def add_agents_operations_bulk(self, payload):
"""Add operations to a group of agents.
:param list payload: contains the information necessary for the action.
It's in the form [{"id": agent_id, "operations": operations}]
where id is a str containing only characters in "a-zA-Z0-9_-",
must be between 1 and 36 characters long, and must reference an
existing agent,
and operations is either a list of dicts or a DataFrame in
the form given in the craft_ai documentation and matching the
configuration of the agent.
:return: list of agents containing a message about the added
operations.
:rtype: list of dict.
:raises CraftAiBadRequestError: if all of the ids are invalid or
reference non-existing agents, or one of the operations is invalid.
"""
# Check all ids, raise an error if all ids are invalid
valid_indices, _, _ = self._check_entity_id_bulk(
payload, check_serializable=False
)
valid_payload = [payload[i] for i in valid_indices]
new_payload = []
for agent in valid_payload:
operations = agent["operations"]
agent_id = agent["id"]
if isinstance(operations, pd.DataFrame):
if not isinstance(operations.index, pd.DatetimeIndex):
raise CraftAiBadRequestError(
"Invalid dataframe given for agent "
"{}, it is not time indexed.".format(agent_id)
)
if operations.index.tz is None:
raise CraftAiBadRequestError(
"tz-naive DatetimeIndex are not supported for "
"agent {}, it must be tz-aware.".format(agent_id)
)
agent = super(Client, self).get_agent(agent_id)
tz_col = [
key
for key, value in agent["configuration"]["context"].items()
if value["type"] == "timezone"
]
if tz_col:
tz_col = tz_col[0]
operations[tz_col] = create_timezone_df(operations, tz_col).iloc[
:, 0
]
new_operations = [
{
"timestamp": row.name.value
// 10 ** 9, # Timestamp.value returns nanoseconds
"context": {
col: format_input(row[col])
for col in operations.columns
if is_valid_property_value(col, row[col])
},
}
for _, row in operations.iterrows()
]
new_payload.append({"id": agent_id, "operations": new_operations})
elif isinstance(operations, list):
# Check if the operations are serializable
json.dumps([agent])
new_payload.append({"id": agent_id, "operations": operations})
else:
raise CraftAiBadRequestError(
"The operations are not put in a DataFrame or a list"
"of dict form for the agent {}.".format(agent_id)
)
return super(Client, self).add_agents_operations_bulk(new_payload)
def get_agent_operations(self, agent_id, start=None, end=None):
operations_list = super(Client, self).get_agent_operations(agent_id, start, end)
return pd.DataFrame(
[operation["context"] for operation in operations_list],
index=pd.to_datetime(
[operation["timestamp"] for operation in operations_list], unit="s"
).tz_localize("UTC"),
)
def get_agent_states(self, agent_id, start=None, end=None):
states = super(Client, self).get_agent_states(agent_id, start, end)
return pd.DataFrame(
[state["sample"] for state in states],
index=pd.to_datetime(
[state["timestamp"] for state in states], unit="s"
).tz_localize("UTC"),
)
@staticmethod
def check_decision_context_df(contexts_df):
if isinstance(contexts_df, pd.DataFrame):
if contexts_df.empty:
raise CraftAiBadRequestError(
"Invalid dataframe given, dataframe is empty."
)
if not isinstance(contexts_df.index, pd.DatetimeIndex):
raise CraftAiBadRequestError(
"Invalid dataframe given, it is not time indexed."
)
if contexts_df.index.tz is None:
raise CraftAiBadRequestError(
"""tz-naive DatetimeIndex are not supported,
it must be tz-aware."""
)
else:
raise CraftAiBadRequestError("Invalid data given, it is not a DataFrame.")
@staticmethod
def decide_from_contexts_df(tree, contexts_df):
Client.check_decision_context_df(contexts_df)
return Interpreter.decide_from_contexts_df(tree, contexts_df)
def get_agent_decision_tree(
self, agent_id, timestamp=None, version=DEFAULT_DECISION_TREE_VERSION
):
# Convert pandas timestamp to a numerical timestamp in seconds
if isinstance(timestamp, pd.Timestamp):
timestamp = timestamp.value // 10 ** 9
return super(Client, self).get_agent_decision_tree(agent_id, timestamp, version)
def get_generator_decision_tree(
self, generator_id, timestamp=None, version=DEFAULT_DECISION_TREE_VERSION
):
# Convert pandas timestamp to a numerical timestamp in seconds
if isinstance(timestamp, pd.Timestamp):
timestamp = timestamp.value // 10 ** 9
return super(Client, self).get_generator_decision_tree(
generator_id, timestamp, version
)
def get_generator_operations(self, generator_id, start=None, end=None):
# Convert pandas timestamp to a numerical timestamp in seconds
if isinstance(start, pd.Timestamp):
start = start.value // 10 ** 9
if isinstance(end, pd.Timestamp):
end = end.value // 10 ** 9
operations_list = super(Client, self).get_generator_operations(
generator_id, start, end
)
# convert List to DataFrame with a column for each context property
df = pd.json_normalize(operations_list)
return df
def _generate_decision_df_and_tz_col(self, entity_id, contexts_df, configuration):
df = contexts_df.copy(deep=True)
tz_col = [
key
for key, value in configuration["context"].items()
if value["type"] == "timezone"
]
if tz_col:
tz_col = tz_col[0]
df[tz_col] = create_timezone_df(contexts_df, tz_col).iloc[:, 0]
return df, tz_col
def _generate_time_from_context(self, params):
time = Time(
t=params["context_ops"][0].value
// 1000000000, # Timestamp.value returns nanoseconds
timezone=getattr(params["context_ops"], params["tz_col"])
if params["tz_col"]
else params["context_ops"][0].tz,
)
return time
def _generate_decision_context(self, params, context, time):
configuration = params["configuration"]
if configuration != {}:
context_result = Interpreter._rebuild_context(configuration, context, time)
context = context_result["context"]
else:
context = Interpreter.join_decide_args((context, time))
# Convert timezones as integers into standard +/hh:mm format
# This should only happen when no time generated value is required
decide_context = Interpreter._convert_timezones_to_standard_format(
configuration, context.copy()
)
return decide_context
def _format_context(self, params):
context = {}
for feature in params["feature_names"]:
# Skip generated features in the context
feature_configuration = params["configuration"]["context"][feature]
has_generated_key = "is_generated" in list(feature_configuration.keys())
if feature_configuration["type"] in GENERATED_TIME_TYPES and (
(has_generated_key and feature_configuration["is_generated"])
# If is_generated is not given, by default it is considered True
or not has_generated_key
):
continue
value = getattr(params["context_ops"], feature)
if is_valid_property_value(feature, value):
context[feature] = format_input(value)
return context
def _pandas_agent_boosting_decide_from_df(
self, agent_id, from_ts, to_ts, params, df
):
decisions_payload = []
for row in df.itertuples(name="column_names"):
params["context_ops"] = row
context = self._format_context(params)
time = self._generate_time_from_context(params)
decide_context = self._generate_decision_context(params, context, time)
decisions_payload.append(
{
"entityName": agent_id,
"timeWindow": [from_ts, to_ts],
"context": decide_context,
}
)
decisions = super(Client, self).get_agent_bulk_boosting_decision(
decisions_payload
)
output_name = params["configuration"]["output"][0]
return (
{
"{}_predicted_value".format(output_name): decision["output"][
"predicted_value"
]
}
for decision in decisions
)
def decide_boosting_from_contexts_df(self, agent_id, from_ts, to_ts, contexts_df):
predictions_df_list = []
Client.check_decision_context_df(contexts_df)
configuration = self.get_agent(agent_id)["configuration"]
feature_names = [
feature
for feature in configuration["context"].keys()
if feature not in configuration["output"]
]
chunk_size = self.config["operationsChunksSize"]
for chunk in chunker(contexts_df, chunk_size):
df, tz_col = self._generate_decision_df_and_tz_col(
agent_id, chunk, configuration
)
predictions_iter = self._pandas_agent_boosting_decide_from_df(
agent_id,
from_ts,
to_ts,
{
"configuration": configuration,
"feature_names": feature_names,
"tz_col": tz_col,
},
df,
)
predictions_df = pd.DataFrame(predictions_iter, index=chunk.index)
predictions_df_list.append(predictions_df)
return predictions_df_list[0].append(predictions_df_list[1:])
def _pandas_generator_boosting_decide_from_df(
self, generator_id, from_ts, to_ts, params, df
):
decisions_payload = []
for row in df.itertuples(name="column_names"):
params["context_ops"] = row
context = self._format_context(params)
time = self._generate_time_from_context(params)
decide_context = self._generate_decision_context(params, context, time)
decisions_payload.append(
{
"entityName": generator_id,
"timeWindow": [from_ts, to_ts],
"context": decide_context,
}
)
decisions = super(Client, self).get_generator_bulk_boosting_decision(
decisions_payload
)
output_name = params["configuration"]["output"][0]
return (
{
"{}_predicted_value".format(output_name): decision["output"][
"predicted_value"
]
}
for decision in decisions
)
def decide_generator_boosting_from_contexts_df(
self, generator_id, from_ts, to_ts, contexts_df
):
predictions_df_list = []
Client.check_decision_context_df(contexts_df)
configuration = self.get_generator(generator_id)["configuration"]
feature_names = [
feature
for feature in configuration["context"].keys()
if feature not in configuration["output"]
]
chunk_size = self.config["operationsChunksSize"]
for chunk in chunker(contexts_df, chunk_size):
df, tz_col = self._generate_decision_df_and_tz_col(
generator_id, chunk, configuration
)
predictions_iter = self._pandas_generator_boosting_decide_from_df(
generator_id,
from_ts,
to_ts,
{
"configuration": configuration,
"feature_names": feature_names,
"tz_col": tz_col,
},
df,
)
predictions_df =
|
pd.DataFrame(predictions_iter, index=chunk.index)
|
pandas.DataFrame
|
#tbox_pipeline_master.py
#By <NAME>
#Reads INFERNAL output data and calculates T-box features
#Also calculates thermodynamic parameters (code by <NAME>)
import sys
import re
import pandas as pd
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import subprocess
#Function to read an INFERNAL output file and extract sequence names, metadata, structure, and sequence
#Metadata is as described in the INFERNAL manual
def read_INFERNAL(file):
metadata = []
Tbox_start = -1
Tbox_end = -1
sp = ""
sp_start = ""
metadataLine = -1
structLine = -1
seqLine = -1
Tboxes = {'Name':[], 'Rank':[], 'E_value':[], 'Score':[], 'Bias':[], 'Tbox_start':[],'Tbox_end':[],
'Strand':[], 'CM_accuracy':[], 'GC':[], 'Sequence':[], 'Structure':[]}
with open(file) as f:
for lineCount, line in enumerate(f):
if re.match(">>", line) is not None: #We found a match!
Tboxes['Name'].append(line.split(" ")[1])
metadataLine = lineCount + 3
structLine = lineCount + 6
seqLine = lineCount + 9
if lineCount == metadataLine:
metadata = list(filter(None, line.split(' '))) #Splits by spaces, and strips empty strings
Tboxes['Rank'].append(int(metadata[0][1:len(metadata[0])-1])) #All except first and last characters
Tboxes['E_value'].append(float(metadata[2])) #third entry
Tboxes['Score'].append(float(metadata[3])) #fourth
Tboxes['Bias'].append(float(metadata[4])) #fifth
Tbox_start = metadata[9]
Tboxes['Tbox_start'].append(int(Tbox_start))
Tbox_end = metadata[10]
Tboxes['Tbox_end'].append(int(Tbox_end))
Tboxes['Strand'].append(metadata[11])
Tboxes['CM_accuracy'].append(float(metadata[13]))
Tboxes['GC'].append(float(metadata[15][0:4])) #ignore the \n at the end
if lineCount == structLine:
sp = list(filter(None,line.split(' ')))
structure = sp[0]
Tboxes['Structure'].append(structure) #Take the second-to-last one
if lineCount == seqLine:
seq_line = list(filter(None, line.strip().split(' ')))
sequence = ' '.join(seq_line[2:len(seq_line) - 1])
#Fallback method (adapted from <NAME> code):
if sequence not in line: #This can happen if there are two consecutive spaces in the middle of the sequence.
rsp = line.rsplit(Tbox_end, maxsplit = 1)
lsp = rsp[0].split(Tbox_start + ' ')
sequence = lsp[len(lsp) - 1].strip()
print("Fallback on line %d" % lineCount)
#Do a sanity check
if len(sequence) != len(structure): #This is an error!
print("\nParsing ERROR occured on line %d" % lineCount)
print(line) #For debug purposes
print(sequence)
print(structure)
print(seq_line)
#sequence = "ERROR"
Tboxes['Sequence'].append(sequence)
return pd.DataFrame(Tboxes)
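# Usage sketch (hypothetical file name): parse an INFERNAL hit report into a DataFrame,
# one row per T-box hit, then inspect the highest-scoring hits.
def _demo_read_INFERNAL(infernal_output="example_INFERNAL_output.txt"):
    hits = read_INFERNAL(infernal_output)
    return hits.sort_values('E_value').head()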
#Function to find features of a T-box given the secondary structure
#Parameters:
#seq is the sequence containing the T-box
#struct is the secondary structure
#start is the position of the T-box within the structure (default = 1)
#Output:
#Stem 1 start, stem 1 specifier loop, codon, stem 1 end, antiterminator start, discriminator, antiterminator end
def tbox_features(seq, struct, offset = 0):
warnings = ""
codon_region = ""
if len(seq)!=len(struct):
raise RuntimeError("Sequence length (%d) is not equal to structure length (%d)" % (len(seq), len(struct)))
if not (struct.startswith(':') or struct.startswith('<')):
warnings += "BAD_SEC_STRUCT;"
#Find the Stem 1 start
m = re.search('<',struct)
if m is not None:
s1_start = m.start() #Gets the first '<'
else:
s1_start = -1
warnings += "NO_STEM1_START;"
#Find the Stem 1 end
m = re.search('>\.*,',struct)
if m is not None:
s1_end = m.start() #Gets the first '>,' or possibly '>.,'
else:
s1_end = -1
warnings += "NO_STEM1_END;"
#Find the Stem 1 specifier loop start
matches = [m.start() for m in re.finditer('>\.*-', struct)]
if len(matches) > 1:
s1_loop_start = matches[1] #Gets the second occurrence of '>-' or possibly '>.-'
#Find the Stem 1 specifier end
matches = [m.end() for m in re.finditer('-\.*>', struct)]
if len(matches) > 1:
s1_loop_end = matches[1] #Gets the second occurrence of '->' or possibly '-.>'
else:
s1_loop_end = -1
warnings += "NO_SPEC_END;"
else:
s1_loop_start = -1
warnings += "NO_SPEC_START;"
#Use fallback method to find Stem 1 specifier end
s1_loop_end = -1
for m in re.finditer('-\.*>', struct):
end = m.end()
if end > s1_loop_start and end < s1_end - 1: #The last loop STRICTLY before the stem 1 end
s1_loop_end = end
if s1_loop_end == -1: warnings += "NO_SPEC_END;"
#Check to see if the Stem 1 has truncations
if '~' in struct[s1_start:s1_end+1]: #there is a truncation
warnings += "TRUNCATED_STEM_1;"
#Recalculate Stem 1 loop end
matches = [m.end() for m in re.finditer('-\.*>', struct[:s1_end+1])]
if len(matches) > 1: #there should be at least 2
s1_loop_end = matches[-2] #get the second to last
if s1_loop_end == -1:
warnings += "NO_SPEC_END;"
else: #Recalculate Stem 1 loop start
matches = [m.start() for m in re.finditer('>\.*-', struct[:s1_loop_end+1])]
if len(matches) >= 1:
s1_loop_start = matches[-1] #get the last one before the Stem 1 loop end
if s1_loop_end > s1_loop_start: #Sanity check
#Read the codon
codon = seq[s1_loop_end - 5: s1_loop_end - 2]
#Check the codon
if re.search('[^AaCcGgUu]', codon) is not None:
warnings += "BAD_CODON;"
else: #Assign the codon region
minus_one_pos = s1_loop_end - 6
while minus_one_pos > 0 and re.match('[^AaCcGgUu]', seq[minus_one_pos]) is not None:
minus_one_pos -= 1 #Look for the first ACGU character before the codon
plus_one_pos = s1_loop_end - 2
while plus_one_pos < len(seq) - 1 and re.match('[^AaCcGgUu]', seq[plus_one_pos]) is not None:
plus_one_pos += 1 #Look for the first ACGU character after the codon
codon_region = seq[minus_one_pos] + codon + seq[plus_one_pos] #Get the surrounding region too, for +1/-1 predictions
else:
codon = ""
warnings += "NO_CODON;"
#Find the antiterminator start
antiterm_list = [m.start() for m in re.finditer(',\.*<', struct)] #Makes a list of all occurences of ',<'
if len(antiterm_list) > 0:
antiterm_start = antiterm_list[len(antiterm_list)-1] #Gets the last one
discrim_start = struct.find('---', antiterm_start + 3) #Find the loop containing the discriminator
discrim_end = discrim_start + 4
discrim = seq[discrim_start:discrim_end]
else:
antiterm_start = -1
discrim_start = -1
discrim_end = -1
discrim = ""
warnings += "NO_ANTITERM_START;"
#Check the discriminator
if not discrim.startswith('UGG') or re.search('[^AaCcGgUu]', discrim) is not None:
warnings += "BAD_DISCRIM;"
#Find the antiterminator
match = re.search('>\.*:',struct)
if match is not None: #Sometimes the antiterminator end is missing from the sequence
antiterm_end = match.start()
else: #Simply get the last '>'
end_list = [m.start() for m in re.finditer('>', struct)]
if len(end_list) > 0:
antiterm_end = end_list[len(end_list)-1]
else:
antiterm_end = -1
warnings += "NO_ANTITERM_END;"
#Adjust values based on offset
s1_start += offset
s1_loop_start += offset + 1
s1_loop_end += offset - 1
s1_end += offset
antiterm_start += offset + 1
antiterm_end += offset
discrim_start += offset
discrim_end += offset - 1
#Return a tuple with the features identified
return (s1_start, s1_loop_start, s1_loop_end, codon, s1_end, antiterm_start, discrim, antiterm_end, codon_region, warnings, discrim_start, discrim_end)
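# Usage sketch: tbox_features() returns a 12-element tuple; unpacking it by name makes the
# downstream bookkeeping explicit (seq/struct here would come from one read_INFERNAL row).
def _demo_tbox_features(seq, struct):
    (s1_start, s1_loop_start, s1_loop_end, codon, s1_end, antiterm_start,
     discrim, antiterm_end, codon_region, warnings_str, discrim_start,
     discrim_end) = tbox_features(seq, struct)
    return {'codon': codon, 'discriminator': discrim, 'warnings': warnings_str}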
#Convert between position in INFERNAL output and fasta sequence
#Count the gaps and adjust for them
#Returns a mapping between INFERNAL position and fasta position
def map_fasta(seq, fasta, offset = 0, allowed = 'AaGgCcUu'):
#Initialize counters
count_fasta = offset
parse_buffer = ""
parsing = False
mapping = []
for c in seq:
mapping.append(count_fasta)
if parsing:
if c == ']': #The end of the numerical gap
parsing = False
count_fasta += int(parse_buffer.strip()) #Parse the value
#print(parse_buffer) #debug
parse_buffer = "" #reset buffer for the next gap
else:
parse_buffer += c #Add the character to the parse buffer
elif c in allowed:
count_fasta += 1
elif c == '[': # The start of a numerical gap
parsing = True
return mapping
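# Example (toy input, offset=0): for seq "AC[12]GU" the characters map to FASTA positions
# [0, 1, 2, 2, 2, 2, 14, 15] -- the bracketed "[12]" advances the FASTA counter by 12
# without consuming alignment columns, which is how numerical gaps are handled.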
#Function to find the end of the terminator (last occurence of TTTTT in the fasta sequence)
def term_end(sequence, start, pattern = 'TTTTT'):
match = sequence.rfind(pattern, start) #get the last occurence of the pattern, after the start
if match > 0:
return match + len(pattern) #- 1
return len(sequence) #fallback: return the end
#Function to compute derived T-box features from the prediction
#Note: tboxes must contain fasta sequences!
def tbox_derive(tboxes):
#Derive more features for visualization
#ALSO: Handle negative-strand T-boxes
for i in range(len(tboxes['FASTA_sequence'])):
fasta = tboxes['FASTA_sequence'][i]
print('Mapping ' + tboxes['Name'][i]) #debug
seq = tboxes['Sequence'][i]
if isinstance(seq, str): #sanity check. Skip NaN
#Check if the T-box is on the NEGATIVE strand
if tboxes['Tbox_start'][i] > tboxes['Tbox_end'][i]:
print("Converting – strand to +: " + tboxes['Name'][i])
#Convert name
split_name = tboxes['Name'][i].split(':')
seq_start = split_name[1].split('-')[0]
seq_end = split_name[1].split('-')[1]
tboxes.at[tboxes.index[i], 'Name'] = split_name[0] + ':' + seq_end + '-' + seq_start
#Convert FASTA sequence
sequence = Seq(fasta, generic_dna)
tboxes.at[tboxes.index[i], 'FASTA_sequence'] = str(sequence.reverse_complement())
#Convert T-box start and end (since these are FASTA-relative)
#Other features, which are INFERNAL-relative, should not be converted yet
tboxes.at[tboxes.index[i], 'Tbox_start'] = len(fasta) - tboxes['Tbox_start'][i] + 1
tboxes.at[tboxes.index[i], 'Tbox_end'] = len(fasta) - tboxes['Tbox_end'][i] + 1
print("Conversion complete. New name is: " + tboxes['Name'][i])
#Create mapping between INFERNAL sequence and FASTA sequence
mapping = map_fasta(seq, tboxes['FASTA_sequence'][i], offset = tboxes['Tbox_start'][i] - 1)
#Update the positions of existing features.
s1_start = int(tboxes['s1_start'][i])
if s1_start > 0:
tboxes.at[tboxes.index[i], 's1_start'] = mapping[s1_start]
s1_loop_start = int(tboxes['s1_loop_start'][i])
if s1_loop_start > 0:
tboxes.at[tboxes.index[i], 's1_loop_start'] = mapping[s1_loop_start]
s1_loop_end = int(tboxes['s1_loop_end'][i])
if s1_loop_end > 0:
if s1_loop_end < len(mapping):
tboxes.at[tboxes.index[i], 's1_loop_end'] = mapping[s1_loop_end]
#Calculate codon range
tboxes.at[tboxes.index[i], 'codon_start'] = mapping[s1_loop_end - 4]
tboxes.at[tboxes.index[i], 'codon_end'] = mapping[s1_loop_end - 2]
else:
print("Warning: mapping error for s1_loop_end:")
print(s1_loop_end)
print(len(mapping))
print(mapping)
s1_end = int(tboxes['s1_end'][i])
if s1_end > 0:
tboxes.at[tboxes.index[i], 's1_end'] = mapping[s1_end]
aterm_start = int(tboxes['antiterm_start'][i])
if aterm_start > 0:
tboxes.at[tboxes.index[i], 'antiterm_start'] = mapping[aterm_start]
#Calculate discriminator range
discrim_start = int(tboxes['discrim_start'][i])
if discrim_start > 0:
tboxes.at[tboxes.index[i], 'discrim_start'] = mapping[discrim_start]
tboxes.at[tboxes.index[i], 'discrim_end'] = mapping[discrim_start + 3] #+3 because inclusive
aterm_end = int(tboxes['antiterm_end'][i])
if aterm_end > 0:
aterm_end = min(aterm_end, len(mapping)-1)
tboxes.at[tboxes.index[i], 'antiterm_end'] = mapping[aterm_end]
#Calculate terminator end
tboxes.at[tboxes.index[i], 'term_end'] = term_end(tboxes['FASTA_sequence'][i], int(tboxes['antiterm_end'][i]))
return tboxes
def term_end_regex(sequence, start, pattern = '[T]{3,}[ACGT]{,1}[T]{1,}[ACGT]{,1}[T]{1,}'):
print(start)
if pd.isna(start):
return len(sequence)
matches = [m.end() for m in re.finditer(pattern, sequence[start:])]
if len(matches)>0:
return start + matches[0]
return len(sequence) #fallback: return the end
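#Note (added for clarity): the default pattern matches a poly-T tract of at least
#five T's in total, tolerating up to two single-base interruptions after the first
#three T's (e.g. 'TTTTT' and 'TTTATGTT' both match); as with term_end, the sequence
#length is returned when no match is found downstream of start.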
#RNAfold on target sequence
def get_fold(sequence):
#Initialize outputs
structure = ""
energy = ""
errors = ""
vienna_args = ['RNAfold', '-T', '37', '--noPS'] # arguments used to call RNAfold at 37 degrees
vienna_input = str(sequence) # the input format
vienna_call = subprocess.run(vienna_args, stdout = subprocess.PIPE, stderr = subprocess.PIPE, input = vienna_input, encoding = 'ascii')
output = vienna_call.stdout.split('\n')
if len(output) > 1: # if there is a result
output = output[-2]
output = output.split()
structure = output[0] # gets last line's structure (always will be first element when sliced)
energy = output[-1].replace(')', '').replace('(', '') # get energy (always will be last element in slice)
errors = vienna_call.stderr.replace('\n',' ')
return structure, energy, errors
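#Minimal usage sketch (illustrative; assumes the ViennaRNA 'RNAfold' binary is on PATH):
#    structure, energy, errors = get_fold("GGGAAAUCCC")
#structure is the dot-bracket MFE structure and energy its free energy in kcal/mol.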
#RNAfold on target sequence, with constraints
def get_fold_constraints(sequence, structure):
#Initialize outputs
energy = ""
errors = ""
structure_out = ""
vienna_args = ['RNAfold', '-T', '37', '-C', '--noPS'] # arguments used to call RNAfold at 37 degrees with constraints
vienna_input = str(sequence) + '\n' + str(structure) # the input format
vienna_call = subprocess.run(vienna_args, stdout = subprocess.PIPE, stderr = subprocess.PIPE, input = vienna_input, encoding = 'ascii')
output = vienna_call.stdout.split('\n')
if len(output) > 1: # if there is a result
output = output[-2]
output = output.split()
structure_out = output[0] # gets last line's structure (always will be first element when sliced)
energy = output[-1].replace(')', '').replace('(', '') # get energy (always will be last element in slice)
errors = vienna_call.stderr.replace('\n','') #changed from ' '
return structure_out, energy, errors
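#Note (added for clarity): structure here is an RNAfold constraint string; passing
#'.' * len(sequence) (no constraints) should reproduce the unconstrained fold from
#get_fold above.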
#Make antiterminator constraints for folding
def make_antiterm_constraints(sequence, structure):
if
|
pd.isna(sequence)
|
pandas.isna
|
#!/usr/bin/env python
'''
pubmed: part of the pyneurovault package
pyneurovault: a python wrapper for the neurovault api
'''
from Bio import Entrez
from nltk import (
PorterStemmer,
word_tokenize
)
import os
import pandas as pd
import re
import sys
import tarfile
__author__ = ["Poldracklab","<NAME>","<NAME>","<NAME>"]
__version__ = "$Revision: 1.0 $"
__date__ = "$Date: 2015/01/16 $"
__license__ = "BSD"
# Pubmed
# These functions will find papers of interest to crosslist with Neurosynth
class Pubmed:
"""Init Pubmed Object"""
def __init__(self,email):
self.email = email
def _get_pmc_lookup(self):
print("Downloading latest version of pubmed central ftp lookup...")
self.ftp = pd.read_csv("ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/file_list.txt",skiprows=1,sep="\t",header=None)
self.ftp.columns = ["URL","JOURNAL","PMCID"]
def get_pubmed_central_ids(self):
        if not hasattr(self, 'ftp'): self._get_pmc_lookup()
return list(self.ftp["PMCID"])
"""Download full text of articles with pubmed ids pmids to folder"""
def download_pubmed(self,pmids,download_folder):
        if not hasattr(self, 'ftp'): self._get_pmc_lookup()
# pmids = [float(x) for x in pmids]
# Filter ftp matrix to those ids
# I couldn't figure out how to do this in one line
subset =
|
pd.DataFrame(columns=self.ftp.columns)
|
pandas.DataFrame
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from io import StringIO
from typing import Tuple
import pandas as pd
import pytest
from edfi_lms_ds_loader.helpers.assignment_splitter import split
def describe_given_two_canvas_assignments_with_variable_number_submission_types() -> None:
@pytest.fixture
def when_splitting_the_assignments() -> Tuple[pd.DataFrame, pd.DataFrame]:
# Arrange
data = StringIO(
"""LMSSectionSourceSystemIdentifier,SourceSystemIdentifier,SubmissionType,SourceSystem,CreateDate,LastModifiedDate
104,111,"['online_text_entry', 'online_upload']",Canvas,2021-03-11,2021-03-12
104,112,['online_upload'],Canvas,2021-03-13,2021-03-14"""
)
assignments_df = pd.read_csv(data)
# Act
assignments_df, submission_types_df = split(assignments_df) # type: ignore
return assignments_df, submission_types_df
def it_should_remove_SubmissionType_from_the_assignments_DataFrame(
when_splitting_the_assignments,
) -> None:
df, _ = when_splitting_the_assignments
assert "SubmissionType" not in list(df.columns)
def it_should_preserve_other_columns_and_values_in_assignments_DataFrame(
when_splitting_the_assignments,
) -> None:
df, _ = when_splitting_the_assignments
# Testing one row seems sufficient
row = df.iloc[0]
assert row["SourceSystemIdentifier"] == 111
assert row["SourceSystem"] == "Canvas"
assert row["LMSSectionSourceSystemIdentifier"] == 104
assert row["CreateDate"] == "2021-03-11"
assert row["LastModifiedDate"] == "2021-03-12"
def it_should_map_111_online_text_entry_to_submission_types(
when_splitting_the_assignments,
) -> None:
_, df = when_splitting_the_assignments
row = df.iloc[0]
assert row["SourceSystemIdentifier"] == 111
assert row["SourceSystem"] == "Canvas"
assert row["SubmissionType"] == "online_text_entry"
assert row["CreateDate"] == "2021-03-11"
def it_should_map_111_online_upload_to_submission_types(
when_splitting_the_assignments,
) -> None:
_, df = when_splitting_the_assignments
# NOTE: keep an eye on this, not sure that the row numbers are actually
# deterministic. If this fails then we'll need to change to lookup the
# row with a filter, instead of assuming the row number.
row = df.iloc[2]
assert row["SourceSystemIdentifier"] == 111
assert row["SourceSystem"] == "Canvas"
assert row["SubmissionType"] == "online_upload"
assert row["CreateDate"] == "2021-03-11"
def it_should_map_112_online_upload_to_submission_types(
when_splitting_the_assignments,
) -> None:
_, df = when_splitting_the_assignments
row = df.iloc[1]
assert row["SourceSystemIdentifier"] == 112
assert row["SourceSystem"] == "Canvas"
assert row["SubmissionType"] == "online_upload"
assert row["CreateDate"] == "2021-03-13"
def describe_given_one_canvas_assignment_with_one_submission_types() -> None:
@pytest.fixture
def when_splitting_the_assignments() -> Tuple[pd.DataFrame, pd.DataFrame]:
# Arrange
data = StringIO(
"""LMSSectionSourceSystemIdentifier,SourceSystemIdentifier,SubmissionType,SourceSystem,CreateDate,LastModifiedDate
104,112,['online_upload'],Canvas,2021-03-11,2021-03-12"""
)
assignments_df = pd.read_csv(data)
# Act
assignments_df, submission_types_df = split(assignments_df) # type: ignore
return assignments_df, submission_types_df
def it_should_map_112_online_upload_to_submission_types(
when_splitting_the_assignments,
) -> None:
_, df = when_splitting_the_assignments
row = df.iloc[0]
assert row["SourceSystemIdentifier"] == 112
assert row["SourceSystem"] == "Canvas"
assert row["SubmissionType"] == "online_upload"
assert row["CreateDate"] == "2021-03-11"
def describe_given_there_are_no_assignments() -> None:
@pytest.fixture
def when_splitting_the_assignments() -> Tuple[pd.DataFrame, pd.DataFrame]:
# Arrange
assignments_df = pd.DataFrame()
# Act
assignments_df, submission_types_df = split(assignments_df) # type: ignore
return assignments_df, submission_types_df
def it_should_create_empty_assignments_DataFrame(when_splitting_the_assignments) -> None:
df, _ = when_splitting_the_assignments
assert df.empty
def it_should_create_empty_submission_types_DataFrame(
when_splitting_the_assignments,
) -> None:
_, df = when_splitting_the_assignments
assert df.empty
def describe_given_one_canvas_assignment_with_no_submission_types() -> None:
@pytest.fixture
def when_splitting_the_assignments() -> Tuple[pd.DataFrame, pd.DataFrame]:
# Arrange
data = StringIO(
"""LMSSectionSourceSystemIdentifier,SourceSystemIdentifier,SubmissionType,SourceSystem,CreateDate,LastModifiedDate
104,112,,Canvas,2021-03-11,2021-03-12"""
)
assignments_df =
|
pd.read_csv(data)
|
pandas.read_csv
|
"""
Medical lexicon NLP extraction pipeline
File contains: Comparison of the validation set with the NLP pipeline's labeling; outputs the relevant statistics as a CSV-style table.
-- (c) <NAME> 2019 - Team D in the HST 953 class
"""
from na_pipeline_tool.utils import logger
from na_pipeline_tool.utils import config
from na_pipeline_tool.utils import helper_classes
from na_pipeline_tool import utils
from na_pipeline_tool.utils import progressbar
import re
import pandas as pd
from joblib import Parallel, delayed
import sys, os
import collections
import numpy as np
import sklearn.metrics
class ValidationTableModule(helper_classes.Module):
def __init__(self):
super().__init__()
self._validation_set = config.get_pipeline_config_item(self.module_name(), 'validation_set_file', None)
self._df_notes_labeled_paths = config.get_pipeline_config_item(self.module_name(), 'input_note_files', [])
self._loaded_df = []
self._compare_df = None
self._orig_df = None
self._loaded_validation = None
self._loaded_validation_labels = None
self._loaded_validation_label_map = None
logger.log_info('Loading validation note labeling file')
self._loading_validation_labeling_file()
logger.log_info('DONE: Loading validation note labeling file')
logger.log_info('Loading NLP pipeline processed note files')
self._loading_note_files()
logger.log_info('DONE: NLP pipeline processed note files')
logger.log_info('Computing and outputting statistics')
line_list=[]
for _ in self._loaded_df:
line_list.append(';')
table = self._do_statistics(_)
for _r in range(len(table[0])):
elems = [_c[_r] for _c in table]
line_list.append(';'.join(elems))
line_list.append(';')
line_list.append(';')
logger.log_info('CSV Table Output:')
for _ in line_list:
print(_)
def _loading_note_files(self):
if not self._df_notes_labeled_paths:
raise RuntimeError('Please specify valid note input files.')
def load_file(path):
filename = utils.default_dataframe_name(path)
assert os.path.isfile(filename), 'Could not find note parquet file: {}'.format(filename)
df = pd.read_parquet(filename)
df.columns = [_.upper() for _ in df.columns]
            assert 'ROW_ID' in list(df.columns), 'Notes file needs to have columns: Row_id, predicted_categories'
assert 'PREDICTED_CATEGORIES' in list(df.columns), "Processed note file needs to have the PREDICTED_CATEGORIES column generated by e.g. the negation module."
df['PREDICTED_CATEGORIES'] = df.PREDICTED_CATEGORIES.str.upper()
df['PREDICTED_CATEGORIES'] = df.PREDICTED_CATEGORIES.str.replace(' ', '_')
df['PREDICTED_CATEGORIES'] = df.PREDICTED_CATEGORIES.str.split('|')
if 'FOUND_EVIDENCE' in list(df.columns):
df['FOUND_EVIDENCE'] = df['FOUND_EVIDENCE'].astype(bool)
df = df[df['FOUND_EVIDENCE']]
return df
for _ in self._df_notes_labeled_paths:
self._loaded_df.append(load_file(_))
unique_labels = []
all_pred_cats = []
for _ in self._loaded_df:
all_pred_cats.extend(list(_.PREDICTED_CATEGORIES))
for _ in [*all_pred_cats, self._loaded_validation_labels]:
unique_labels.extend(_)
unique_labels = set(unique_labels)
lbl_id = 3
self._loaded_validation_label_map = {"NONE" : 1, "Any" : 2}
for _lbl in unique_labels:
if _lbl == "NONE":
continue
if _lbl == "Any":
continue
self._loaded_validation_label_map[_lbl] = lbl_id
lbl_id += 1
for _ in self._loaded_df:
_['PREDICTED_CATEGORIES'] = _.PREDICTED_CATEGORIES.apply(lambda x: [self._loaded_validation_label_map[_] for _ in x])
_['PREDICTED_CATEGORIES'] = _.PREDICTED_CATEGORIES.apply(lambda x: [*x, 2] if not 1 in x else x)
self._loaded_validation['NOTE_TYPES'] = self._loaded_validation.NOTE_TYPES.apply(lambda x: [self._loaded_validation_label_map[_] for _ in x])
self._loaded_validation['NOTE_TYPES'] = self._loaded_validation.NOTE_TYPES.apply(lambda x: [*x, 2] if not 1 in x else x)
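        # Note (added for clarity): label ids follow the convention 1 = 'NONE',
        # 2 = 'Any' (appended to every note that has at least one real label),
        # and 3+ for the remaining categories discovered above.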
def _loading_validation_labeling_file(self):
assert self._validation_set, 'Please specify a validation labeling file.'
try:
with open(self._validation_set, 'r') as file:
self._loaded_validation = file.readlines()
self._loaded_validation = self._loaded_validation[1:]
self._loaded_validation = [_.strip() for _ in self._loaded_validation]
self._loaded_validation = [_.split(',') for _ in self._loaded_validation]
self._loaded_validation = [[int(_[0]), [_.upper().replace(' ', '_') for _ in str(_[1]).split('|')], (int(_[2]) > 0)] for _ in self._loaded_validation]
self._loaded_validation = pd.DataFrame(self._loaded_validation, columns=['ROW_ID', 'NOTE_TYPES', 'VALID_INCLUDED'])
self._loaded_validation.loc[~self._loaded_validation['VALID_INCLUDED'], 'NOTE_TYPES'] = pd.Series([['NONE']]*self._loaded_validation.shape[0])
except:
raise RuntimeError('Error while processing validation labeling file. Check file structure.')
self._loaded_validation_labels = []
for _i, _ in self._loaded_validation.iterrows():
self._loaded_validation_labels.extend(_['NOTE_TYPES'])
self._loaded_validation_labels = set(self._loaded_validation_labels)
def dump_examples_for_comparison(self):
if not self._compare_df:
logger.log_warn('Could not find comparison df - Skipping dumping of exemplary notes.')
return
self._get_examples_for_categories = [_.upper() for _ in self.get_examples_for_categories]
if not self._get_examples_for_categories:
logger.log_warn('No categories specified for dumping exemplary sentences.')
return
unknown_categories = [_ for _ in self._get_examples_for_categories if not _ in [*self._get_examples_for_categories, 'NO_FINDING']]
if unknown_categories:
logger.log_warn('The following categories are not present in the provided dataframes: {}'.format(unknown_categories))
return
example_list = []
# for _cat in self._get_examples_for_categories:
# # Get example sentences
def _do_statistics(self, df_cmp):
validset = self._loaded_validation.sort_values('ROW_ID').reset_index(drop=True)[['ROW_ID', 'NOTE_TYPES']].copy()
validset = validset.drop_duplicates(subset=['ROW_ID'])
predicted = df_cmp[['ROW_ID', 'PREDICTED_CATEGORIES']].copy()
predicted = predicted.rename(columns={'PREDICTED_CATEGORIES' : 'PREDICTED_CAT'})
predicted = predicted.drop_duplicates(subset=['ROW_ID'])
validset = validset.merge(predicted, how='left', on='ROW_ID')
validset.loc[validset['PREDICTED_CAT'].isnull(), 'PREDICTED_CAT'] = pd.Series([[1]]*validset.shape[0])
validset.loc[validset['NOTE_TYPES'].isnull(), 'NOTE_TYPES'] =
|
pd.Series([[1]]*validset.shape[0])
|
pandas.Series
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : analyzer.py
@Desc :
@Project : src
@Contact : <EMAIL>
@License : (C)Copyright 2018-2020, 1UVU.COM
@WebSite : 1uvu.com
@Modify Time @Author @Version
------------ ------- --------
2020/08/14 20:14 1uvu 1.0
"""
from collections import Counter
import pandas as pd
import numpy as np
import json
import copy
import re
from utils import stem, similar_replace, remove_chore
from plot import plot_pipeline
from settings import *
# todo
# refactor this function
def topics_analysis(topics: pd.Series, opt="freq", args=None) -> dict:
"""
:param topics: topics series
:param opt: 'freq' means frequency or 'cite', as the basis of topics ranking
:param args: a dict like {"base": None, "year": None}, base is cite or None, and year series
:return: like the following dict struct, the all is all topics,
a tuple list like [(<topics str>, <freq or cites>), (...), ...],
and the items contain 5 years 2016-2020's topics rank
"""
rtn = {
"all": None,
"years": [
]
}
rtn_year = {
"year": "",
"items": {}
}
year_list = [str(y) for y in args["year"]]
base_list = args["base"]
year_set = [str(y) for y in sorted(set(year_list), reverse=False)]
if opt == "freq":
# all freq rank
all_topics = []
for t in topics:
if pd.isna(t):
all_topics += [nan_str]
else:
all_topics += list(set(re.split(",", t)))
all = dict(Counter(all_topics))
rtn["all"] = all
for y in year_set:
year = copy.deepcopy(rtn_year)
year["year"] = y
year_topics = []
for _y, _t in zip(year_list, topics):
if _y == y:
if pd.isna(_t):
year_topics += [nan_str]
else:
year_topics += list(set(re.split(",", _t)))
items = dict(Counter(year_topics))
year["items"] = items
rtn["years"].append(year)
elif opt == "cite":
# if topics nan, then pass
# merge => {"topic", cite}
all = {nan_str: 0}
for b, t in zip(base_list, topics):
if pd.isna(t):
all[nan_str] += int(b)
else:
for tt in re.split(",", t):
if tt in all.keys():
all[tt] += int(b)
else:
all[tt] = int(b)
rtn["all"] = all
for y in year_set:
year = copy.deepcopy(rtn_year)
year["year"] = y
items = {nan_str: 0}
for _y, _t, _b in zip(year_list, topics, base_list):
if _y == y:
if pd.isna(_t):
items[nan_str] += int(_b)
else:
for _tt in re.split(",", _t):
if _tt in items.keys():
items[_tt] += int(_b)
else:
items[_tt] = int(_b)
year["items"] = items
rtn["years"].append(year)
pass
else:
raise Exception("Invalid option string.")
return rtn
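# Usage sketch (illustrative; the column names are assumptions about the caller's
# DataFrame, not taken from this file):
#   ranking = topics_analysis(df["topics"], opt="cite",
#                             args={"base": df["cite"], "year": df["year"]})
#   ranking["all"] maps each topic string to its accumulated citation count.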
def topics_vector(df: pd.DataFrame, security_topics: list) -> dict:
rtn_topic = {}
origin_topics = df["topics"].tolist()
urls = df["url"].tolist()
topics = [(u, re.split(",", str(o))) for u, o in zip(urls, origin_topics)]
for s_topic in security_topics:
for topic in topics:
if s_topic in topic[1]:
if s_topic not in rtn_topic.keys():
rtn_topic[s_topic] = [[topic[0]], [topic[1]]]
else:
rtn_topic[s_topic][0].append(topic[0])
rtn_topic[s_topic][1].append(topic[1])
ul = []
for k in rtn_topic.keys():
ul += rtn_topic[k][0]
for t in rtn_topic[k][1]:
try:
t.remove(k)
except:
pass
try:
t.remove('')
except:
pass
try:
t.remove(nan_str)
except:
pass
jf = open(analyzer_output_dir + "/security.json", "w")
js = json.dumps(rtn_topic)
jf.write(js)
jf.close()
us = set(ul)
ddf = df[df['url'].isin(us)]
    ddf.to_excel(analyzer_output_dir + "/all-security.xlsx", index=False)
df1 =
|
pd.DataFrame(columns=["title", "year", "abstract", "url", "topics"])
|
pandas.DataFrame
|
import json
import logging
import numpy as np
import os
import pandas as pd
import tqdm
from typing import List
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
def log_training_dynamics(output_dir: os.PathLike,
epoch: int,
train_ids: List[int],
train_logits: List[List[float]],
train_golds: List[int]):
"""
Save training dynamics (logits) from given epoch as records of a `.jsonl` file.
"""
td_df = pd.DataFrame({"guid": train_ids,
f"logits_epoch_{epoch}": train_logits,
"gold": train_golds})
logging_dir = os.path.join(output_dir, f"training_dynamics")
# Create directory for logging training dynamics, if it doesn't already exist.
if not os.path.exists(logging_dir):
os.makedirs(logging_dir)
epoch_file_name = os.path.join(logging_dir, f"dynamics_epoch_{epoch}.jsonl")
# Log training dynamics in epoch file: Create if not present else read, concatenate and write again
if not os.path.exists(epoch_file_name):
td_df.to_json(epoch_file_name, lines=True, orient="records")
else:
f=pd.read_json(epoch_file_name, orient="records", lines=True)
td_df=
|
pd.concat((f, td_df), axis=0)
|
pandas.concat
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
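    # Note (added for clarity): the first worker reads the header plus `nrows` rows;
    # the remaining workers skip `start` rows, read without a header, and refer to the
    # date column by its position in the file (parse_dates=[9]), so their columns are
    # re-labelled from the first chunk before concatenation.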
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(
|
StringIO(self.data1)
|
pandas.compat.StringIO
|
import numpy as np
import pandas as pd
import os.path
from pytest import approx, raises, mark, skip
import lenskit.metrics.predict as pm
import lk_test_utils as lktu
def test_check_missing_empty():
pm._check_missing(pd.Series([]), 'error')
# should pass
assert True
def test_check_missing_has_values():
pm._check_missing(pd.Series([1, 3, 2]), 'error')
# should pass
assert True
def test_check_missing_nan_raises():
with raises(ValueError):
pm._check_missing(pd.Series([1, np.nan, 3]), 'error')
def test_check_missing_raises():
data = pd.Series([1, 7, 3], ['a', 'b', 'd'])
ref =
|
pd.Series([3, 2, 4], ['b', 'c', 'd'])
|
pandas.Series
|
__author__ = "<NAME>"
# This file contains functions for generating figures used in exploratory analysis of
# iCGM Sensitivity Analysis results.
#
# This includes a number of different visualization for checking different aspects of the results
# and all of the figures that were previously made for the non-pairwise version of this analysis.
#
# The relevant code for final report figures has
# been moved to icgm_sensitivity_analysis_report_figures_and_tables.py. The code
# in this longer exploratory analysis file is no longer being maintained but
# has been left as is, in case a similar exploratory
# analysis is conducted at some point or these visualization functions prove useful
# for adding to a data science visualization toolkit.
#
# %% REQUIRED LIBRARIES
import os
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import plotly.express as px
import datetime as dt
import itertools
from src.visualization.save_view_fig import save_view_fig
import json
from scipy import stats
import tidepool_data_science_metrics as metrics
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
utc_string = dt.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
# Calculate MBE and MARD
# (https://github.com/tidepool-org/icgm-sensitivity-analysis/blob/jameno/analysis-tables/src/simulator_functions.py)
def add_error_fields(df):
"""
Parameters
----------
df: dataframe
dataframe to add error fields to (for use in MARD and MBE calculations)
Returns
-------
df: dataframe
dataframe with new error field columns
"""
# default icgm and ysi ranges [40, 400] and [0, 900]
sensor_bg_range = (40, 400)
bg_range = (0, 900)
sensor_min, sensor_max = sensor_bg_range
bg_min, bg_max = bg_range
# calculate the icgm error (difference and percentage)
sensor_bg_values = df["bg_sensor"].values
bg_values = df["bg"].values
icgm_error = sensor_bg_values - bg_values
df["icgmError"] = icgm_error
abs_difference_error = np.abs(icgm_error)
df["absError"] = abs_difference_error
df["absRelDiff"] = 100 * abs_difference_error / bg_values
df["withinMeasRange"] = (sensor_bg_values >= sensor_min) & (
sensor_bg_values <= sensor_max
)
return df
def calc_mbe(df):
"""
Calculate mean bias
Parameters
----------
df: dataframe
dataframe to calculate mean bias error (MBE) from
Returns
-------
mean bias error calculation
"""
# Default icgm and ysi ranges [40, 400] and [0, 900]
df = add_error_fields(df)
return np.mean(df.loc[df["withinMeasRange"], "icgmError"])
def calc_mard(df):
"""
Calculate Mean Absolute Relative Deviation (MARD)
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5375072/
Parameters
----------
df: dataframe
dataframe to calculate mean absolute relative deviation (MARD) from
Returns
-------
mard calculation
"""
df = add_error_fields(df)
abs_relative_difference_in_measurement_range = df.loc[
df["withinMeasRange"], "absRelDiff"
]
return np.mean(abs_relative_difference_in_measurement_range)
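# Illustrative usage sketch for the error helpers above (toy values, not real simulation data):
#   df = pd.DataFrame({"bg": [100, 200, 300], "bg_sensor": [110, 190, 330]})
#   calc_mbe(df)  -> mean(+10, -10, +30)  = 10.0  (sensor reads high on average)
#   calc_mard(df) -> mean(10%, 5%, 10%)   = 8.33  (mean absolute relative difference, in percent)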
# Parse out simulation id
def get_sim_id(patient_characteristics_df, filename):
"""
Parse out simulation ID from the filename and patient characteristics data frame
Parameters
----------
patient_characteristics_df: dataframe
dataframe of patient characteristics
filename: str
filename corresponding to the simulation
Returns
-------
"""
sensor_num = (
filename.split("/")[-1]
.split(".")[2]
.replace("s", "")
.replace("Senor", "Sensor")
)
vp_id = (
patient_characteristics_df["patient_scenario_filename"]
.iloc[0]
.split("/")[-1]
.split(".")[0]
.replace("train_", "")
)
bg_test_condition = filename.split(".")[1]
analysis_type = filename.split(".")[3]
sim_id = (
"vp"
+ str(vp_id)
+ ".bg"
+ ".s"
+ str(sensor_num)
+ "."
+ str(bg_test_condition)
+ "."
+ analysis_type
)
return sim_id
def get_data_old_format(
filename, simulation_df, patient_characteristics_df, sensor_characteristics_df=""
):
"""
Returns a list of data for simulation dataframes that are in the old data format.
Parameters
----------
filename: str
name of file corresponding
simulation_df: dataframe
dataframe of the particular simulation want to return data for
patient_characteristics_df: dataframe
dataframe of patient characteristics
sensor_characteristics_df: dataframe
dataframe of sensor characteristics
Returns
-------
list of data items that will be a row in aggregated summary dataframe
"""
sim_id = get_sim_id(patient_characteristics_df, filename)
virtual_patient_num = "vp" + str(
patient_characteristics_df["patient_scenario_filename"]
.iloc[0]
.split("/")[-1]
.split(".")[0]
.replace("train_", "")
)
sensor_num = (
filename.split("/")[-1]
.split(".")[2]
.replace("s", "")
.replace("Senor", "Sensor")
)
patient_scenario_filename = (
patient_characteristics_df["patient_scenario_filename"].iloc[0].split("/")[-1]
)
age = patient_characteristics_df["age"].iloc[0]
ylw = patient_characteristics_df["ylw"].iloc[0]
cir = simulation_df["cir"].iloc[0]
isf = simulation_df["isf"].iloc[0]
sbr = simulation_df["sbr"].iloc[0]
starting_bg = simulation_df["bg"].iloc[0]
starting_bg_sensor = simulation_df["bg_sensor"].iloc[0]
true_bolus = simulation_df["true_bolus"].iloc[1]
if "IdealSensor" in filename:
initial_bias = np.nan
bias_norm_factor = np.nan
bias_drift_oscillations = np.nan
bias_drift_range_start = np.nan
bias_drift_range_end = np.nan
noise_coefficient = np.nan
mard = np.nan
mbe = np.nan
else:
initial_bias = sensor_characteristics_df["initial_bias"].iloc[0]
bias_norm_factor = sensor_characteristics_df["bias_norm_factor"].iloc[0]
bias_drift_oscillations = sensor_characteristics_df[
"bias_drift_oscillations"
].iloc[0]
bias_drift_range_start = sensor_characteristics_df[
"bias_drift_range_start"
].iloc[0]
bias_drift_range_end = sensor_characteristics_df["bias_drift_range_end"].iloc[0]
noise_coefficient = sensor_characteristics_df["noise_coefficient"].iloc[0]
mard = calc_mard(simulation_df)
mbe = calc_mbe(simulation_df)
delay = np.nan
bias_drift_type = np.nan
bias_type = np.nan
noise_per_sensor = np.nan
noise = np.nan
bias_factor = np.nan
phi_drift = np.nan
drift_multiplier = np.nan
drift_multiplier_start = np.nan
drift_multiplier_end = np.nan
noise_max = np.nan
bg_test_condition = filename.split(".")[1].replace("bg", "")
analysis_type = filename.split(".")[3]
LBGI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[0]
LBGI_RS = metrics.glucose.lbgi_risk_score(LBGI)
DKAI = metrics.insulin.dka_index(simulation_df["iob"], simulation_df["sbr"].iloc[0])
DKAI_RS = metrics.insulin.dka_risk_score(DKAI)
HBGI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[1]
BGRI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[2]
percent_lt_54 = metrics.glucose.percent_values_lt_54(bg_array=simulation_df["bg"])
return [
filename,
sim_id,
virtual_patient_num,
sensor_num,
patient_scenario_filename,
age,
ylw,
cir,
isf,
sbr,
starting_bg,
starting_bg_sensor,
true_bolus,
initial_bias,
bias_norm_factor,
bias_drift_oscillations,
bias_drift_range_start,
bias_drift_range_end,
noise_coefficient,
delay,
bias_drift_type,
bias_type,
noise_per_sensor,
noise,
bias_factor,
phi_drift,
drift_multiplier,
drift_multiplier_start,
drift_multiplier_end,
noise_max,
mard,
mbe,
bg_test_condition,
analysis_type,
LBGI,
LBGI_RS,
DKAI,
DKAI_RS,
HBGI,
BGRI,
percent_lt_54,
]
def get_data(
filename, simulation_df, simulation_characteristics_json_data, baseline=False
):
"""
Returns a list of data from the simulation files that are in the new format
Parameters
----------
filename: str
name of file corresponding
simulation_df: dataframe
dataframe of the particular simulation want to return data for
    simulation_characteristics_json_data: dict
        json simulation characteristics data corresponding to that simulation
baseline: bool
whether this particular file is a baseline file
Returns
-------
list of data items that will be a row in aggregated summary dataframe
"""
sim_id = simulation_characteristics_json_data["sim_id"]
virtual_patient_num = simulation_characteristics_json_data["sim_id"].split(".")[0]
sensor_num = filename.split(".")[2]
patient_scenario_filename = filename.split(".")[0]
age = simulation_characteristics_json_data["controller"]["config"]["age"]
ylw = simulation_characteristics_json_data["controller"]["config"]["ylw"]
cir = simulation_characteristics_json_data["patient"]["config"][
"carb_ratio_schedule"
]["schedule"][0]["setting"]
isf = simulation_characteristics_json_data["patient"]["config"][
"insulin_sensitivity_schedule"
]["schedule"][0]["setting"]
sbr = simulation_characteristics_json_data["patient"]["config"]["basal_schedule"][
"schedule"
][0]["setting"]
starting_bg = simulation_df["bg"].iloc[0]
starting_bg_sensor = simulation_df["bg_sensor"].iloc[0]
true_bolus = simulation_df["true_bolus"].iloc[1]
if baseline:
initial_bias = np.nan
bias_norm_factor = np.nan
bias_drift_oscillations = np.nan
bias_drift_range_start = np.nan
bias_drift_range_end = np.nan
noise_coefficient = np.nan
delay = np.nan
bias_drift_type = np.nan
bias_type = np.nan
noise_per_sensor = np.nan
noise = np.nan
bias_factor = np.nan
phi_drift = np.nan
drift_multiplier = np.nan
drift_multiplier_start = np.nan
drift_multiplier_end = np.nan
noise_max = np.nan
mard = np.nan
mbe = np.nan
else:
initial_bias = simulation_characteristics_json_data["patient"]["sensor"][
"initial_bias"
]
bias_norm_factor = simulation_characteristics_json_data["patient"]["sensor"][
"bias_norm_factor"
]
bias_drift_oscillations = simulation_characteristics_json_data["patient"][
"sensor"
]["bias_drift_oscillations"]
bias_drift_range_start = simulation_characteristics_json_data["patient"][
"sensor"
]["bias_drift_range_start"]
bias_drift_range_end = simulation_characteristics_json_data["patient"][
"sensor"
]["bias_drift_range_end"]
noise_coefficient = simulation_characteristics_json_data["patient"]["sensor"][
"noise_coefficient"
]
delay = simulation_characteristics_json_data["patient"]["sensor"]["delay"]
bias_drift_type = simulation_characteristics_json_data["patient"]["sensor"][
"bias_drift_type"
]
bias_type = simulation_characteristics_json_data["patient"]["sensor"][
"bias_type"
]
noise_per_sensor = simulation_characteristics_json_data["patient"]["sensor"][
"noise_per_sensor"
]
noise = simulation_characteristics_json_data["patient"]["sensor"]["noise"]
bias_factor = simulation_characteristics_json_data["patient"]["sensor"][
"bias_factor"
]
phi_drift = simulation_characteristics_json_data["patient"]["sensor"][
"phi_drift"
]
drift_multiplier = simulation_characteristics_json_data["patient"]["sensor"][
"drift_multiplier"
]
drift_multiplier_start = simulation_characteristics_json_data["patient"][
"sensor"
]["drift_multiplier_start"]
drift_multiplier_end = simulation_characteristics_json_data["patient"][
"sensor"
]["drift_multiplier_end"]
noise_max = simulation_characteristics_json_data["patient"]["sensor"][
"noise_max"
]
mard = calc_mard(simulation_df)
mbe = calc_mbe(simulation_df)
bg_test_condition = filename.split(".")[1].replace("bg", "")
analysis_type = filename.split(".")[3]
LBGI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[0]
LBGI_RS = metrics.glucose.lbgi_risk_score(LBGI)
DKAI = metrics.insulin.dka_index(simulation_df["iob"], simulation_df["sbr"].iloc[0])
DKAI_RS = metrics.insulin.dka_risk_score(DKAI)
HBGI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[1]
BGRI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[2]
percent_lt_54 = metrics.glucose.percent_values_lt_54(bg_array=simulation_df["bg"])
return [
filename,
sim_id,
virtual_patient_num,
sensor_num,
patient_scenario_filename,
age,
ylw,
cir,
isf,
sbr,
starting_bg,
starting_bg_sensor,
true_bolus,
initial_bias,
bias_norm_factor,
bias_drift_oscillations,
bias_drift_range_start,
bias_drift_range_end,
noise_coefficient,
delay,
bias_drift_type,
bias_type,
noise_per_sensor,
noise,
bias_factor,
phi_drift,
drift_multiplier,
drift_multiplier_start,
drift_multiplier_end,
noise_max,
mard,
mbe,
bg_test_condition,
analysis_type,
LBGI,
LBGI_RS,
DKAI,
DKAI_RS,
HBGI,
BGRI,
percent_lt_54,
]
# %% Visualization Functions
# %% FUNCTIONS
# TODO: use mypy and specify the types
utc_string = dt.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
# TODO: automatically grab the code version to add to the figures generated
code_version = "v0-1-0"
# Adding in some generic methods for tables based on bins
def bin_data(bin_breakpoints):
"""
Parameters
----------
bin_breakpoints: array-like
Array-like containing Interval objects from which to build the IntervalIndex.
Returns
-------
interval index
"""
    # the bin_breakpoints are the left-closed bin edges: each bin contains values greater
    # than or equal to its breakpoint and strictly less than the next breakpoint
return pd.IntervalIndex.from_breaks(bin_breakpoints, closed="left")
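# Illustrative usage sketch (not part of the original module): bin_data([0, 7, 14]) yields the
# left-closed intervals [0, 7) and [7, 14), which pd.cut can then apply, e.g.
#   pd.cut(pd.Series([3, 7, 13]), bin_data([0, 7, 14]))  ->  [0, 7), [7, 14), [7, 14)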
def get_metadata_tables(demographic_df, fig_path):
"""
Parameters
----------
demographic_df: dataframe
dataframe of demographic characteristics (age, ylw) for patient corresponding to simulation
fig_path: str
filepath of where to save the tables
Returns
-------
"""
# %% prepare demographic data for tables
virtual_patient_group = demographic_df.groupby("virtual_patient_num")
demographic_reduced_df = virtual_patient_group[
["age", "ylw", "CIR", "ISF", "SBR"]
].median()
    # replace age and years living with (ylw) values < 0 with np.nan
demographic_reduced_df[demographic_reduced_df < 0] = np.nan
# %% Age Breakdown Table
# TODO: this can be generalized for any time we want to get counts by bins
age_bin_breakpoints = np.array([0, 7, 14, 25, 50, 100])
age_bins = bin_data(age_bin_breakpoints)
# make an age table
age_table = pd.DataFrame(index=age_bins.astype("str"))
age_table.index.name = "Age (years old)"
# cut the data by bin
demographic_reduced_df["age_bin"] =
|
pd.cut(demographic_reduced_df["age"], age_bins)
|
pandas.cut
|
import pandas as pd
from sklearn.model_selection import train_test_split
raw_data_path = './RAW_Dataset'
destination_folder = './Dataset_Fix'
train_test_ratio = 0.10
train_valid_ratio = 0.80
first_n_words = 200
def trim_string(x):
x = x.split(maxsplit=first_n_words)
x = ' '.join(x[:first_n_words])
return x
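# e.g. (illustrative) with first_n_words = 3, trim_string("a b c d e") returns "a b c"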
# Read raw data
df_raw =
|
pd.read_csv(raw_data_path)
|
pandas.read_csv
|
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pandas as pd
import pymysql
import pymysql.cursors
from pandas.io import sql
# create_engine is used below for DataFrame.to_sql, so the import is enabled here
from sqlalchemy import create_engine
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
from collections import defaultdict
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
# bin edges for price and quantity; np.histogram returns the same edges that the
# previously commented-out plt.hist calls would have produced, without plotting
x = spq['Product_Price']
num_bins = 5
n, pint = np.histogram(x, num_bins)
y = spq['Product_Qty']
num_bins = 5
n, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
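# Column i of dataofmas holds the forecast demand at price i (Product_Price_min is 0), so
# i*dataofmas[[i]] is the weekly revenue at that price; the row-wise max/idxmax below then
# pick the revenue-maximising price for each week.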
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
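# The linear demand model returned above, written out (a sketch, no new behaviour):
#   demand_t = a + b*(p_t - comp_t) + d*t + pr1*promo1 + pr2*promo2
# e.g. with the example coefficients from the commented-out block above (a=243.2, b=-9.7,
# d=1.67, pr1=21.9, pr2=-0.5) and comp_t=10, promo1=1, promo2=0, p_t=12, t=1:
#   243.2 - 9.7*2 + 1.67 + 21.9 = 247.4 (approximately)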
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
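# Sketch of the price-optimisation problem assembled below (formulation only):
#   maximise   sum_t p_t * demand(p_t)        (objective returns the negative for minimize)
#   subject to sum_t demand(p_t) <= s_0       (inventory, constraint_1)
#              0 <= p_t <= price_max          (constraint_2 and the bounds passed to SLSQP)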
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
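# Worked example for the haversine distance above (illustrative, cost=1):
#   dist(0, 0, 0, 1, 1): dlat=0, dlon=1 deg = 0.01745 rad,
#   a ~= 7.6e-05, c ~= 0.01745, distance ~= 6373*0.01745 ~= 111.2 km
#   (roughly one degree of longitude at the equator).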
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
+ [5000000 * cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
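# Sketch of the facility-location MILP built above (summary only, no new behaviour):
#   minimise   transport cost + fixed opening cost + 5,000,000 * unmet-demand slack
#   subject to allocations to each customer + slack == that customer's demand
#              allocations from each factory <= capacity * factory_status (binary open/closed)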
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
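# e.g. a date such as '2019-05-17' maps to its quarter start '2019-04-01', so the
# groupby('Date') below aggregates the monthly rows into calendar quarters.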
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weakly': ##weakly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
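# Worked example for the three error metrics (illustrative values only):
#   y_true=[100, 200], y_pred=[110, 190] -> ME = 0, MAE = 10,
#   MAPE = mean(|error| / y_pred) * 100 ~= 7.2 (note this variant divides by the prediction)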
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs = tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results = Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
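# The regression below fits a simple linear trend per series: each column is
# regressed on its integer time index (0..n-1), validated on the hold-out set V,
# and then extrapolated noofterms periods ahead.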
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r = r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
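# Note: beyond the first step the recursion above only blends the last smoothed
# value with the last observation, so the multi-step forecast settles to an
# almost constant level, as expected for simple exponential smoothing.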
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf = skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum = dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the quarterly file and index it by time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs = tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results = Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=
|
pd.DataFrame(dofterm)
|
pandas.DataFrame
|
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-n:
Number of model trajectories (samples) used to visualise the model fit. Default: 100.
-k:
Number of poisson draws per sampled trajectory. Default: 1.
Returns:
--------
Figures visualising the calibrated prevention parameters and the corresponding model fit for the first and second COVID-19 wave.
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# Plot settings: use the Okabe-Ito colorblind-friendly colour cycle
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################

# Load the public Sciensano COVID-19 dataset used throughout this script
# (assuming the standard covid19model data interface imported above).
df_sciensano = sciensano.get_sciensano_COVID19_data(update=False)
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = youth/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
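# Sign convention: datay.shift(lag) with a positive lag uses earlier values of
# datay, so crosscorr(x, y, lag) correlates x(t) with y(t - lag); a peak at a
# positive lag therefore suggests that changes in datay precede those in datax.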
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
# approximate 95% significance threshold for the cross correlation at lag tau
sig = 2/np.sqrt(n-abs(tau))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 2: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
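# tau is the delay (in days) before a measure takes effect and l is the length of
# the compliance ramp; ramp_fun interpolates between the pre-measure and
# post-measure contact matrices over that window.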
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to dates
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
t13 = pd.Timestamp('2021-04-18') # Easter holiday ends
# ------
# WAVE 1
# ------
if t <= t1:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 < t < t1 + tau_days:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 + tau_days < t <= t1 + tau_days + l_days:
t = pd.Timestamp(t.date())
policy_old = all_contact(t)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
elif t1 + tau_days + l_days < t <= t2:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t2 < t <= t3:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t3 < t <= t4:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
# ------
# WAVE 2
# ------
elif t4 < t <= t5 + tau_days:
return contact_matrix_4prev(t, school=1)
elif t5 + tau_days < t <= t5 + tau_days + l_days:
policy_old = contact_matrix_4prev(t, school=1)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
elif t5 + tau_days + l_days < t <= t6:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t6 < t <= t7:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t7 < t <= t8:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t8 < t <= t9:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t9 < t <= t10:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t10 < t <= t11:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t11 < t <= t12:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t12 < t <= t13:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
else:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
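# One index is drawn and reused for every parameter so that the correlation
# structure between the calibrated samples (beta, da, omega, l, tau and the
# prevention parameters) is preserved in each simulated trajectory.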
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
LL = conf_int/2
UL = 1-conf_int/2
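# add_poisson adds Poisson observation noise to every simulated trajectory of the
# requested model state and returns the mean, median and lower/upper quantiles
# across all noisy draws.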
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
data = output[state_name].sum(dim="Nc").values
# Initialize vectors
vector = np.zeros((data.shape[1],n_draws_per_sample*n_samples))
# Loop over dimension draws
for n in range(data.shape[0]):
poisson_draw = np.random.poisson( np.expand_dims(data[n,:],axis=1),size = (data.shape[1],n_draws_per_sample))
vector[:,n*n_draws_per_sample:(n+1)*n_draws_per_sample] = poisson_draw
# Compute mean and median
mean = np.mean(vector,axis=1)
median = np.median(vector,axis=1)
# Compute quantiles
LL = np.quantile(vector, q = LL, axis = 1)
UL = np.quantile(vector, q = UL, axis = 1)
return mean, median, LL, UL
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim], color='red', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax = _apply_tick_locator(ax)
ax.set_xlim(start_calibration,end_sim)
ax.set_ylabel(state_label)
return ax
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 3]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 0.5, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,300, 600])
axes[i,j].set_ylim([0,700])
plt.tight_layout()
plt.show()
model_results_WAVE1 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
#####################################
## PART 3: Hospitals vs. R0 figure ##
#####################################
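# Per age group i the basic reproduction number is computed as
# R0_i = (a_i*da + omega) * beta * sum_j Nc_ij,
# loosely: (effective infectious period) x (transmission probability per contact)
# x (number of contacts); the overall R0 is the population-weighted average.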
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] * np.sum(Nc, axis=1)[i]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_overall = np.mean(np.sum(R0_norm,axis=0))
return R0, R0_overall
R0, R0_overall = compute_R0(initN, Nc_all['total'], samples_dicts[-1], params)
cumsum = out['H_in'].cumsum(dim='time').values
cumsum_mean = np.mean(cumsum[:,:,-1], axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_LL = cumsum_mean - np.quantile(cumsum[:,:,-1], q = 0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_UL = np.quantile(cumsum[:,:,-1], q = 1-0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0)) - cumsum_mean
cumsum = (out['H_in'].mean(dim="draws")).cumsum(dim='time').values
fraction = cumsum[:,-1]/sum(cumsum[:,-1])
fig,ax = plt.subplots(figsize=(12,4))
bars = ('$[0, 10[$', '$[10, 20[$', '$[20, 30[$', '$[30, 40[$', '$[40, 50[$', '$[50, 60[$', '$[60, 70[$', '$[70, 80[$', '$[80, \infty[$')
x_pos = np.arange(len(bars))
#ax.bar(x_pos, np.mean(R0,axis=1), yerr = [np.mean(R0,axis=1) - np.quantile(R0,q=0.05/2,axis=1), np.quantile(R0,q=1-0.05/2,axis=1) - np.mean(R0,axis=1)], width=1, color='b', alpha=0.5, capsize=10)
ax.bar(x_pos, np.mean(R0,axis=1), width=1, color='b', alpha=0.8)
ax.set_ylabel('$R_0$ (-)')
ax.grid(False)
ax2 = ax.twinx()
#ax2.bar(x_pos, cumsum_mean, yerr = [cumsum_LL, cumsum_UL], width=1,color='orange',alpha=0.9,hatch="/", capsize=10)
ax2.bar(x_pos, cumsum_mean, width=1,color='orange',alpha=0.6,hatch="/")
ax2.set_ylabel('Fraction of hospitalizations (-)')
ax2.grid(False)
plt.xticks(x_pos, bars)
plt.tight_layout()
plt.show()
#########################################
## Part 4: Robustness figure of WAVE 2 ##
#########################################
n_prevention = 4
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-06.json')), # 2020-11-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-05.json')), # 2020-11-16
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-04.json')), # 2020-12-24
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json')), # 2021-02-01
]
n_calibrations = len(samples_dicts)
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-09-01'
# Last datapoint used in inference
end_calibrations = ['2020-11-06','2020-11-16','2020-12-24','2021-02-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2021-02-14'
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Model initial condition on September 1st
warmup = 0
with open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/initial_states_2020-09-01.json', 'r') as fp:
initial_states = json.load(fp)
initial_states.update({
'VE': np.zeros(9),
'V': np.zeros(9),
'V_new': np.zeros(9),
'alpha': np.zeros(9)
})
#initial_states['ICU_tot'] = initial_states.pop('ICU')
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_schools'] = samples_dict['prev_schools'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 1, 6]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{schools}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_schools', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, start_calibration = start_calibration, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,250, 500, 750])
axes[i,j].set_ylim([0,850])
plt.tight_layout()
plt.show()
model_results_WAVE2 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
model_results = [model_results_WAVE1, model_results_WAVE2]
#################################################################
## Part 5: Comparing the maximal dataset prevention parameters ##
#################################################################
samples_dict_WAVE1 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json'))
samples_dict_WAVE2 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json'))
labels = ['$\Omega_{schools}$','$\Omega_{work}$', '$\Omega_{rest}$', '$\Omega_{home}$']
keys = ['prev_schools','prev_work','prev_rest','prev_home']
fig,axes = plt.subplots(1,4,figsize=(12,4))
for idx,ax in enumerate(axes):
if idx != 0:
(n1, bins, patches) = ax.hist(samples_dict_WAVE1[keys[idx]],bins=15,color='blue',alpha=0.4, density=True)
(n2, bins, patches) =ax.hist(samples_dict_WAVE2[keys[idx]],bins=15,color='black',alpha=0.4, density=True)
max_n = max([max(n1),max(n2)])*1.10
ax.axvline(np.mean(samples_dict_WAVE1[keys[idx]]),ls=':',ymin=0,ymax=1,color='blue')
ax.axvline(np.mean(samples_dict_WAVE2[keys[idx]]),ls=':',ymin=0,ymax=1,color='black')
if idx ==1:
ax.annotate('$\mu_1 = \mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
else:
ax.annotate('$\mu_1 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.annotate('$\mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE2[keys[idx]])), xy=(np.mean(samples_dict_WAVE2[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.set_xlabel(labels[idx])
ax.set_yticks([])
ax.spines['left'].set_visible(False)
else:
ax.hist(samples_dict_WAVE2['prev_schools'],bins=15,color='black',alpha=0.6, density=True)
ax.set_xlabel('$\Omega_{schools}$')
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.set_xlim([0,1])
ax.xaxis.grid(False)
ax.yaxis.grid(False)
plt.tight_layout()
plt.show()
################################################################
## Part 6: Relative contributions of each contact: both waves ##
################################################################
# --------------------------------
# Re-define function to compute R0
# --------------------------------
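# Same formula as the compute_R0 defined earlier, but Nc is now an
# (n_ages x n_samples) array of per-sample contact numbers, so Nc[i,j] replaces
# the row sum of a contact matrix and an effective reproduction number is
# returned for every calibration sample.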
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] *Nc[i,j]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_mean = np.sum(R0_norm,axis=0)
return R0, R0_mean
# -----------------------
# Pre-allocate dataframes
# -----------------------
index=df_google.index
columns = [['1','1','1','1','1','1','1','1','1','1','1','1','1','1','1','2','2','2','2','2','2','2','2','2','2','2','2','2','2','2'],['work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL','rest_mean','rest_LL','rest_UL',
'home_mean','home_LL','home_UL','total_mean','total_LL','total_UL','work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL',
'rest_mean','rest_LL','rest_UL','home_mean','home_LL','home_UL','total_mean','total_LL','total_UL']]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["WAVE", "Type"])
data = np.zeros([len(df_google.index),30])
df_rel = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_abs = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_Re = pd.DataFrame(data=data, index=df_google.index, columns=columns)
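# df_rel, df_abs and df_Re share a two-level column index: level "WAVE" ('1'/'2')
# and level "Type" (work/schools/rest/home/total, each with _mean, _LL, _UL),
# holding the relative contact contributions, absolute contact numbers and
# effective reproduction numbers respectively.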
samples_dicts = [samples_dict_WAVE1, samples_dict_WAVE2]
start_dates =[pd.to_datetime('2020-03-15'), pd.to_datetime('2020-10-19')]
waves=["1", "2"]
for j,samples_dict in enumerate(samples_dicts):
print('\n WAVE: ' + str(j)+'\n')
# ---------------
# Rest prevention
# ---------------
print('Rest\n')
data_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
Re_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
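# Each mobility date falls in one of three regimes, mirroring the policy function:
# before date_start + tau no prevention is applied, during the ramp of length l
# the prevention parameter is phased in, and afterwards it applies fully.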
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_rest[idx,:] = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
new = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
data_rest[idx,:]= old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
new_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_rest[idx,:] = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_rest_mean = np.mean(Re_rest,axis=1)
Re_rest_LL = np.quantile(Re_rest,q=0.05/2,axis=1)
Re_rest_UL = np.quantile(Re_rest,q=1-0.05/2,axis=1)
# ---------------
# Work prevention
# ---------------
print('Work\n')
data_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_work[idx,:] = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
new = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.array(samples_dict['prev_work'])
data_work[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['work'][date])*(np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
new_contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_work[idx,:] = (0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0))))*np.array(samples_dict['prev_work'])
contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_work_mean = np.mean(Re_work,axis=1)
Re_work_LL = np.quantile(Re_work, q=0.05/2, axis=1)
Re_work_UL = np.quantile(Re_work, q=1-0.05/2, axis=1)
# ----------------
# Home prevention
# ----------------
print('Home\n')
data_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
Re_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
contacts = np.expand_dims(np.sum(Nc_home,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
new = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
data_home[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(np.sum(Nc_home,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
new_contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_home_mean = np.mean(Re_home,axis=1)
Re_home_LL = np.quantile(Re_home, q=0.05/2, axis=1)
Re_home_UL = np.quantile(Re_home, q=1-0.05/2, axis=1)
# ------------------
# School prevention
# ------------------
if j == 0:
print('School\n')
data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-09-01'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_schools[idx,:] = 1 * (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work']) # This is wrong, but is never used
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif j == 1:
print('School\n')
data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_schools'])])
Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-11-16'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2020-11-16') < date <= pd.to_datetime('2020-12-18'):
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2020-12-18') < date <= pd.to_datetime('2021-01-04'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2021-01-04') < date <= pd.to_datetime('2021-02-15'):
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2021-02-15') < date <= pd.to_datetime('2021-02-21'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_schools_mean = np.mean(Re_schools,axis=1)
Re_schools_LL = np.quantile(Re_schools, q=0.05/2, axis=1)
Re_schools_UL = np.quantile(Re_schools, q=1-0.05/2, axis=1)
# -----
# Total
# -----
data_total = data_rest + data_work + data_home + data_schools
Re_total = Re_rest + Re_work + Re_home + Re_schools
Re_total_mean = np.mean(Re_total,axis=1)
Re_total_LL = np.quantile(Re_total, q=0.05/2, axis=1)
Re_total_UL = np.quantile(Re_total, q=1-0.05/2, axis=1)
# -----------------------
# Absolute contributions
# -----------------------
abs_rest = np.zeros(data_rest.shape)
abs_work = np.zeros(data_rest.shape)
abs_home = np.zeros(data_rest.shape)
abs_schools = np.zeros(data_schools.shape)
abs_total = data_total
for i in range(data_rest.shape[0]):
abs_rest[i,:] = data_rest[i,:]
abs_work[i,:] = data_work[i,:]
abs_home[i,:] = data_home[i,:]
abs_schools[i,:] = data_schools[i,:]
abs_schools_mean = np.mean(abs_schools,axis=1)
abs_schools_LL = np.quantile(abs_schools,LL,axis=1)
abs_schools_UL = np.quantile(abs_schools,UL,axis=1)
abs_rest_mean = np.mean(abs_rest,axis=1)
abs_rest_LL = np.quantile(abs_rest,LL,axis=1)
abs_rest_UL = np.quantile(abs_rest,UL,axis=1)
abs_work_mean = np.mean(abs_work,axis=1)
abs_work_LL = np.quantile(abs_work,LL,axis=1)
abs_work_UL = np.quantile(abs_work,UL,axis=1)
abs_home_mean = np.mean(abs_home,axis=1)
abs_home_LL = np.quantile(abs_home,LL,axis=1)
abs_home_UL = np.quantile(abs_home,UL,axis=1)
abs_total_mean = np.mean(abs_total,axis=1)
abs_total_LL = np.quantile(abs_total,LL,axis=1)
abs_total_UL = np.quantile(abs_total,UL,axis=1)
# -----------------------
# Relative contributions
# -----------------------
rel_rest = np.zeros(data_rest.shape)
rel_work = np.zeros(data_rest.shape)
rel_home = np.zeros(data_rest.shape)
rel_schools = np.zeros(data_schools.shape)
rel_total = np.zeros(data_schools.shape)
for i in range(data_rest.shape[0]):
total = data_schools[i,:] + data_rest[i,:] + data_work[i,:] + data_home[i,:]
rel_rest[i,:] = data_rest[i,:]/total
rel_work[i,:] = data_work[i,:]/total
rel_home[i,:] = data_home[i,:]/total
rel_schools[i,:] = data_schools[i,:]/total
rel_total[i,:] = total/total
rel_schools_mean = np.mean(rel_schools,axis=1)
rel_schools_LL = np.quantile(rel_schools,LL,axis=1)
rel_schools_UL = np.quantile(rel_schools,UL,axis=1)
rel_rest_mean = np.mean(rel_rest,axis=1)
rel_rest_LL = np.quantile(rel_rest,LL,axis=1)
rel_rest_UL = np.quantile(rel_rest,UL,axis=1)
rel_work_mean = np.mean(rel_work,axis=1)
rel_work_LL = np.quantile(rel_work,LL,axis=1)
rel_work_UL = np.quantile(rel_work,UL,axis=1)
rel_home_mean = np.mean(rel_home,axis=1)
rel_home_LL = np.quantile(rel_home,LL,axis=1)
rel_home_UL = np.quantile(rel_home,UL,axis=1)
rel_total_mean = np.mean(rel_total,axis=1)
rel_total_LL = np.quantile(rel_total,LL,axis=1)
rel_total_UL = np.quantile(rel_total,UL,axis=1)
# ---------------------
# Append to dataframe
# ---------------------
df_rel[waves[j],"work_mean"] = rel_work_mean
df_rel[waves[j],"work_LL"] = rel_work_LL
df_rel[waves[j],"work_UL"] = rel_work_UL
df_rel[waves[j], "rest_mean"] = rel_rest_mean
df_rel[waves[j], "rest_LL"] = rel_rest_LL
df_rel[waves[j], "rest_UL"] = rel_rest_UL
df_rel[waves[j], "home_mean"] = rel_home_mean
df_rel[waves[j], "home_LL"] = rel_home_LL
df_rel[waves[j], "home_UL"] = rel_home_UL
df_rel[waves[j],"schools_mean"] = rel_schools_mean
df_rel[waves[j],"schools_LL"] = rel_schools_LL
df_rel[waves[j],"schools_UL"] = rel_schools_UL
df_rel[waves[j],"total_mean"] = rel_total_mean
df_rel[waves[j],"total_LL"] = rel_total_LL
df_rel[waves[j],"total_UL"] = rel_total_UL
copy1 = df_rel.copy(deep=True)
df_Re[waves[j],"work_mean"] = Re_work_mean
df_Re[waves[j],"work_LL"] = Re_work_LL
df_Re[waves[j],"work_UL"] = Re_work_UL
df_Re[waves[j], "rest_mean"] = Re_rest_mean
df_Re[waves[j],"rest_LL"] = Re_rest_LL
df_Re[waves[j],"rest_UL"] = Re_rest_UL
df_Re[waves[j], "home_mean"] = Re_home_mean
df_Re[waves[j], "home_LL"] = Re_home_LL
df_Re[waves[j], "home_UL"] = Re_home_UL
df_Re[waves[j],"schools_mean"] = Re_schools_mean
df_Re[waves[j],"schools_LL"] = Re_schools_LL
df_Re[waves[j],"schools_UL"] = Re_schools_UL
df_Re[waves[j],"total_mean"] = Re_total_mean
df_Re[waves[j],"total_LL"] = Re_total_LL
df_Re[waves[j],"total_UL"] = Re_total_UL
copy2 = df_Re.copy(deep=True)
df_abs[waves[j],"work_mean"] = abs_work_mean
df_abs[waves[j],"work_LL"] = abs_work_LL
df_abs[waves[j],"work_UL"] = abs_work_UL
df_abs[waves[j], "rest_mean"] = abs_rest_mean
df_abs[waves[j], "rest_LL"] = abs_rest_LL
df_abs[waves[j], "rest_UL"] = abs_rest_UL
df_abs[waves[j], "home_mean"] = abs_home_mean
df_abs[waves[j], "home_LL"] = abs_home_LL
df_abs[waves[j], "home_UL"] = abs_home_UL
df_abs[waves[j],"schools_mean"] = abs_schools_mean
df_abs[waves[j],"schools_LL"] = abs_schools_LL
df_abs[waves[j],"schools_UL"] = abs_schools_UL
df_abs[waves[j],"total_mean"] = abs_total_mean
df_abs[waves[j],"total_LL"] = abs_total_LL
df_abs[waves[j],"total_UL"] = abs_total_UL
df_rel = copy1
df_Re = copy2
#df_abs.to_excel('test.xlsx', sheet_name='Absolute contacts')
#df_rel.to_excel('test.xlsx', sheet_name='Relative contacts')
#df_Re.to_excel('test.xlsx', sheet_name='Effective reproduction number')
print(np.mean(df_abs["1","total_mean"][pd.to_datetime('2020-03-22'):
|
pd.to_datetime('2020-05-04')
|
pandas.to_datetime
|
import os
import pandas as pd
import flopy
import pyemu
ml = flopy.modflow.Modflow.load("dewater.nam",check=False,verbose=False)
ml.external_path = "ref"
ml.model_ws = '.'
decvar_file = "dewater.decvar"
hedcon_file = "dewater.hedcon"
# get hedcon locations
with open(hedcon_file,'r') as f:
[f.readline() for _ in range(4)]
hed_df =
|
pd.read_csv(f,usecols=[2,3],header=None,names=["row","col"],delim_whitespace=True)
|
pandas.read_csv
|
"""
Pull my Garmin sleep data via json requests.
This script was adapted from: https://github.com/kristjanr/my-quantified-sleep
The aforementioned code required the user to manually define
headers and cookies. It also stored all of the data within Night objects.
My modifications include using selenium to drive a Chrome browser. This avoids
the hassle of getting headers and cookies manually (the cookies would have to be updated
every time the Garmin session expired). It also segments data requests because
Garmin will respond with an error if more than 32 days are requested at once. Lastly,
data is stored as a pandas dataframe and then written to a user-defined directory
as a pickle file.
Data is then processed and merged with older data from my Microsoft smartwatch.
The merged data is also saved as pandas dataframes in pickle files.
Lastly, sunrise and sunset data is downloaded for all days in the sleep dataset.
This data is also archived as a pandas dataframe and saved as a pickle file.
The data update process has been broken into steps so that progress can be passed
to the Dash app.
"""
# import base packages
import datetime, json, os, re, sys
from itertools import chain
from os.path import isfile
# import installed packages
import pytz, requests, chardet, brotli
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from seleniumwire import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
# input variables
if os.name == "nt":
# running on my local Windows machine
ENV = "local"
else:
# running on heroku server
ENV = "heroku"
if ENV == "local":
proj_path = "C:/Users/adiad/Anaconda3/envs/SleepApp/sleep_app/" # read/write data dir
else:
proj_path = ""
GOOGLE_CHROME_PATH = '/app/.apt/usr/bin/google-chrome'
CHROMEDRIVER_PATH = '/app/.chromedriver/bin/chromedriver'
garmin_results_pkl_fn = "data/garmin_sleep_df.pkl" # name of pickle file to archive (combining new results with any previous Garmin) for easy updating and subsequent processing
garmin_results_json_fn = "data/new_garmin_sleep.json" # name of json file with only new raw results
garmin_results_csv_fn = "data/garmin_sleep_df.csv" # name of csv file to archive (combining new results with any previous)
all_descr_results_fn = "data/all_sleep_descr_df.pkl" # name of pickle file combining all Garmin & Microsoft sleep session description data
all_event_results_fn = "data/all_sleep_event_df.pkl" # name of pickle file combining all Garmin & Microsoft event data
sun_pkl_fn = "data/sun_df.pkl" # name of pickle file to archive sunrise/sunset data
local_tz = "US/Eastern" # pytz local timezone for sunrise/sunset time conversion
sun_lat = 39.76838 # latitude where sunrise/sunset times are derived from
sun_lon = -86.15804 # longitude where sunrise/sunset times are derived from
run_browser_headless = False # will hide Chrome during execution if True
browser_action_timeout = 60 # max time (seconds) for browser wait operations
start_date = '2017-03-01' # first date to pull sleep data
end_date = str(datetime.date.today() - datetime.timedelta(days=1)) # last date to pull sleep data
user_name = "email address" # Garmin username
password = "password" # Garmin password
signin_url = "https://connect.garmin.com/signin/" # Garmin sign-in webpage
sleep_url_base = "https://connect.garmin.com/modern/sleep/" # Garmin sleep base URL (sans date)
sleep_url_json_req = "https://connect.garmin.com/modern/proxy/wellness-service/wellness/dailySleepsByDate"
def download(start_date, end_date, headers, session_id):
params = (
('startDate', start_date),
('endDate', end_date),
('_', session_id),
)
response = requests.get(sleep_url_json_req, headers=headers, params=params)
if response.status_code != 200:
print("RESPONSE ERROR RECEIVED:")
print('Status code: %d' % response.status_code)
response_dict = json.loads(response.content.decode('UTF-8'))
print('Content: %s' % response_dict["message"])
raise Exception
return response
def download_to_json(start_date, end_date, headers, session_id):
response = download(start_date, end_date, headers, session_id)
# most responses are in ascii (no encoding)
# sporadically a response will have brotli encoding
#print("The response is encoded with:", chardet.detect(response.content))
if chardet.detect(response.content)["encoding"] == 'ascii':
return json.loads(response.content)
else:
    # brotli.decompress returns bytes, so the payload still needs to be parsed
    return json.loads(brotli.decompress(response.content))
def converter(data, return_df=True):
# define functions which pass through None value because
# datetime functions don't accept value None
def sleep_timestamp(val):
if val is None:
return None
else:
return datetime.datetime.fromtimestamp(val / 1000, pytz.utc)
def sleep_timedelta(val):
if val is None:
return None
else:
return datetime.timedelta(seconds=val)
# initialize variables
if return_df:
nights = pd.DataFrame(columns=["Prev_Day", "Bed_Time", "Wake_Time",
"Awake_Dur", "Light_Dur", "Deep_Dur",
"Total_Dur", "Nap_Dur", "Window_Conf"])
i = 0
else:
nights = []
for d in data:
bed_time = sleep_timestamp(d['sleepStartTimestampGMT'])
wake_time = sleep_timestamp(d['sleepEndTimestampGMT'])
previous_day = datetime.date(*[int(datepart) for datepart in d['calendarDate'].split('-')]) - datetime.timedelta(days=1)
deep_duration = sleep_timedelta(d['deepSleepSeconds'])
light_duration = sleep_timedelta(d['lightSleepSeconds'])
total_duration = sleep_timedelta(d['sleepTimeSeconds'])
awake_duration = sleep_timedelta(d['awakeSleepSeconds'])
nap_duration = sleep_timedelta(d['napTimeSeconds'])
window_confirmed = d['sleepWindowConfirmed']
if return_df:
nights.loc[i] = [previous_day, bed_time, wake_time, awake_duration,
light_duration, deep_duration, total_duration,
nap_duration, window_confirmed]
i += 1
else:
# Night is a class from the original my-quantified-sleep repo; this branch is unused when return_df=True
night = Night(bed_time, wake_time, previous_day, deep_duration,
              light_duration, total_duration, awake_duration)
nights.append(night)
return nights
# this function returns a list of all dates in [date1, date2]
def daterange(date1, date2):
date_ls = [date1]
for n in range(int((date2 - date1).days)):
date_ls.append(date_ls[-1] + datetime.timedelta(days=1))
return date_ls
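# Example of what daterange returns (dates are illustrative); note that both
# endpoints are included:
#   daterange(datetime.date(2020, 1, 30), datetime.date(2020, 2, 1))
#   -> [datetime.date(2020, 1, 30), datetime.date(2020, 1, 31), datetime.date(2020, 2, 1)]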
# steps to updating sleep data:
# Step 0: determine which dates are missing in the archived Garmin dataset,
# given the input start & end dates
# Step 1: Login to connect.garmin.com, get user setting credentials
# Step 2: Using credentials, download missing data from Garmin in json
# Step 3: process new Garmin data, merge it with archived data
# Step 4: download sunrise/sunset data for new dates and merge with archived data
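# A hedged sketch of how these steps might be chained by the calling app
# (the Dash app that drives them is not part of this script; the names below
# are the functions defined in this file):
#   msg, nights_df, new_req_dates_ls = step0()
#   if len(new_req_dates_ls) > 0:
#       msg, request = step1()
#       msg, data = step2(request, new_req_dates_ls)
#       step3(nights_df, data, new_req_dates_ls)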
def step0():
# make a list of all dates from first sleep date to last (fills any missing dates)
req_dates_ls = daterange(
datetime.datetime.strptime(start_date, "%Y-%m-%d").date(),
datetime.datetime.strptime(end_date, "%Y-%m-%d").date()
)
# Look for previous results
if isfile(proj_path + garmin_results_pkl_fn):
nights_df = pd.read_pickle(proj_path + garmin_results_pkl_fn)
else:
nights_df = pd.DataFrame()
# if previous results were found, reduce requested dates to those not yet obtained
if len(nights_df) > 0:
# get list of requested dates not yet obtained
archive_dates_ls = list(nights_df["Prev_Day"])
new_req_dates_ls = np.setdiff1d(req_dates_ls, archive_dates_ls)
else:
new_req_dates_ls = req_dates_ls
#print("Archive max: ", max(archive_dates_ls))
#print("Request max: ", max(req_dates_ls))
if len(new_req_dates_ls) == 0:
msg = "Archived data is up to date, no new data is available"
else:
msg = "Current data was checked and " + str(len(new_req_dates_ls)) + " night(s) are needed"
return [msg, nights_df, new_req_dates_ls]
def step1():
opts = webdriver.ChromeOptions()
opts.add_argument('--disable-gpu')
opts.add_argument('--no-sandbox')
opts.add_argument('--disable-dev-shm-usage')
if ENV == "local":
if run_browser_headless:
opts.add_argument("--headless")
assert opts.headless  # Operating in headless mode
else:
opts.binary_location = GOOGLE_CHROME_PATH
# open Chrome and go to Garmin's sign-in page
print("Opening Chrome browser")
driver = webdriver.Chrome(chrome_options=opts)
driver.get(signin_url)
# wait until sign-in fields are visible
wait = WebDriverWait(driver, browser_action_timeout)
wait.until(ec.frame_to_be_available_and_switch_to_it(("id","gauth-widget-frame-gauth-widget")))
wait.until(ec.presence_of_element_located(("id","username")))
# write login info to fields, then submit
print("Signing in to connect.garmin.com")
element = driver.find_element_by_id("username")
driver.implicitly_wait(5)
element.send_keys(user_name)
element = driver.find_element_by_id("password")
element.send_keys(password)
element.send_keys(Keys.RETURN)
wait.until(ec.url_changes(signin_url)) # wait until landing page is requested
driver.switch_to.default_content() # get out of iframe
# get dummy webpage to obtain all request headers
print("Loading dummy page to obtain headers")
driver.get(sleep_url_base + start_date)
request = driver.wait_for_request(sleep_url_base + start_date,
timeout=browser_action_timeout)
if (request.response.status_code != 200) or (not hasattr(request, "headers")):
print("RESPONSE ERROR RECEIVED:")
if (request.response.status_code != 200):
print("Status code: %d" % request.response.status_code)
#response_dict = json.loads(request.content.decode('UTF-8'))
print("Reason: ", request.response.reason)
if not hasattr(request, "headers"):
print("Request did not have 'headers' attribute")
print("Request attributes: ", dir(request))
print("Request headers: ", request.headers)
#raise Exception
# close the Chrome browser
driver.close()
msg = "Logged in to connect.garmin.com"
return [msg, request]
def step2(request, new_req_dates_ls):
# transfer request headers
headers = {
"cookie": request.headers["Cookie"],
"referer": sleep_url_base + start_date,
"accept-encoding": request.headers["Accept-Encoding"],
"accept-language": "en-US", # request.headers["Accept-Language"],
"user-agent": request.headers["User-Agent"],
#"nk": "NT",
"accept": request.headers["Accept"],
"authority": request.headers["Host"],
#"x-app-ver": "4.25.3.0",
"upgrade-insecure-requests": request.headers["Upgrade-Insecure-Requests"]
}
# get the session id from the headers
re_session_id = re.compile(r"(?<=\$ses_id:)(\d+)")
session_id = re_session_id.search(str(request.headers)).group(0)
# Garmin will throw error if request time span exceeds 32 days
# therefore, request 32 days at a time
max_period_delta = datetime.timedelta(days=31)
data = [] # list of jsons, one per time period
get_dates_ls = new_req_dates_ls
while len(get_dates_ls) > 0:
period_start = min(get_dates_ls)
if (max(get_dates_ls) - period_start) > (max_period_delta - datetime.timedelta(days=1)):
period_end = period_start + max_period_delta
else:
period_end = max(get_dates_ls)
# note, this may request some dates which were already obtained
# since a contiguous period is being requested rather than 32 new dates
# duplicated dates will be dropped later
print("Getting data for period: [%s, %s]" % (period_start, period_end))
data.append(download_to_json(period_start, period_end, headers, session_id))
# trim dates list
get_dates_ls = [d for d, s in zip(get_dates_ls, np.array(get_dates_ls) > period_end) if s]
# combine list of jsons into one large json
data = list(chain.from_iterable(data))
# save raw Garmin json to project folder
with open(proj_path + garmin_results_json_fn, 'w') as fp:
json.dump(data, fp)
msg = "Data has been downloaded from Garmin"
return [msg, data]
def step3(nights_df, data, new_req_dates_ls):
# clean the new garmin data
new_nights_df = converter(data)
new_nights_df["Prev_Day"] = pd.to_datetime(new_nights_df["Prev_Day"])
if pd.to_datetime(new_nights_df["Bed_Time"]).dt.tz is None:
new_nights_df["Bed_Time"] = pd.to_datetime(new_nights_df["Bed_Time"]). \
dt.tz_localize(local_tz)
else:
new_nights_df["Bed_Time"] = pd.to_datetime(new_nights_df["Bed_Time"]). \
dt.tz_convert(local_tz)
if pd.to_datetime(new_nights_df["Wake_Time"]).dt.tz is None:
new_nights_df["Wake_Time"] = pd.to_datetime(new_nights_df["Wake_Time"]). \
dt.tz_localize(local_tz)
else:
new_nights_df["Wake_Time"] = pd.to_datetime(new_nights_df["Wake_Time"]). \
dt.tz_convert(local_tz)
new_nights_df["Light_Dur"] = pd.to_timedelta(new_nights_df["Light_Dur"], "days")
new_nights_df["Deep_Dur"] = pd.to_timedelta(new_nights_df["Deep_Dur"], "days")
new_nights_df["Total_Dur"] = pd.to_timedelta(new_nights_df["Total_Dur"], "days")
new_nights_df["Nap_Dur"] =
|
pd.to_timedelta(new_nights_df["Nap_Dur"], "days")
|
pandas.to_timedelta
|
import argparse
from sklearn.metrics import roc_curve, auc
import tensorflow as tf
from tensorflow.python.ops.check_ops import assert_greater_equal_v2
import load_data
from tqdm import tqdm
import numpy as np
import pandas as pd
from math import e as e_VALUE
import tensorflow.keras.backend as Keras_backend
from sklearn.ensemble import RandomForestClassifier
from scipy.special import bdtrc
def func_CallBacks(Dir_Save=''):
mode = 'min'
monitor = 'val_loss'
# checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath= Dir_Save + '/best_model_weights.h5', monitor=monitor , verbose=1, save_best_only=True, mode=mode)
# Reduce_LR = tf.keras.callbacks.ReduceLROnPlateau(monitor=monitor, factor=0.1, min_delta=0.005 , patience=10, verbose=1, save_best_only=True, mode=mode , min_lr=0.9e-5 , )
# CSVLogger = tf.keras.callbacks.CSVLogger(Dir_Save + '/results.csv', separator=',', append=False)
EarlyStopping = tf.keras.callbacks.EarlyStopping( monitor = monitor,
min_delta = 0,
patience = 4,
verbose = 1,
mode = mode,
baseline = 0,
restore_best_weights = True)
return [EarlyStopping] # [checkpointer , EarlyStopping , CSVLogger]
def reading_terminal_inputs():
parser = argparse.ArgumentParser()
parser.add_argument("--epoch" , help="number of epochs")
parser.add_argument("--bsize" , help="batch size")
parser.add_argument("--max_sample" , help="maximum number of training samples")
parser.add_argument("--naug" , help="number of augmentations")
""" Xception VG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
parser.add_argument("--architecture_name", help='architecture name')
args = parser.parse_args()
epoch = int(args.epoch) if args.epoch else 3
number_augmentation = int(args.naug) if args.naug else 3
bsize = int(args.bsize) if args.bsize else 100
max_sample = int(args.max_sample) if args.max_sample else 1000
architecture_name = str(args.architecture_name) if args.architecture_name else 'DenseNet121'
return epoch, bsize, max_sample, architecture_name, number_augmentation
def mlflow_settings():
"""
RUN UI with postgres and HPC:
REMOTE postgres server:
# connecting to remote server through ssh tunneling
ssh -L 5000:localhost:5432 <EMAIL>
# using the mapped port and localhost to view the data
mlflow ui --backend-store-uri postgresql://artinmajdi:1234@localhost:5000/chest_db --port 6789
RUN directly from GitHub or show experiments/runs list:
export MLFLOW_TRACKING_URI=http://127.0.0.1:5000
mlflow runs list --experiment-id <id>
mlflow run --no-conda --experiment-id 5 -P epoch=2 https://github.com/artinmajdi/mlflow_workflow.git -v main
mlflow run mlflow_workflow --no-conda --experiment-id 5 -P epoch=2
PostgreSQL server style
server = f'{dialect_driver}://{username}:{password}@{ip}/{database_name}' """
postgres_connection_type = { 'direct': ('5432', 'data7-db1.cyverse.org'),
'ssh-tunnel': ('5000', 'localhost')
}
port, host = postgres_connection_type['ssh-tunnel'] # 'direct' , 'ssh-tunnel'
username = "artinmajdi"
password = '<PASSWORD>'
database_name = "chest_db_v2"
dialect_driver = 'postgresql'
server = f'{dialect_driver}://{username}:{password}@{host}:{port}/{database_name}'
Artifacts = { 'hpc': 'sftp://mohammadsmajdi@file<EMAIL>iz<EMAIL>.<EMAIL>:/home/u29/mohammadsmajdi/projects/mlflow/artifact_store',
'data7_db1': 'sftp://[email protected]:/home/artinmajdi/mlflow_data/artifact_store'} # :temp2_data7_b
return server, Artifacts['data7_db1']
def architecture(architecture_name: str='DenseNet121', input_shape: list=[224,224,3], num_classes: int=14):
input_tensor=tf.keras.layers.Input(input_shape)
if architecture_name == 'custom':
model = tf.keras.layers.Conv2D(4, kernel_size=(3,3), activation='relu')(input_tensor)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(8, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(16, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Flatten()(model)
model = tf.keras.layers.Dense(32, activation='relu')(model)
model = tf.keras.layers.Dense(num_classes , activation='softmax')(model)
return tf.keras.models.Model(inputs=input_tensor, outputs=[model])
else:
""" Xception VG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
pooling='avg'
weights='imagenet'
include_top=False
if architecture_name == 'xception': model_architecture = tf.keras.applications.Xception
elif architecture_name == 'VGG16': model_architecture = tf.keras.applications.VGG16
elif architecture_name == 'VGG19': model_architecture = tf.keras.applications.VGG19
elif architecture_name == 'ResNet50': model_architecture = tf.keras.applications.ResNet50
elif architecture_name == 'ResNet50V2': model_architecture = tf.keras.applications.ResNet50V2
elif architecture_name == 'ResNet101': model_architecture = tf.keras.applications.ResNet101
elif architecture_name == 'ResNet101V2': model_architecture = tf.keras.applications.ResNet101V2
elif architecture_name == 'ResNet152': model_architecture = tf.keras.applications.ResNet152
elif architecture_name == 'ResNet152V2': model_architecture = tf.keras.applications.ResNet152V2
elif architecture_name == 'InceptionV3': model_architecture = tf.keras.applications.InceptionV3
elif architecture_name == 'InceptionResNetV2': model_architecture = tf.keras.applications.InceptionResNetV2
elif architecture_name == 'MobileNet': model_architecture = tf.keras.applications.MobileNet
elif architecture_name == 'MobileNetV2': model_architecture = tf.keras.applications.MobileNetV2
elif architecture_name == 'DenseNet121': model_architecture = tf.keras.applications.DenseNet121
elif architecture_name == 'DenseNet169': model_architecture = tf.keras.applications.DenseNet169
elif architecture_name == 'DenseNet201': model_architecture = tf.keras.applications.DenseNet201
elif int(list(tf.keras.__version__)[2]) >= 4:
if architecture_name == 'EfficientNetB0': model_architecture = tf.keras.applications.EfficientNetB0
elif architecture_name == 'EfficientNetB1': model_architecture = tf.keras.applications.EfficientNetB1
elif architecture_name == 'EfficientNetB2': model_architecture = tf.keras.applications.EfficientNetB2
elif architecture_name == 'EfficientNetB3': model_architecture = tf.keras.applications.EfficientNetB3
elif architecture_name == 'EfficientNetB4': model_architecture = tf.keras.applications.EfficientNetB4
elif architecture_name == 'EfficientNetB5': model_architecture = tf.keras.applications.EfficientNetB5
elif architecture_name == 'EfficientNetB6': model_architecture = tf.keras.applications.EfficientNetB6
elif architecture_name == 'EfficientNetB7': model_architecture = tf.keras.applications.EfficientNetB7
model = model_architecture( weights = weights,
include_top = include_top,
input_tensor = input_tensor,
input_shape = input_shape,
pooling = pooling) # ,classes=num_classes
KK = tf.keras.layers.Dense( num_classes, activation='sigmoid', name='predictions' )(model.output)
return tf.keras.models.Model(inputs=model.input,outputs=KK)
def weighted_bce_loss(W):
def func_loss(y_true,y_pred):
NUM_CLASSES = y_pred.shape[1]
loss = 0
for d in range(NUM_CLASSES):
y_true = tf.cast(y_true, tf.float32)
mask = tf.keras.backend.cast( tf.keras.backend.not_equal(y_true[:,d], -5),
tf.keras.backend.floatx() )
loss += W[d]*tf.keras.losses.binary_crossentropy( y_true[:,d] * mask,
y_pred[:,d] * mask )
return tf.divide( loss, tf.cast(NUM_CLASSES,tf.float32) )
return func_loss
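# Minimal usage sketch (W and the tensors below are illustrative, not part of
# the training pipeline): a -5 entry in y_true marks a missing label, so the
# mask zeroes that class out before the per-class BCE terms are averaged.
#   loss_fn = weighted_bce_loss(W=[1.0, 2.0])
#   y_true = tf.constant([[1.0, -5.0]])
#   y_pred = tf.constant([[0.9, 0.3]])
#   loss = loss_fn(y_true, y_pred)  # the masked class contributes essentially nothing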
def optimize(dir, train_dataset, valid_dataset, epochs, Info, architecture_name):
# architecture
model = architecture( architecture_name = architecture_name,
input_shape = list(Info.target_size) + [3] ,
num_classes = len(Info.pathologies) )
model.compile( optimizer = tf.keras.optimizers.Adam(learning_rate=0.001),
loss = weighted_bce_loss(Info.class_weights), # tf.keras.losses.binary_crossentropy
metrics = [tf.keras.metrics.binary_accuracy] )
# optimization
history = model.fit( train_dataset,
validation_data = valid_dataset,
epochs = epochs,
steps_per_epoch = Info.steps_per_epoch,
validation_steps = Info.validation_steps,
verbose = 1,
use_multiprocessing = True) # ,callbacks=func_CallBacks(dir + '/model')
# saving the optimized model
model.save( dir + '/model/model.h5',
overwrite = True,
include_optimizer = False )
return model
def evaluate(dir: str, dataset: str='chexpert', batch_size: int=1000, model=tf.keras.Model()):
# Loading the data
Data, Info = load_data.load_chest_xray( dir = dir,
dataset = dataset,
batch_size = batch_size,
mode = 'test' )
score = measure_loss_acc_on_test_data( generator = Data.generator['test'],
model = model,
pathologies = Info.pathologies )
return score
def measure_loss_acc_on_test_data(generator, model, pathologies):
# Looping over all test samples
score_values = {}
NUM_CLASSES = len(pathologies)
generator.reset()
for j in tqdm(range(len(generator.filenames))):
x_test, y_test = next(generator)
full_path, x,y = generator.filenames[j] , x_test[0,...] , y_test[0,...]
x,y = x[np.newaxis,:] , y[np.newaxis,:]
# Estimating the loss & accuracy for instance
eval = model.evaluate(x=x, y=y,verbose=0,return_dict=True)
# predicting the labels for instance
pred = model.predict(x=x,verbose=0)
# Measuring the loss for each class
loss_per_class = [ tf.keras.losses.binary_crossentropy(y[...,d],pred[...,d]) for d in range(NUM_CLASSES)]
# saving all the infos
score_values[full_path] = {'full_path': full_path,
                           'loss_avg': eval['loss'],
                           'acc_avg': eval['binary_accuracy'],
                           'pred': pred[0],
                           'pred_binary': pred[0] > 0.5,
                           'truth': y[0] > 0.5,
                           'loss': np.array(loss_per_class),
                           'pathologies': pathologies}
# converting the outputs into a pandas dataframe
df = pd.DataFrame.from_dict(score_values).T
# resetting the index to integers
df.reset_index(inplace=True)
# # dropping the old index column
df = df.drop(['index'],axis=1)
return df
class Parent_Child():
def __init__(self, subj_info: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
"""
subject_info = {'pred':[], 'loss':[], 'pathologies':['Edema','Cardiomegaly',...]}
1. After creating a class:
SPC = Parent_Child(loss_dict, pred_dict, technique)
2. Update the parent child relationship:
SPC.set_parent_child_relationship(parent_name1, child_name_list1)
SPC.set_parent_child_relationship(parent_name2, child_name_list2)
3. Then update the loss and probabilities
SPC.update_loss_pred()
4. In order to see the updated loss and probabilities use below
loss_new_list = SPC.loss_dict_weighted or SPC.loss_list_weighted
pred_new_list = SPC.pred_dict_weighted or SPC.predlist_weighted
IMPORTANT NOTE:
If there are more than 2 generations, it is absolutely important to enter the subjects in order of seniority
gen1: grandparent (gen1)
gen1_subjx_children: parent (gen2)
gen2_subjx_children: child (gen3)
SPC = Parent_Child(loss_dict, pred_dict, technique)
SPC.set_parent_child_relationship(gen1_subj1, gen1_subj1_children)
SPC.set_parent_child_relationship(gen1_subj2, gen1_subj2_children)
. . .
SPC.set_parent_child_relationship(gen2_subj1, gen2_subj1_children)
SPC.set_parent_child_relationship(gen2_subj2, gen2_subj2_children)
. . .
SPC.update_loss_pred()
"""
self.subj_info = subj_info
self.technique = technique
self.all_parents: dict = {}
self.tuning_variables = tuning_variables
self.loss = subj_info.loss
self.pred = subj_info.pred
self.truth = subj_info.truth
self._convert_inputs_list_to_dict()
def _convert_inputs_list_to_dict(self):
self.loss_dict = {disease:self.subj_info.loss[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.pred_dict = {disease:self.subj_info.pred[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.truth_dict = {disease:self.subj_info.truth[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.loss_dict_weighted = self.loss_dict
self.pred_dict_weighted = self.pred_dict
def set_parent_child_relationship(self, parent_name: str='parent_name', child_name_list: list=[]):
self.all_parents[parent_name] = child_name_list
def update_loss_pred(self):
"""
techniques:
1: coefficient = (1 + parent_loss)
2: coefficient = (2 * parent_pred)
3: coefficient = (2 * parent_pred)
1: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
2: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
3. loss_new = loss_old * coefficient
"""
for parent_name in self.all_parents:
self._update_loss_for_children(parent_name)
self._convert_outputs_to_list()
def _convert_outputs_to_list(self):
self.loss_new = np.array([self.loss_dict_weighted[disease] for disease in self.subj_info.pathologies])
self.pred_new = np.array([self.pred_dict_weighted[disease] for disease in self.subj_info.pathologies])
def _update_loss_for_children(self, parent_name: str='parent_name'):
parent_loss = self.loss_dict_weighted[parent_name]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
TV = self.tuning_variables[ self.technique ]
if TV['mode'] == 'truth': parent_truth_pred = parent_truth
elif TV['mode'] == 'pred': parent_truth_pred = parent_pred
else: parent_truth_pred = 1.0
if self.technique == 1: coefficient = TV['weight'] * parent_loss + TV['bias']
elif self.technique == 2: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
elif self.technique == 3: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
for child_name in self.all_parents[parent_name]:
new_child_loss = self._measure_new_child_loss(coefficient, parent_name, child_name)
self.loss_dict_weighted[child_name] = new_child_loss
self.pred_dict_weighted[child_name] = 1 - np.power(e_VALUE , -new_child_loss)
self.pred_dict[child_name] = 1 - np.power(e_VALUE , -self.loss_dict[child_name])
def _measure_new_child_loss(self, coefficient: float=0.0, parent_name: str='parent_name', child_name: str='child_name'):
TV = self.tuning_variables[ self.technique ]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
if TV['mode'] == 'truth': loss_activated = (parent_truth < 0.5 )
elif TV['mode'] == 'pred': loss_activated = (parent_pred < TV['parent_pred_threshold'] )
else: loss_activated = True
old_child_loss = self.loss_dict_weighted[child_name]
if self.technique == 1: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 2: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 3: new_child_loss = old_child_loss * coefficient
return new_child_loss
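# Worked example for technique 3 (the numbers and the tuning_variables entry are
# illustrative): with tuning_variables[3] = {'mode': 'pred', 'weight': 2,
# 'bias': 0, 'parent_pred_threshold': 0.5} and parent_pred = 0.2,
#   coefficient    = 2 * 0.2 + 0 = 0.4
#   new_child_loss = old_child_loss * 0.4         (e.g. 1.0 -> 0.4)
#   new_child_pred = 1 - exp(-new_child_loss)     (e.g. ~0.33)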
class Measure_InterDependent_Loss_Aim1_1(Parent_Child):
def __init__(self,score: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
score['loss_new'] = score['loss']
score['pred_new'] = score['pred']
self.score = score
self.technique = technique
for subject_ix in tqdm(self.score.index):
Parent_Child.__init__(self, subj_info=self.score.loc[subject_ix], technique=technique, tuning_variables=tuning_variables)
self.set_parent_child_relationship(parent_name='Lung Opacity' , child_name_list=['Pneumonia', 'Atelectasis','Consolidation','Lung Lesion', 'Edema'])
self.set_parent_child_relationship(parent_name='Enlarged Cardiomediastinum', child_name_list=['Cardiomegaly'])
self.update_loss_pred()
self.score.loss_new.loc[subject_ix] = self.loss_new
self.score.pred_new.loc[subject_ix] = self.pred_new
def apply_new_loss_techniques_aim1_1(pathologies: list=[], score: pd.DataFrame.dtypes={}, tuning_variables: dict={}):
L = len(pathologies)
accuracies = np.zeros((4,L))
measured_auc = np.zeros((4,L))
FR = list(np.zeros(4))
for technique in range(4):
# extracting the output predictions
if technique == 0:
FR[technique] = score
output = score.pred
else:
FR[technique] = Measure_InterDependent_Loss_Aim1_1(score=score, technique=technique, tuning_variables=tuning_variables)
output = FR[technique].score.pred_new
# Measuring accuracy
func = lambda x1, x2: [ (x1[j] > 0.5) == (x2[j] > 0.5) for j in range(len(x1))]
pred_acc = score.truth.combine(output,func=func).to_list()
pred_acc = np.array(pred_acc).mean(axis=0)
prediction_table = np.stack(score.pred)
truth_table = np.stack(score.truth)
for d in range(prediction_table.shape[1]):
fpr, tpr, thresholds = roc_curve(truth_table[:,d], prediction_table[:,d], pos_label=1)
measured_auc[technique, d] = auc(fpr, tpr)
accuracies[technique,:] = np.floor( pred_acc*1000 ) / 10
class Outputs:
def __init__(self,accuracies, measured_auc, FR, pathologies):
self.accuracy = self._converting_to_dataframe(input_table=accuracies , columns=pathologies)
self.auc = self._converting_to_dataframe(input_table=measured_auc, columns=pathologies)
self.details = FR
self.pathologies = pathologies
def _converting_to_dataframe(self, input_table, columns):
df = pd.DataFrame(input_table, columns=columns)
df['technique'] = ['original','1','2','3']
df = df.set_index('technique').T
return df
return Outputs(accuracies=accuracies, measured_auc=measured_auc, FR=FR,pathologies=pathologies)
def apply_nan_back_to_truth(truth, how_to_treat_nans):
# changing the samples with uncertain truth label to nan
truth[ truth == -10] = np.nan
# how to treat the nan labels in the original dataset before measuring the average accuracy
if how_to_treat_nans == 'ignore': truth[ truth == -5] = np.nan
elif how_to_treat_nans == 'pos': truth[ truth == -5] = 1
elif how_to_treat_nans == 'neg': truth[ truth == -5] = 0
return truth
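# Quick illustration (values made up): with how_to_treat_nans='pos',
#   apply_nan_back_to_truth(np.array([1., -5., -10., 0.]), 'pos')
#   -> array([ 1.,  1., nan,  0.])
# i.e. -10 (uncertain) always becomes nan, while -5 (missing) is imputed as positive.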
def measure_mean_accruacy_chexpert(truth, prediction, how_to_treat_nans):
""" prediction & truth: num_samples x num_classes """
pred_classes = prediction > 0.5
# truth_nan_applied = self._truth_with_nan_applied()
truth_nan_applied = apply_nan_back_to_truth(truth=truth, how_to_treat_nans=how_to_treat_nans)
# measuring the binary truth labels (the nan samples will be fixed below)
truth_binary = truth_nan_applied > 0.5
truth_pred_compare = (pred_classes == truth_binary).astype(float)
# replacing the nan samples back to their nan value
truth_pred_compare[np.where(np.isnan(truth_nan_applied))] = np.nan
# measuring the average accuracy over all samples after ignoring the nan samples
accuracy = np.nanmean(truth_pred_compare, axis=0)*100
# this is a safety measure, in case one of the classes' overall accuracy was also nan; if removed, the integer format below would change to very long floats
accuracy[np.isnan(accuracy)] = 0
accuracy = (accuracy*10).astype(int)/10
return accuracy
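# Minimal usage sketch (arrays are illustrative): a -5 label is ignored here,
# so only the known labels enter the per-class mean accuracy.
#   truth = np.array([[1.0, -5.0], [0.0, 1.0]])
#   pred  = np.array([[0.9,  0.2], [0.1, 0.8]])
#   measure_mean_accruacy_chexpert(truth, pred, how_to_treat_nans='ignore')
#   # -> array([100., 100.]) since every non-ignored label is predicted correctly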
def measure_mean_uncertainty_chexpert(truth=np.array([]), uncertainty=np.array([]), how_to_treat_nans='ignore'):
""" uncertainty & truth: num_samples x num_classes """
# adding the nan values back to arrays
truth_nan_applied = apply_nan_back_to_truth(truth, how_to_treat_nans)
# replacing the nan samples back to their nan value
uncertainty[np.where(np.isnan(truth_nan_applied))] = np.nan
# measuring the average uncertainty over all samples after ignoring the nan samples
uncertainty_mean = np.nanmean(uncertainty , axis=0)
# this is a safety measure, in case one of the classes' overall uncertainty was also nan; if removed, the integer format below would change to very long floats
uncertainty_mean[np.isnan(uncertainty_mean)] = 0
uncertainty_mean = (uncertainty_mean*1000).astype(int)/1000
return uncertainty_mean
class Measure_Accuracy_Aim1_2():
def __init__(self, predict_accuracy_mode: bool=False , model: tf.keras.models.Model.dtype='' , generator=tf.keras.preprocessing.image.ImageDataGenerator() , how_to_treat_nans: str='ignore', uncertainty_type: str='std'):
"""
how_to_treat_nans:
ignore: ignoring the nan samples when measuring the average accuracy
pos: treat the missing (-5) labels as positive
neg: treat the missing (-5) labels as negative """
self.predict_accuracy_mode = predict_accuracy_mode
self.how_to_treat_nans = how_to_treat_nans
self.generator = generator
self.model = model
self.uncertainty_type = uncertainty_type
self._setting_params()
def _setting_params(self):
self.full_data_length, self.num_classes = self.generator.labels.shape
self.batch_size = self.generator.batch_size
self.number_batches = int(np.ceil(self.full_data_length/self.batch_size))
self.truth = self.generator.labels.astype(float)
def loop_over_whole_dataset(self):
probs = np.zeros(self.generator.labels.shape)
# Looping over all batches
# Keras_backend.clear_session()
self.generator.reset()
np.random.seed(1)
for batch_index in tqdm(range(self.number_batches),disable=False):
# extracting the indexes for batch "batch_index"
self.generator.batch_index = batch_index
indexes = next(self.generator.index_generator)
# print(' extracting data -------')
self.generator.batch_index = batch_index
x, _ = next(self.generator)
# print(' predicting the labels -------')
probs[indexes,:] = self.model.predict(x,verbose=0)
# Measuring the accuracy over whole augmented dataset
if self.predict_accuracy_mode:
accuracy = measure_mean_accruacy_chexpert(truth=self.truth.copy(), prediction=probs.copy(), how_to_treat_nans=self.how_to_treat_nans)
return probs, accuracy
def loop_over_all_augmentations(self,number_augmentation: int=0):
self.number_augmentation = number_augmentation
self.probs_all_augs_3d = np.zeros((1 + number_augmentation , self.full_data_length , self.num_classes))
self.accuracy_all_augs_3d = np.zeros((1 + number_augmentation , self.num_classes))
# Looping over all augmentation scenarios
for ix_aug in range(number_augmentation):
print(f'augmentation {ix_aug}/{number_augmentation}')
probs, accuracy = self.loop_over_whole_dataset()
self.probs_all_augs_3d[ ix_aug,...] = probs
self.accuracy_all_augs_3d[ix_aug,...] = accuracy
# measuring the average probability over all augmented data
self.probs_avg_2d = np.mean( self.probs_all_augs_3d, axis=0)
if self.uncertainty_type == 'std':
self.probs_std_2d = np.std(self.probs_all_augs_3d, axis=0)
# Measuring the accuracy for the new estimated probability for each sample over all augmented data
# self.accuracy_final = self._measure_mean_accruacy(self.probs_avg_2d)
# self.uncertainty_final = self._measure_mean_std(self.probs_std_2d)
self.accuracy_final = measure_mean_accruacy_chexpert(truth=self.truth.copy(), prediction=self.probs_avg_2d.copy(), how_to_treat_nans=self.how_to_treat_nans)
self.uncertainty_final = measure_mean_uncertainty_chexpert(truth=self.truth.copy(), uncertainty=self.probs_std_2d.copy(), how_to_treat_nans=self.how_to_treat_nans)
def apply_technique_aim_1_2(how_to_treat_nans='ignore', data_generator='', data_generator_aug='', model='', number_augmentation=3, uncertainty_type='std'):
print('running the evaluation on original non-augmented data')
MA = Measure_Accuracy_Aim1_2( predict_accuracy_mode = True,
generator = data_generator,
model = model,
how_to_treat_nans = how_to_treat_nans,
uncertainty_type = uncertainty_type)
probs_2d_orig, old_accuracy = MA.loop_over_whole_dataset()
print(' running the evaluation on augmented data including the uncertainty measurement')
MA = Measure_Accuracy_Aim1_2( predict_accuracy_mode = True,
generator = data_generator_aug,
model = model,
how_to_treat_nans = how_to_treat_nans,
uncertainty_type = uncertainty_type)
MA.loop_over_all_augmentations(number_augmentation=number_augmentation)
final_results = { 'old-accuracy': old_accuracy,
'new-accuracy': MA.accuracy_final,
'std' : MA.uncertainty_final}
return probs_2d_orig, final_results, MA
def estimate_maximum_and_change(all_accuracies=np.array([]), pathologies=[]):
columns = ['old-accuracy', 'new-accuracy', 'std']
# creating a dataframe from accuracies
df = pd.DataFrame(all_accuracies , index=pathologies)
# adding the 'maximum' & 'change' columns
df['maximum'] = df.columns[ df.values.argmax(axis=1) ]
df['change'] = df[columns[1:]].max(axis=1) - df[columns[0]]
# replacing "0" values to "--" for readability
df.maximum[df.change==0.0] = '--'
df.change[df.change==0.0] = '--'
return df
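# Minimal usage sketch (numbers are illustrative): all_accuracies is expected
# as a dict keyed like the 'columns' list above, e.g. the final_results dict
# returned by apply_technique_aim_1_2.
#   acc = {'old-accuracy': np.array([80.0, 70.0]),
#          'new-accuracy': np.array([82.5, 70.0]),
#          'std':          np.array([ 0.1,  0.2])}
#   estimate_maximum_and_change(all_accuracies=acc, pathologies=['Edema', 'Cardiomegaly'])
#   # 'Edema' -> maximum='new-accuracy', change=2.5; 'Cardiomegaly' -> '--' for both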
# def apply_technique_aim_1_2_with_dataframe(how_to_treat_nans='ignore', pathologies=[], data_generator='', data_generator_aug='', model='', uncertainty_type='std'):
# outputs, MA = apply_technique_aim_1_2(how_to_treat_nans=how_to_treat_nans, data_generator=data_generator, data_generator_aug=data_generator_aug, model=model, uncertainty_type=uncertainty_type)
# df = estimate_maximum_and_change(all_accuracies=outputs, pathologies=pathologies)
# return df, outputs, MA
""" crowdsourcing technique aim 1_3 """
def apply_technique_aim_1_3(data={}, num_simulations=20, feature_columns=[], ARLS={}):
def assigning_worker_true_labels(seed_num=1, true=[], labelers_strength=0.5):
# setting the random seed
# np.random.seed(seed_num)
# number of samples and labelers/workers
num_samples = true.shape[0]
# finding a random number for each instance
true_label_assignment_prob = np.random.random(num_samples)
# samples that will have an inaccurate true label
false_samples = true_label_assignment_prob < 1 - labelers_strength
# measuring the new labels for each labeler/worker
worker_true = true > 0.5
worker_true[ false_samples ] = ~ worker_true[ false_samples ]
return worker_true
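# Illustrative behaviour (inputs made up): with labelers_strength = 0.8,
# roughly 20% of the returned boolean labels are flipped relative to
# (true > 0.5), because false_samples ~ Uniform(0, 1) < 1 - 0.8.
#   assigning_worker_true_labels(true=np.array([1., 0., 1., 0.]), labelers_strength=0.8)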
def assigning_random_labelers_strengths(num_labelers=10, low_dis=0.3, high_dis=0.9):
labeler_names = [f'labeler_{j}' for j in range(num_labelers)]
# if num_labelers > 1:
# ls1 = np.random.uniform( low = 0.1,
# high = 0.3,
# size = int(num_labelers/2))
# ls2 = np.random.uniform( low = 0.7,
# high = 0.9,
# size = num_labelers - int(num_labelers/2))
# labelers_strength = np.concatenate((ls1 , ls2),axis=0)
# else:
labelers_strength = np.random.uniform( low = low_dis,
high = high_dis,
size = num_labelers)
return pd.DataFrame( {'labelers_strength': labelers_strength}, index = labeler_names)
# TODO I should repeat this for multiple seeds and average
np.random.seed(11)
# setting a random strength for each labeler/worker
labelers_strength = assigning_random_labelers_strengths( num_labelers = ARLS['num_labelers'],
low_dis = ARLS['low_dis'],
high_dis = ARLS['high_dis'])
predicted_labels_all_sims = {'train':{}, 'test':{}}
true_labels = {'train':pd.DataFrame(), 'test':pd.DataFrame()}
uncertainty = {'train':pd.DataFrame(), 'test':pd.DataFrame()}
for LB_index, LB in enumerate(tqdm(labelers_strength.index, desc='workers')):
# Initialization
for mode in ['train', 'test']:
predicted_labels_all_sims[mode][LB] = {}
true_labels[mode]['truth'] = data[mode].true.copy()
""" Looping over all simulations. this is to measure uncertainty """
# extracting the simulated true labels based on the worker strength
true_labels['train'][LB] = assigning_worker_true_labels( seed_num = 0, # LB_index,
true = data['train'].true.values,
labelers_strength = labelers_strength.T[LB].values )
true_labels['test'][LB] = assigning_worker_true_labels( seed_num = 0, # LB_index,
true = data['test'].true.values,
labelers_strength = labelers_strength.T[LB].values )
for i in range(num_simulations):
# training a random forest on the aforementioned labels
RF = RandomForestClassifier( n_estimators = 5,
max_depth = 10,
random_state = i)
RF.fit( X = data['train'][feature_columns],
y = true_labels['train'][LB] )
# predicting the labels using trained networks for both train and test data
for mode in ['train', 'test']:
predicted_labels_all_sims[mode][LB][f'simulation_{i}'] = RF.predict( data[mode][feature_columns] )
# measuring the prediction and uncertainty values after MV over all simulations
for mode in ['train', 'test']:
# converting to dataframe
predicted_labels_all_sims[mode][LB] =
|
pd.DataFrame(predicted_labels_all_sims[mode][LB], index=data[mode].index)
|
pandas.DataFrame
|
from config import Config
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from dataset import ImageDataSet
from tqdm import tqdm
import time
from model import EAST
from loss import LossFunction
from utils import non_maximal_supression, draw_bbs, reverse_shift
import math
import cv2
do_eval_trainset = False
do_eval_devset = True
config = {k:v for k,v in vars(Config).items() if not k.startswith("__")}
geometry = config['geometry']
label_method = config['label_method']
use_formatted_data = config['use_formatted_data']
train_data_dir = config['train_data_dir']
dev_data_dir = config['dev_data_dir']
cuda = config['cuda']
smoothed_l1_loss_beta = config["smoothed_l1_loss_beta"]
trained_model_file = config['trained_model_file']
eval_mini_batch_size = config['eval_mini_batch_size']
score_threshold = config['score_threshold']
iou_threshold = config['iou_threshold']
max_boxes = config['max_boxes']
representation = geometry + "_" + label_method
model = EAST(geometry=geometry)
loss_function = LossFunction()
if cuda:
model.cuda()
loss_function.cuda()
model.load_state_dict(torch.load(trained_model_file))
model.eval()
def eval_dataset(data_dir):
data_images_dir = os.path.join(data_dir, "images")
data_annotations_dir = os.path.join(data_dir, "annotations")
if use_formatted_data:
data_annotations_formatted_dir = data_annotations_dir + "_" + representation
data_images_pred_dir = os.path.join(data_dir, "images_pred")
data_annotations_pred_dir = os.path.join(data_dir, "annotations_pred")
if not os.path.exists(data_images_pred_dir):
os.mkdir(data_images_pred_dir)
if not os.path.exists(data_annotations_pred_dir):
os.mkdir(data_annotations_pred_dir)
dataset = ImageDataSet(data_images_dir, data_annotations_formatted_dir)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=eval_mini_batch_size, shuffle=True)
score_loss, geometry_loss, loss = 0, 0, 0
boxes_pred = []
n_mini_batches = math.ceil(len(dataset)/eval_mini_batch_size)
for i, data_egs in tqdm(enumerate(data_loader, start=1), total=n_mini_batches, desc="Evaluating Mini Batches:"):
image_names, images, score_maps, geometry_maps = data_egs
if cuda:
images = images.cuda()
score_maps = score_maps.cuda()
geometry_maps = geometry_maps.cuda()
score_maps_pred, geometry_maps_pred = model.forward(images)
mini_batch_loss = loss_function.compute_loss(score_maps.double(),
score_maps_pred.double(),
geometry_maps.double(),
geometry_maps_pred.double(),
smoothed_l1_loss_beta = smoothed_l1_loss_beta)
mini_batch_loss_of_score_item = loss_function.loss_of_score.item()
mini_batch_loss_of_geometry_item = loss_function.loss_of_geometry.item()
mini_batch_loss_item = mini_batch_loss.item()
score_loss += mini_batch_loss_of_score_item
geometry_loss += mini_batch_loss_of_geometry_item
loss += mini_batch_loss_item
# detach before converting to numpy so no autograd graph is kept
score_maps_pred = score_maps_pred.detach().cpu().numpy()
geometry_maps_pred = geometry_maps_pred.detach().cpu().numpy()
if representation == "QUAD_multiple":
geometry_maps_pred = reverse_shift(geometry_maps_pred) # [8, 128, 128]
#print("NMS Started")
nms_tic = time.time()
mini_batch_boxes_pred = non_maximal_supression(score_maps_pred,
geometry_maps_pred,
score_threshold=score_threshold,
iou_threshold=iou_threshold,
max_boxes=max_boxes)
nms_toc = time.time()
elapsed_time = time.strftime("%H:%M:%S", time.gmtime(nms_toc - nms_tic))
#print("NMS Ended", "Duration", toc-tic)
boxes_pred.extend(mini_batch_boxes_pred)
for image_name, eg_boxes_pred in zip(image_names, mini_batch_boxes_pred):
annotation_name = image_name.split(".")[0] + ".csv"
image_path = os.path.join(data_images_dir, image_name)
annotation_path = os.path.join(data_annotations_dir, annotation_name)
image_pred_path = os.path.join(data_images_pred_dir, image_name)
annotation_pred_path = os.path.join(data_annotations_pred_dir, annotation_name)
image = cv2.imread(image_path)
geometry_map = pd.read_csv(annotation_path, header=None).iloc[:,:-1].values.tolist()
image = draw_bbs(image, geometry_map, color=(255, 0, 0)) #BGR
image = draw_bbs(image, eg_boxes_pred, color=(0, 0, 255)) #BGR
cv2.imwrite(image_pred_path, image)
eg_boxes_pred =
|
pd.DataFrame(eg_boxes_pred)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 17:19:41 2020
@author: <NAME>
"""
import pandas as pd
def int_br(x):
return int(x.replace('.',''))
def float_br(x):
return float(x.replace('.', '').replace(',','.'))
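# Quick examples of the Brazilian number formats handled above:
#   int_br('1.234')      -> 1234     ('.' is the thousands separator)
#   float_br('1.234,56') -> 1234.56  (',' is the decimal separator)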
dia = '2805'
file_HU = '~/ownCloud/sesab/exporta_boletim_epidemiologico_csv_{}.csv'.format(dia)
datahu = pd.read_csv(file_HU, sep=';', decimal=',', converters={'CASOS CONFIRMADOS': int_br})
rday = 'DATA DO BOLETIM'
datahu[rday] = pd.to_datetime(datahu[rday], dayfirst=True)
datahu['DayNum'] = datahu[rday].dt.dayofyear
ref = pd.Timestamp(year=2020, month=2, day=27).dayofyear
datahu['ts0'] = datahu['DayNum'] - ref
colsutils = ['DATA DO BOLETIM', 'ts0', 'CASOS CONFIRMADOS', 'CASOS ENFERMARIA',
'CASOS UTI','TOTAL OBITOS']
dfi = datahu[colsutils]
dff = pd.DataFrame(columns=colsutils)
for i, day in enumerate(dfi['ts0'].unique()):
line = dfi[dfi['ts0'] == day]
line = line.sort_values(by=['DATA DO BOLETIM'], ascending=False)
line.reset_index(drop=True, inplace=True)
dff.loc[i] = line.loc[0]
cols = ['dates', 'ts0', 'infec', 'leitos', 'uti', 'dthcm']
dff.columns = cols
df0 =
|
pd.read_csv('data_0.csv')
|
pandas.read_csv
|
import pandas as pd
def cleaner(extractor):
tables = extractor.get_all_tables()
df = extractor.get_column_list(tables)
df['default'] = df['default'].fillna('0')
df.loc[df['default'].str.contains('next', na=False), 'default'] = '1'
df.loc[~df['default'].isin(['0', '1']), 'default'] = '2'
df = pd.concat([df, pd.get_dummies(df['default'])], axis=1)
df = pd.concat([df,
|
pd.get_dummies(df['type'])
|
pandas.get_dummies
|
#########
# GLOBALS
#########
from itertools import islice
import pandas as pd
import dateutil.parser as dp
from scipy.stats import boxcox
from realtime_talib import Indicator
#from nltk import word_tokenize
#from nltk.corpus import stopwords
#from nltk.stem.porter import *
#from scipy.integrate import simps
#from sklearn.model_selection import train_test_split
#from sklearn.utils import resample
#from selenium import webdriver
RANDOM_STATE = 42
#######################
# GENERAL PREPROCESSORS
#######################
def calculate_indicators(ohlcv_df):
ohlcv_df = ohlcv_df.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv_df.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv_df = ohlcv_df.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int(dp.parse(temp_ohlcv_df.iloc[index]["Date"]).strftime('%s')) for index in range(temp_ohlcv_df.shape[0])]
temp_ohlcv_df["Date"] = pd.Series(unix_times).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv_df.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv_df = temp_ohlcv_df.iloc[::-1]
# Rate of Change Ratio
rocr3 = Indicator(temp_ohlcv_df, "ROCR", 3).getHistorical()[::-1]
rocr6 = Indicator(temp_ohlcv_df, "ROCR", 6).getHistorical()[::-1]
# Average True Range
atr = Indicator(temp_ohlcv_df, "ATR", 14).getHistorical()[::-1]
# On-Balance Volume
obv = Indicator(temp_ohlcv_df, "OBV").getHistorical()[::-1]
# Triple Exponential Moving Average
trix = Indicator(temp_ohlcv_df, "TRIX", 20).getHistorical()[::-1]
# Momentum
mom1 = Indicator(temp_ohlcv_df, "MOM", 1).getHistorical()[::-1]
mom3 = Indicator(temp_ohlcv_df, "MOM", 3).getHistorical()[::-1]
# Average Directional Index
adx14 = Indicator(temp_ohlcv_df, "ADX", 14).getHistorical()[::-1]
adx20 = Indicator(temp_ohlcv_df, "ADX", 20).getHistorical()[::-1]
# Williams %R
willr = Indicator(temp_ohlcv_df, "WILLR", 14).getHistorical()[::-1]
# Relative Strength Index
rsi6 = Indicator(temp_ohlcv_df, "RSI", 6).getHistorical()[::-1]
rsi12 = Indicator(temp_ohlcv_df, "RSI", 12).getHistorical()[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = Indicator(
temp_ohlcv_df, "MACD", 12, 26, 9).getHistorical()
    macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = Indicator(temp_ohlcv_df, "MA", 6, 1).getHistorical()[::-1]
ema12 = Indicator(temp_ohlcv_df, "MA", 12, 1).getHistorical()[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr),
len(rsi6), len(rsi12), len(macd), len(
macd_signal), len(macd_hist),
len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv_df = ohlcv_df[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv_df["MOM (1)"] = pd.Series(mom1[:min_length]).values
ohlcv_df["MOM (3)"] = pd.Series(mom3[:min_length]).values
ohlcv_df["ADX (14)"] = pd.Series(adx14[:min_length]).values
ohlcv_df["ADX (20)"] = pd.Series(adx20[:min_length]).values
ohlcv_df["WILLR"] =
|
pd.Series(willr[:min_length])
|
pandas.Series
|
"""
This script will be used to get the batch status and also to gather container resource usage if specified.
Examples:
python3 get_batch_status.py -j "default-gwf-core-dev-sv-fsx-SCRATCH" -s "2022-03-23 11:47:00"
python3 get_batch_status.py -j "default-gwf-core-dev-sv-fsx-SCRATCH" -s "2022-03-23 11:47:00" -l "/aws/ecs/containerinsights/spot-gwf-core-dev-sv-fsx-SCRATCH_Batch_64d2263e-1ba8-3c96-a34f-2d6ab90f1ebc/performance"
python3 get_batch_status.py -j "default-gwf-core-dev-sv-fsx-SCRATCH" -s "2022-03-24 20:00:00" -l "/aws/ecs/containerinsights/spot-gwf-core-dev-sv-fsx-SCRATCH_Batch_64d2263e-1ba8-3c96-a34f-2d6ab90f1ebc/performance"
python3 get_batch_status.py -j "default-gwf-core-dev-sv-fsx-SCRATCH" -s "2022-03-24 20:00:00" -l "/aws/ecs/containerinsights/spot-gwf-core-dev-sv-fsx-SCRATCH_Batch_64d2263e-1ba8-3c96-a34f-2d6ab90f1ebc/performance" -i 10000
"""
import boto3
import pandas as pd
import datetime, time
import numpy as np
import os
import argparse
import logging
import ast
# Initialise Logger
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger("batch-status-report")
handler = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def parse_args():
"""
Use the argparse library to provide special help text and simplify argument handling
:return: tuple
"""
parser = argparse.ArgumentParser(description='Run Batch Status. This can take multiple arguments.')
parser.add_argument('-p', '--aws_profile', dest='aws_profile', required=False, default="default",
help="The AWS Profile to be used.")
parser.add_argument('-r', '--aws_region', dest='aws_region', required=False, default="us-east-2",
help="The AWS Region to be used.")
parser.add_argument('-j', '--batch_job_queue', dest='batch_job_queue', required=True,
help="The AWS Batch Job Queue which needs to be looked at for jobs.")
parser.add_argument('-s', '--batch_start_time', dest='batch_start_time', required=True,
help="Provide the Batch Start Time after which the jobs are needed to be looked upon. Should be in YYYY-MM-DD HH:MM:SS format only")
parser.add_argument('-l', '--cloudwatch_log_group_name', dest='cloudwatch_log_group_name', required=False, default=None,
help="This is needed only when the cloudwatch container insights is enabled as per : https://docs.opendata.aws/genomics-workflows/orchestration/cost-effective-workflows/cost-effective-workflows.html .")
parser.add_argument('-i', '--cloudwatch_query_interval', dest='cloudwatch_query_interval', required=False, default=600, type=int,
                        help="The interval (secs) over which each query is executed. It should not be larger, because a query returns at most 10k records and CloudWatch stores one log entry per job per minute. Calculate accordingly.")
args = parser.parse_args()
return args.aws_profile, args.aws_region, args.batch_job_queue, args.batch_start_time, args.cloudwatch_log_group_name, args.cloudwatch_query_interval
def get_batch_job_id_list():
"""This function will just get the job ids as per status
Returns:
List: List of all Job Ids from these 3 statuses : 'SUCCEEDED', 'FAILED', 'RUNNING'
"""
status_needed = ['SUCCEEDED', 'FAILED', 'RUNNING']
logger.info("Running for following statuses only : %s " % str(status_needed))
epoch_batch_start = time.mktime(datetime.datetime.fromisoformat(batch_start_time).timetuple()) * 1000
logger.info("Epoch Batch Start Time : %s" % epoch_batch_start)
job_id_list = []
for job_status in status_needed:
temp_job_ids = []
logger.info("Getting Jobs IDs with %s status" % job_status)
response = batch_client.list_jobs(
jobQueue=batch_job_queue,
jobStatus=job_status,
maxResults=123
)
for x in response['jobSummaryList']:
if x['createdAt'] > epoch_batch_start:
temp_job_ids.append(x['jobId'])
nextToken = response['nextToken'] if 'nextToken' in str(response) else None
# Paginate in loop
while nextToken != None:
response = batch_client.list_jobs(
jobQueue=batch_job_queue,
jobStatus=job_status,
maxResults=123,
nextToken=nextToken
)
for x in response['jobSummaryList']:
if x['createdAt'] > epoch_batch_start:
temp_job_ids.append(x['jobId'])
if 'nextToken' in response.keys():
nextToken = response['nextToken']
else:
nextToken = None
logger.info("No. of jobs with %s status : %s" % (job_status, len(temp_job_ids)))
job_id_list.extend(temp_job_ids)
logger.info("No. of Job Ids fetched : %s " % len(job_id_list))
return job_id_list
def get_job_details(job_id_list):
"""This function will return all the detail related to a job
Args:
job_id_list (List): The list of job ids for which the details are needed to be fetched.
Returns:
Pandas DF: The Pandas DF with all the job details.
"""
final_list = []
for job_id in job_id_list:
try:
response = batch_client.describe_jobs(jobs=[job_id])
job_status = response['jobs'][0]['status']
if job_status == "RUNNING":
container_instance_arns = [response['jobs'][0]['container']['containerInstanceArn'].split("/")[-1]]
task_ids = [response['jobs'][0]['container']['taskArn'].split("/")[-1]]
else:
container_instance_arns = list(set([i['container']['containerInstanceArn'].split("/")[-1] for i in response['jobs'][0]['attempts']]))
task_ids = list(set([i['container']['taskArn'].split("/")[-1] for i in response['jobs'][0]['attempts']]))
reasons = None
for i in response['jobs'][0]['attempts']:
if 'reason' in str(i):
reasons = i['container']['reason'] if not reasons else reasons + \
"|" + i['container']['reason']
final_list.append({
'job_id': job_id,
'job_name': response['jobs'][0]['jobName'],
'job_status': job_status,
'started_at': response['jobs'][0]['startedAt'] if 'startedAt' in response['jobs'][0] else None,
'stopped_at': response['jobs'][0]['stoppedAt'] if 'stoppedAt' in response['jobs'][0] else None,
"image": response['jobs'][0]['container']['image'],
"vcpus": response['jobs'][0]["container"]['vcpus'] if 'vcpus' in str(response['jobs'][0]["container"]) else response['jobs'][0]["container"]['resourceRequirements'][0]['value'],
"memory": response['jobs'][0]["container"]['memory'] if 'memory' in str(response['jobs'][0]["container"]) else response['jobs'][0]["container"]['resourceRequirements'][1]['value'],
'num_attempts': len(response['jobs'][0]['attempts']),
'num_of_instances': len(container_instance_arns),
'instance_arn_endings': '|'.join(container_instance_arns),
'task_id': '|'.join(task_ids),
"reasons": reasons
})
except:
logger.warning("The job id : %s failed parsing with response: %s" % (job_id, response))
df = pd.DataFrame(final_list)
df['started_at'] = pd.to_datetime(df['started_at'], errors="coerce", unit='ms')
df['stopped_at'] = pd.to_datetime(df['stopped_at'], errors="coerce", unit='ms')
return df
def get_resource_usage():
"""This function will fetch the cpu and mem usage from cloudwatch insights -> container Insights
Returns:
Pandas DF: The dataframe with the task and resource consumption details.
"""
query = "fields @message"
epoch_start = int(datetime.datetime.strptime(batch_start_time, "%Y-%m-%d %H:%M:%S").timestamp())
epoch_end = epoch_start + cloudwatch_query_interval
current_epoch_time = int(datetime.datetime.now().timestamp())
final_output = []
logger.info("Starting to query Cloud Watch Logs Group with an Interval : %s from start time : %s" % (cloudwatch_query_interval, batch_start_time))
    logger.info("Please be patient, this may take some time...")
try:
while epoch_end < current_epoch_time:
# logger.info("Query Input --- Start Time : %s --- End Time : %s" % (epoch_start, epoch_end))
start_query_response = log_client.start_query(
logGroupName=cloudwatch_log_group_name,
startTime=epoch_start,
endTime=epoch_end,
queryString=query,
limit=10000
)
query_id = start_query_response['queryId']
logger.info("The query id being executed is : %s" % query_id)
response = None
while response == None or response['status'] == 'Running':
# logger.info('Waiting for query to complete ...')
time.sleep(1)
response = log_client.get_query_results(queryId=query_id)
# logger.info("The number of records from query are : %s" % len(response['results']))
for record in response['results']:
temp_value = ast.literal_eval(record[0]['value'])
if temp_value['Type'] == 'Task':
final_output.append(
{
'task_id': temp_value['TaskId'],
'container_instance_id': temp_value['ContainerInstanceId'],
'cpu_used': temp_value['CpuUtilized'],
'cpu_passed': temp_value['CpuReserved'],
'memory_used': temp_value['MemoryUtilized'],
'memory_passed': temp_value['MemoryReserved']
}
)
epoch_start = epoch_end
epoch_end = epoch_start + cloudwatch_query_interval
except:
logger.error("The cloudwatch log parsing has failed. Kindly check.", exc_info=True)
final_df = pd.DataFrame(final_output)
if len(final_df) > 0:
final_df = final_df.groupby(['task_id', 'container_instance_id']).mean().reset_index()
final_df.to_csv("%s/task_resource_usage.csv" % output_path, index=False)
logger.info("The number of tasks for which resource usage was gathered is as : %s" % len(final_df))
return final_df
def get_summaries(job_details_df):
"""This function will gather the different levels of summaries
Args:
job_details_df (Pandas DF): The DF with all job details.
"""
job_modules_df = pd.read_csv("%s/../configs/job_names_and_modules.csv" % current_script_dir)
# Sub Module Level Summary
logger.info("Considering only Successful Jobs")
success_job_details_df = job_details_df[job_details_df['job_status'].isin(["SUCCEEDED"])]
temp_df = success_job_details_df[['job_name', 'started_at', 'stopped_at']]
temp_df['avg_duration_across_all_jobs'] = pd.to_datetime(temp_df['stopped_at'], infer_datetime_format=True) - pd.to_datetime(temp_df['started_at'], infer_datetime_format=True)
temp_df['avg_duration_across_all_jobs'] = temp_df['avg_duration_across_all_jobs'] / np.timedelta64(1, 'h')
temp_df = pd.merge(temp_df, job_modules_df, on="job_name", how="left")
submodule_summary_df = temp_df.groupby(['module_name', 'module_number', 'main_module_name']).agg({'started_at': np.min, 'stopped_at': np.max, 'avg_duration_across_all_jobs': np.average}).reset_index()
submodule_summary_df = pd.merge(submodule_summary_df, temp_df.value_counts(['main_module_name', 'module_name', 'module_number']).reset_index(name='job_counts'), on="module_name", how="left")
submodule_summary_df['duration'] = pd.to_datetime(submodule_summary_df['stopped_at'], infer_datetime_format=True) - pd.to_datetime(submodule_summary_df['started_at'], infer_datetime_format=True)
submodule_summary_df['duration'] = (submodule_summary_df['duration'] / np.timedelta64(1, 'h'))
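    # Note (illustrative, not from the original script): dividing a Timedelta by
    # np.timedelta64(1, 'h') yields a float number of hours, e.g.
    # pd.Timedelta('90 min') / np.timedelta64(1, 'h') == 1.5.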
submodule_summary_df = submodule_summary_df[['module_number_x', 'main_module_name_x', 'module_name', 'started_at', 'stopped_at', 'avg_duration_across_all_jobs', 'job_counts', 'duration']].sort_values(["module_number_x", "started_at"])
submodule_summary_df.to_csv("%s/submodule_summary.csv" % output_path, index=False)
logger.info("The submodule level summary was written to output directory.")
# Get High level Summary
highlevel_summary_df = submodule_summary_df.groupby(["module_number_x", 'main_module_name_x']).agg({'started_at': 'min', 'stopped_at': 'max', 'job_counts': 'sum'}).reset_index()
highlevel_summary_df['duration'] = (pd.to_datetime(highlevel_summary_df['stopped_at']) -
|
pd.to_datetime(highlevel_summary_df['started_at'])
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
import psycopg2
from sklearn.model_selection import KFold
import Constants
import sys
from pathlib import Path
output_folder = Path(sys.argv[1])
output_folder.mkdir(parents=True, exist_ok=True)
# update database credentials if MIMIC data stored in postgres database
conn = psycopg2.connect(
"dbname=mimic user=darius host='/var/run/postgresql' password=password")
pats = pd.read_sql_query('''
select subject_id, gender, dob, dod from public.patients
''', conn)
n_splits = 12
pats = pats.sample(frac=1, random_state=42).reset_index(drop=True)
kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
for c, i in enumerate(kf.split(pats, groups=pats.gender)):
pats.loc[i[1], 'fold'] = str(c)
adm = pd.read_sql_query('''
select subject_id, hadm_id, insurance, language,
religion, ethnicity,
admittime, deathtime, dischtime,
HOSPITAL_EXPIRE_FLAG, DISCHARGE_LOCATION,
diagnosis as adm_diag
from public.admissions
''', conn)
df = pd.merge(pats, adm, on='subject_id', how='inner')
def merge_death(row):
if not(pd.isnull(row.deathtime)):
return row.deathtime
else:
return row.dod
df['dod_merged'] = df.apply(merge_death, axis=1)
notes = pd.read_sql_query('''
select category, chartdate, charttime, hadm_id, row_id as note_id, text from public.noteevents
where iserror is null
''', conn)
# drop all outpatients. They only have a subject_id, so can't link back to insurance or other fields
notes = notes[~(pd.isnull(notes['hadm_id']))]
df = pd.merge(left=notes, right=df, on='hadm_id', how='left')
df.ethnicity.fillna(value='UNKNOWN/NOT SPECIFIED', inplace=True)
others_set = set()
def cleanField(string):
mappings = {'HISPANIC OR LATINO': 'HISPANIC/LATINO',
'BLACK/AFRICAN AMERICAN': 'BLACK',
'UNABLE TO OBTAIN': 'UNKNOWN/NOT SPECIFIED',
'PATIENT DECLINED TO ANSWER': 'UNKNOWN/NOT SPECIFIED'}
bases = ['WHITE', 'UNKNOWN/NOT SPECIFIED', 'BLACK', 'HISPANIC/LATINO',
'OTHER', 'ASIAN']
if string in bases:
return string
elif string in mappings:
return mappings[string]
else:
for i in bases:
if i in string:
return i
others_set.add(string)
return 'OTHER'
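# Illustrative behaviour (inputs assumed, not taken from the MIMIC data): cleanField('BLACK/AFRICAN AMERICAN')
# returns 'BLACK' via the mappings dict, cleanField('WHITE - RUSSIAN') returns 'WHITE' through the
# substring match over bases, and any unmatched value falls through to 'OTHER'.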
df['ethnicity_to_use'] = df['ethnicity'].apply(cleanField)
df = df[df.chartdate >= df.dob]
ages = []
for i in range(df.shape[0]):
ages.append((df.chartdate.iloc[i] - df.dob.iloc[i]).days/365.24)
df['age'] = ages
df.loc[(df.category == 'Discharge summary') |
(df.category == 'Echo') |
(df.category == 'ECG'), 'fold'] = 'NA'
icds = (pd.read_sql_query('select * from public.diagnoses_icd', conn)
.groupby('hadm_id')
.agg({'icd9_code': lambda x: list(x.values)})
.reset_index())
df = pd.merge(left=df, right=icds, on='hadm_id')
def map_lang(x):
if x == 'ENGL':
return 'English'
if pd.isnull(x):
return 'Missing'
return 'Other'
df['language_to_use'] = df['language'].apply(map_lang)
for i in Constants.groups:
assert(i['name'] in df.columns), i['name']
acuities = pd.read_sql_query('''
select * from (
select a.subject_id, a.hadm_id, a.icustay_id, a.oasis, a.oasis_prob, b.sofa from
(public.oasis a
natural join public.sofa b )) ab
natural join
(select subject_id, hadm_id, icustay_id, sapsii, sapsii_prob from
public.sapsii) c
''', conn)
icustays = pd.read_sql_query('''
select subject_id, hadm_id, icustay_id, intime, outtime
from public.icustays
''', conn).set_index(['subject_id', 'hadm_id'])
def fill_icustay(row):
opts = icustays.loc[[row['subject_id'], row['hadm_id']]]
if pd.isnull(row['charttime']):
charttime = row['chartdate'] +
|
pd.Timedelta(days=2)
|
pandas.Timedelta
|
from django.shortcuts import render,redirect, get_object_or_404
from django.http import HttpResponse
from rest_framework import serializers
from . import Ml_model as ml
from . import Scraper as sc
from datetime import date
import datetime
import pandas as pd
from .models import Data, Update, Data_Predicted
from django.http import JsonResponse
from django.db.models import F, query
from rest_framework import generics
from .serializers import DataPredSerializers
from django_filters.rest_framework import DjangoFilterBackend, filterset
up_date = Update()
u = Update.objects.all().values()
class ListData(generics.ListAPIView):
queryset = Data_Predicted.objects.all()
# queryset = Data_Predicted.objects.all()
serializer_class = DataPredSerializers
filter_backends = [DjangoFilterBackend]
# filterset_fields = ['Date']
filterset_fields = ['Date','Date']
# def get_queryset(self):
# queryset1 = Data_Predicted.objects.all()
# start_date = self.request.query_params.get('start_date', None)
# end_date = self.request.query_params.get('end_date', None)
# if start_date and end_date:
# queryset = queryset1.filter(timstamp__range=[start_date, end_date])
def index(request):
ans = ''
ans2 = ''
ans3 = ''
ans4 = ''
ans5 = ''
d = ""
x_new = ""
if(request.method == 'POST'):
x = request.POST.get('date')
print(type(x))
today = date.today()
x_new = pd.to_datetime(x)
# if(x_new < today):
if(4 == 5):
print("****************************** less than ")
# x = x.strftime("%m/%d/%Y")
p = '"' + x + '"'
y = ml.df.loc[x,'AQI']
y2 = ml.df.loc[x,'SO2']
y3 = ml.df.loc[x,'NO2']
y4 = ml.df.loc[x,'O3']
y5 = ml.df.loc[x,'PM10']
# ans = "{:.2f}".format(y)
# ans2 = "{:.2f}".format(y2)
# ans3 = "{:.2f}".format(y3)
# ans4 = "{:.2f}".format(y4)
# ans5 = "{:.2f}".format(y5)
ans = y
ans2 = y2
ans3 = y3
ans4 = y4
ans5 = y5
d = x
else:
p = '"' + x + '"'
y = ml.df_pred.loc[x,'AQI']
y2 = ml.df_pred.loc[x,'SO2']
y3 = ml.df_pred.loc[x,'NO2']
y4 = ml.df_pred.loc[x,'O3']
y5 = ml.df_pred.loc[x,'PM10']
# ans = "{:.2f}".format(y)
# ans2 = "{:.2f}".format(y2)
# ans3 = "{:.2f}".format(y3)
# ans4 = "{:.2f}".format(y4)
# ans5 = "{:.2f}".format(y5)
ans = y
ans2 = y2
ans3 = y3
ans4 = y4
ans5 = y5
d = x
max_date = "2021-06-12"
today = date.today()
last_data_date = ml.df_pred.index[-1]
date_today = today
last_data_date = pd.to_datetime(last_data_date)
date_today =
|
pd.to_datetime(date_today)
|
pandas.to_datetime
|
import streamlit as st
import pandas as pd
import numpy as np
from joblib import load
@st.cache
def cvt_df(df):
return df.to_csv().encode("utf-8")
@st.cache
def load_example_df():
return
|
pd.read_csv("https://raw.githubusercontent.com/YP-Learning/streamlit-fcc/main/project8/penguins_example.csv?token=<KEY>")
|
pandas.read_csv
|
import pandas as pd
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.metrics import mean_squared_error
import os
class collaborativeFiltering():
def __init__(self):
pass
def readSongData(self, top):
"""
Read song data from targeted url
"""
if 'song.pkl' in os.listdir('_data/'):
song_df =
|
pd.read_pickle('_data/song.pkl')
|
pandas.read_pickle
|
import time
import cv2
import os
import random
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from keras.applications.resnet50 import preprocess_input
import xml.etree.ElementTree as et
# module level variables
IMAGES_FOLDER = "/home/masi/Projects/CNN_object_localizer/VOCdevkit/VOC2012/JPEGImages"
META_DATA_FOLDER = "/home/masi/Projects/CNN_object_localizer/VOCdevkit/VOC2012/Annotations"
INPUT_SHAPE = (540, 1024, 3)
PREPROCESSED_DATA_DIR = "../preprocessed_data"
TRAIN_DATA_FILE = PREPROCESSED_DATA_DIR + os.sep + "train_data"
VALIDATION_DATA_FILE = PREPROCESSED_DATA_DIR + os.sep + "validation_data"
TEST_DATA_FILE = PREPROCESSED_DATA_DIR + os.sep + "test_data"
def get_data(images_folder: str, meta_data_folder: str):
"""Parse data from PASCAL VOC xml files"""
print("Parsing data..", end='')
start = time.time()
meta_filenames = [os.path.join(meta_data_folder, fn) for fn in
os.listdir(meta_data_folder)]
data = []
for i, meta_fn in enumerate(meta_filenames):
root = et.parse(meta_fn).getroot()
fn_object = root.find('filename')
filename = fn_object.text
w = int(root.find('size').find('width').text)
h = int(root.find('size').find('height').text)
d = int(root.find('size').find('depth').text)
shape = (w, h, d)
for o in root.iter("object"):
name = o.find('name').text
# get annotations with object element
xmin = float(o.find('bndbox').find('xmin').text)
xmax = float(o.find('bndbox').find('xmax').text)
ymin = float(o.find('bndbox').find('ymin').text)
ymax = float(o.find('bndbox').find('ymax').text)
data.append(
[filename, meta_fn, name, shape, xmin, xmax, ymin, ymax])
df = pd.DataFrame(data, columns=['filename', 'meta_filename', 'name',
'shape', 'xmin', 'xmax', 'ymin', 'ymax'])
df['filename'] = df['filename'].apply(
lambda x: os.path.join(images_folder, x))
print("...done in {:0.2f} seconds".format(time.time() - start))
return df
def create_random_window(image_size):
    """Create a random window within the given image size"""
xmin, xmax = np.sort(np.random.randint(0, image_size[1], 2))
ymin, ymax = np.sort(np.random.randint(0, image_size[0], 2))
return xmin, xmax, ymin, ymax
def compute_window_overlap(win1, win2):
"""Compute overlap between two windows"""
xset = set(np.arange(win1[0], win1[1]))
yset = set(np.arange(win1[2], win1[3]))
xset2 = set(np.arange(win2[0], win2[1]))
yset2 = set(np.arange(win2[2], win2[3]))
xint = xset.intersection(xset2)
yint = yset.intersection(yset2)
xuni = xset.union(xset2)
yuni = yset.union(yset2)
return (len(xint) / len(xuni)) * (len(yint) / len(yuni))
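# Worked example (values assumed, not from the original script): for win1 = (0, 10, 0, 10)
# and win2 = (5, 15, 5, 15), the x-sets {0..9} and {5..14} share 5 of 15 elements and the
# y-sets likewise, so compute_window_overlap(win1, win2) == (5/15) * (5/15) ~= 0.11.
# This is the product of per-axis Jaccard overlaps rather than an area-based IoU.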
def generate_negative_samples(df: pd.DataFrame):
"""Generate samples for background class from classes other than 'person'"""
start = time.time()
negatives = []
df_neg = df[~df['name'].str.contains('person')]
for ind, row in df_neg.iterrows():
# row = row.copy()
row['name'] = 'background'
row['xmin'] = 0
row['xmax'] = row['shape'][0]
row['ymin'] = 0
row['ymax'] = row['shape'][1]
negatives.append(row)
print(
"Sample generation took: {:0.2f} seconds".format(time.time() - start))
return negatives
def prepare_dataset(images_folder: str, meta_data_folder: str,
save: bool = False, load: bool = False):
"""Prepare dataset for training
Parse xml files, create background class, remove samples with multiple
persons (because this is localization, not object detection),
randomize data, split data to training, validation and test partitions
    (50-25-25). Allows saving and loading a previously prepared dataset from file."""
if load and os.path.exists(TRAIN_DATA_FILE) and os.path.exists(
VALIDATION_DATA_FILE) and os.path.exists(TEST_DATA_FILE):
print('loading dataset..', end='')
train_data = pd.read_pickle(TRAIN_DATA_FILE)
validation_data = pd.read_pickle(VALIDATION_DATA_FILE)
test_data = pd.read_pickle(TEST_DATA_FILE)
print('..done')
else:
data = get_data(images_folder, meta_data_folder)
# generate negative samples
negatives = generate_negative_samples(data)
data = data.append(negatives)
# select only rows with persons or generated negative samples (background)
data = data[
(data['name'] == 'person') | (data['name'] == 'background')]
# remove images with more than one person or background
data.drop_duplicates(['filename'], keep=False, inplace=True)
# randomize
data = data.sample(frac=1).reset_index()
# split
split = int(data.shape[0] / 2)
train_data = data.iloc[1:split]
test_data = data.iloc[split:]
validation_data = test_data.sample(frac=0.5, replace=False)
test_data = test_data[~test_data.index.isin(validation_data.index)]
print("Data splits: "
"\n train data: {:0.2f}% ({})"
"\n validation data: {:0.2f}% ({})"
"\n test data: {:0.2f}% ({})"
.format(100 * train_data.shape[0] / data.shape[0],
train_data.shape[0],
100 * validation_data.shape[0] / data.shape[0],
validation_data.shape[0],
100 * test_data.shape[0] / data.shape[0],
test_data.shape[0]))
if save:
print('saving dataset..', end='')
pd.to_pickle(train_data, TRAIN_DATA_FILE)
pd.to_pickle(validation_data, VALIDATION_DATA_FILE)
|
pd.to_pickle(test_data, TEST_DATA_FILE)
|
pandas.to_pickle
|
import pandas as pd
# Function converts the obvious numeric columns into int/float
def prelim_numeric_converter(df):
df.loc[:, df.columns.str.startswith('VotingPerformance')] = df.loc[:, df.columns.str.startswith('VotingPerformance')].replace(["%"], "", regex=True)
df.loc[:, df.columns.str.startswith('Commercial')] = df.loc[:, df.columns.str.startswith('Commercial')].replace(["$"], "", regex=True)
cols = df.columns
for c in cols:
try:
df[c] = pd.to_numeric(df[c])
except:
pass
return df
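# Illustrative example (assumed data, not from the original): a 'VotingPerformance...' column
# holding ["72%", "65%"] is first stripped to ["72", "65"] and then cast by pd.to_numeric to
# 72 and 65; columns that still fail pd.to_numeric are silently left unchanged by the except/pass.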
# converting election results to numeric (resource heavy)
def election_numeric_converter(df):
col_len = df.shape[1]-1
df.loc[:, df.columns.str.startswith('Election')] = df.loc[:, df.columns.str.startswith('Election')].replace(["%"], "", regex=True)
for i in range(df.loc[:, df.columns.str.startswith('Election')].shape[1]):
try:
df.iloc[:, col_len-i] =
|
pd.to_numeric(df.iloc[:, col_len-i])
|
pandas.to_numeric
|
from timeit import repeat
import numpy as np
import pandas as pd
from randomgen import MT19937, DSFMT, ThreeFry, PCG64, Xoroshiro128, \
Xorshift1024, Philox, Xoshiro256StarStar, Xoshiro512StarStar
PRNGS = [DSFMT, MT19937, Philox, PCG64, ThreeFry, Xoroshiro128, Xorshift1024,
Xoshiro256StarStar, Xoshiro512StarStar]
funcs = {'32-bit Unsigned Ints': 'random_uintegers(size=1000000,bits=32)',
         '64-bit Unsigned Ints': 'random_uintegers(size=1000000,bits=64)',
'Uniforms': 'random_sample(size=1000000)',
'Complex Normals': 'complex_normal(size=1000000)',
'Normals': 'standard_normal(size=1000000)',
'Exponentials': 'standard_exponential(size=1000000)',
'Gammas': 'standard_gamma(3.0,size=1000000)',
'Binomials': 'binomial(9, .1, size=1000000)',
'Laplaces': 'laplace(size=1000000)',
'Poissons': 'poisson(3.0, size=1000000)', }
setup = """
from randomgen import {prng}
rg = {prng}().generator
"""
test = "rg.{func}"
table = {}
for prng in PRNGS:
print(prng)
col = {}
for key in funcs:
t = repeat(test.format(func=funcs[key]),
setup.format(prng=prng().__class__.__name__),
number=1, repeat=3)
col[key] = 1000 * min(t)
col = pd.Series(col)
table[prng().__class__.__name__] = col
npfuncs = {}
npfuncs.update(funcs)
npfuncs['32-bit Unsigned Ints'] = 'randint(2**32,dtype="uint32",size=1000000)'
npfuncs['64-bit Unsigned Ints'] = 'tomaxint(size=1000000)'
del npfuncs['Complex Normals']
setup = """
from numpy.random import RandomState
rg = RandomState()
"""
col = {}
for key in npfuncs:
t = repeat(test.format(func=npfuncs[key]),
setup.format(prng=prng().__class__.__name__),
number=1, repeat=3)
col[key] = 1000 * min(t)
table['NumPy'] =
|
pd.Series(col)
|
pandas.Series
|
from __future__ import absolute_import
# SymPy is a non-commercial alternative to Mathematica and Maple.
# SymPy can map a variable to a value or a matrix.
# SymPy's symbolic statistical modelling uses scientific computing.
import sys
import numpy as np
import sympy as sp
import pandas as pd
from pathlib import Path
from .tokens import *
from .equation import *
class Equations(Equation):
def __init__(self):
path = Path(__file__).parent
self.filepath = path.joinpath("fixtures","equations.xlsx")
self.equations_sheet = "equations"
self.column_mapping_sheet = "col_var_mapping"
self.data_sheet = "values"
self.mappings = None
self.df = None
self.equations_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Class Features
Name: lib_data_io_ascii
Author(s): <NAME> (<EMAIL>)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Libraries
import logging
import os
import itertools
from abc import ABC
from copy import deepcopy
import numpy as np
import pandas as pd
from hmc.algorithm.default.lib_default_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Super class to wrap dataframe behaviour
class DFrameCustom(pd.DataFrame, ABC):
pass
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read state point file
def read_state_point(file_name, file_time, var_name='state', file_time_start=None, file_time_end=None, file_time_frequency='H',
file_columns_type=None, file_columns_name=None, list_columns_excluded=None):
if file_columns_type is None:
file_columns_type = {0: 'dset'}
file_type = list(file_columns_type.values())
if file_time_start == file_time_end:
time_range = pd.DatetimeIndex([file_time_end])
time_n = time_range.__len__()
else:
log_stream.error(' ===> Time steps conditions are not supported!')
raise NotImplementedError('Case not implemented')
if isinstance(file_name, list):
file_name = file_name[0]
dframe_summary = {}
if os.path.exists(file_name):
file_table = pd.read_table(file_name, header=None)
file_row_values = file_table.values.tolist()
id_tot = 0
data_obj = {}
for name_id, name_step in enumerate(file_columns_name):
for type_id, type_step in enumerate(file_type):
file_row_tmp = file_row_values[id_tot]
file_row_step = file_row_tmp[0].strip().split()
if type_step not in list_columns_excluded:
if type_step == 'dam_index':
row_data = [int(elem) for elem in file_row_step]
else:
row_data = [float(elem) for elem in file_row_step]
if type_step not in list(data_obj.keys()):
data_obj[type_step] = {}
data_obj[type_step][name_step] = row_data
id_tot += 1
for var_id, (var_key, var_ts) in enumerate(data_obj.items()):
for var_pivot, var_data in var_ts.items():
dframe_pnt = DFrameCustom(index=time_range)
dframe_pnt.name = var_name
dframe_tmp = pd.DataFrame(index=time_range, data=var_data, columns=[var_pivot])
dframe_tmp.index.name = 'Time'
series_filled = dframe_tmp.iloc[:, 0]
dframe_pnt[var_pivot] = series_filled
if var_key not in list(dframe_summary.keys()):
dframe_summary[var_key] = dframe_pnt
else:
dframe_tmp = dframe_summary[var_key]
dframe_join = dframe_tmp.join(dframe_pnt, how='right')
dframe_join.name = var_name
dframe_summary[var_key] = dframe_join
else:
dframe_summary = None
return dframe_summary
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read outcome point file
def read_outcome_point(file_name, file_time, file_columns=None, file_map=None, file_ancillary=None):
if file_columns is None:
file_columns = {0: 'dset'}
if not isinstance(file_name, list):
file_name = [file_name]
data_obj = {}
time_step_expected = []
time_step_exists = []
for file_n, (file_step, time_step) in enumerate(zip(file_name, file_time)):
time_step_expected.append(time_step)
if os.path.exists(file_step):
file_size = os.path.getsize(file_step)
if file_size > 0:
file_table = pd.read_table(file_step, header=None)
time_step_exists.append(time_step)
for row_id, row_value in zip(file_table.index, file_table.values):
if row_value.__len__() == 1:
row_value = row_value[0]
else:
raise NotImplementedError(' ===> Length list not allowed')
if row_id not in list(data_obj.keys()):
data_obj[row_id] = [row_value]
else:
row_tmp = data_obj[row_id]
row_tmp.append(row_value)
data_obj[row_id] = row_tmp
else:
log_stream.warning(' ===> Size of ' + file_step + ' is equal to zero. File is empty.')
data_obj = None
if data_obj is not None:
data_var = {}
for data_id, (data_ref, data_ts) in enumerate(data_obj.items()):
if file_ancillary is not None:
data_name = list(file_ancillary.keys())[data_id]
else:
data_name = data_ref
for tag_columns in file_columns.values():
if tag_columns not in list(data_var.keys()):
data_var[tag_columns] = {}
data_var[tag_columns][data_name] = {}
data_var[tag_columns][data_name] = data_ts
time_n = time_step_expected.__len__()
var_data_expected = [-9999.0] * time_n
dframe_summary = {}
dframe_merged =
|
pd.DataFrame(index=time_step_expected)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# indexnames are oke
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# indexnames are oke
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix =
|
MultiIndex.from_arrays([A.index.values, B.index.values])
|
pandas.MultiIndex.from_arrays
|
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import os
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-dir", "--subdirectory", help="enter subdirectory name")
params = parser.parse_args()
subdir = params.subdirectory
os.chdir(subdir)
locationfile = pd.read_csv('location.csv')
accountfile = pd.read_csv('account.csv')
split_location = locationfile.groupby('FlexiLocUnit')
split_account = accountfile.groupby('FlexiAccUnit')
newpath = 'units'
if not os.path.exists(newpath):
os.makedirs(newpath)
cwd = os.getcwd()
for name, group in split_location:
sub_dir = os.path.join(newpath,(str)(name))
names = sorted([str(item[0]) for item in split_location])
# Function to sort fm strings in ascending (natural) order
import re
def ascending(text):
    return int(text) if text.isdigit() else text
def natural_keys(text):
    return [ascending(c) for c in re.split(r'(\d+)', text)]
names.sort(key=natural_keys)
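# Illustrative example (not part of the original script): natural_keys("fm10") returns
# ['fm', 10, ''], so sorted(['fm10', 'fm2'], key=natural_keys) gives ['fm2', 'fm10']
# instead of the lexicographic ['fm10', 'fm2'].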
#print(names)
units_dir=os.path.join(cwd,'units')
if not os.path.exists(units_dir):
os.mkdir(units_dir)
with open(os.path.join(units_dir,'units.txt'), "w") as txt_file:
names, groups = map(list, zip(*split_location))
for name in names:
txt_file.write(str(name) + '\n')
# get directories
with open(os.path.join(units_dir,'units.txt'), "r") as txt_file:
fms = txt_file.read().split('\n')
dirs = []
for fm in fms:
if fm!='':
dirs.append(fm)
# combine dataframes
# start with first one
fm_first = dirs[0]
fm_first_filepath = os.path.join(newpath,fm_first,'location.csv')
df_loc = pd.read_csv(fm_first_filepath)
df_loc['FlexiLocUnit']=fm_first
# add in remaining fm files, iterating through remainder
for i in range(1,len(dirs)):
fm_next = dirs[i]
fm_next_filepath = os.path.join(newpath,fm_next,'location.csv')
df_loc_tmp = pd.read_csv(fm_next_filepath)
df_loc_tmp['FlexiLocUnit']=fm_next
# concat files
df_loc =
|
pd.concat([df_loc,df_loc_tmp])
|
pandas.concat
|
''' <NAME>'''
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Creates a class for obtaining statistical and graphical information from a
# set of data of the climate (HVAC) machines over a certain period of time
class report:
def __init__(self, path, extension='pkl', del_tests=True):
        self.path = path  # Path of the file that contains the data
self.del_tests = del_tests
        # Depending on the file extension, a different function is executed to read the data
if extension == 'pkl':
self.data =
|
pd.read_pickle(path)
|
pandas.read_pickle
|
import ast
import inspect
import logging
import os
import pathlib
import sys
import typing
import numpy as np
import pandas as pd
from pymatgen.core.periodic_table import _pt_data, Element
from AnalysisModule.calculator.fp import GetFpArray
from AnalysisModule.routines.util import MDefined, NMDefined
from AnalysisModule.routines.util import load_pkl, save_pkl
from MLModule.utils import load_input_tables
from MLModule.utils import variance_threshold_selector, split_columns
this_dir = os.path.dirname(os.path.abspath(__file__))
def Encode_bus(bus_column: [str], BuidTable=None):
BUid_in_dataset = sorted(BuidTable.keys())
num_bus = len(BUid_in_dataset)
buid_encoding_dict = {v: k for k, v in enumerate(BUid_in_dataset)}
ohe_array = np.zeros((len(bus_column), num_bus), dtype=np.float32)
for i_entry, bu_entry in enumerate(bus_column):
for buid in ast.literal_eval(bu_entry):
ohe_array[i_entry][buid_encoding_dict[buid]] = 1
logging.info("{} gives # of columns: {}".format(inspect.stack()[0][3], ohe_array.shape[1]))
columns = ["Buid_bit_{}".format(num) for num in range(ohe_array.shape[1])]
ohe_df = pd.DataFrame(ohe_array, columns=columns)
return ohe_array, ohe_df
def Encode_elements(
compositions: [str],
possible_elements: [str] = None,
exclude_elements=("H", "O"),
exclude_groups=("noble_gas",),
feature_header="Elements_bit"
):
"""
one hot encoder for elementary strings
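    Illustrative example (assumed input, not from the original module): for
    compositions = ["['Cu', 'Se', 'O']"] the returned row has 1s in the 'Cu' and 'Se'
    columns; 'H' and 'O' are excluded by default and noble-gas elements are filtered
    out via exclude_groups.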
"""
if possible_elements is None:
possible_elements = sorted(_pt_data.keys())
elements = []
for e in possible_elements:
if e in exclude_elements:
continue
element = Element(e)
if any(getattr(element, "is_{}".format(p)) for p in exclude_groups):
continue
elements.append(e)
elements = sorted(elements)
n_compositions = len(compositions)
ohe_array = np.zeros((n_compositions, len(elements)), dtype=np.float32)
presented = []
for icomp, composition in enumerate(compositions):
symbol_list = ast.literal_eval(composition)
for string in symbol_list:
presented.append(string)
if string == "O":
continue
ind = elements.index(string)
ohe_array[icomp][ind] = 1
logging.info("{} gives # of columns: {}".format(inspect.stack()[0][3], ohe_array.shape[1]))
# columns = ["{}_{}".format(feature_header, num) for num in range(ohe_array.shape[1])]
columns = [elements[num] for num in range(ohe_array.shape[1])]
ohe_df = pd.DataFrame(ohe_array, columns=columns)
return ohe_array, ohe_df
def Encode_ms(compositions: [str]):
return Encode_elements(compositions, possible_elements=sorted(MDefined))
def Encode_nms(compositions: [str]):
return Encode_elements(compositions, possible_elements=sorted(NMDefined))
def Encode_smiles(smis: [str],
nbits=1024,
funcname="cluster",
AmineTable=None,
):
if funcname == "dummy":
ohe_df = pd.get_dummies(smis, dtype=np.float32)
ohe_array = ohe_df.values.astype(np.float32)
return ohe_array, ohe_df
if funcname == "cluster":
smi_classes = [AmineTable[smi]["saoto_clusterlabel"] for smi in smis]
ohe_df =
|
pd.get_dummies(smi_classes, dtype=np.float32)
|
pandas.get_dummies
|
# Version 3.1
# This script was designed to work with a single DMSO control and 9 drug doses.
# Takes an .xls from a CellTiter-Glo assay analyzed on a plate reader
# and outputs graphs for each sheet in the .xls. Raw data and processed data
# are stored in separate tables of a PostgreSQL database.
# Pip Freeze:
# certifi==2019.9.11
# cycler==0.10.0
# kiwisolver==1.1.0
# matplotlib==3.1.1
# mkl-fft==1.0.14
# mkl-random==1.1.0
# mkl-service==2.3.0
# numpy==1.17.2
# pandas==0.25.2
# pyparsing==2.4.2
# python-dateutil==2.8.0
# pytz==2019.3
# six==1.12.0
# tornado==6.0.3
# xlrd==1.2.0
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import argparse
import re
import psycopg2
# Set a loop or option to analyze all files in directory if desired
def file_selection(input_path=os.getcwd()):
"""
input_path: The directory specified on the command line at initiation of
program. Defaults to the current directory if not specified.
    Searches for an .xls file to analyze in the following locations: first,
the provided input path from the command line; second, the current
directory; third, a directory specified in the input if the previous two
locations do not contain the correct file."""
# Generates a list of possible files in the provided path
files = sorted([file for file in os.listdir(input_path)
if file.endswith(".xls") or file.endswith(".xlsx")],
reverse=True)
file_dict = {}
if files == []:
print("No files were found.")
path = input(
"Enter the full path of the file's location to load or 'exit' to abort.")
if path == "exit": # Abort the process if "exit" entered.
print("Process aborted")
sys.exit()
while True: # Attempt to find .xls files from the provided path
try:
files = sorted([file for file in os.listdir(
path) if file.endswith(".xls") or file.endswith(".xlsx")])
break
except FileNotFoundError:
print("No such file or directory.")
# Enumerate current data files and add them to file_dict for calling later by number
print("Current files available")
for i, file in enumerate(files):
print(i, file)
file_dict[i] = file
# Request file number and verify input or abort
while True:
chosen_file_number = input("\nEnter a file number to analyze or 'exit' to abort.\n")
if chosen_file_number.isdigit():
if int(chosen_file_number) in range(0, len(files)):
chosen_file = file_dict[int(chosen_file_number)]
print(chosen_file)
break
elif chosen_file_number == "exit":
print("Process aborted")
sys.exit()
else:
print("Your answer was not an appropriate number.")
print("\nChosen file: ", chosen_file)
final_check = input(
"\nIf the above information is correct, press 'enter.' Otherwise, type anything to abort.")
if final_check == "":
print(f"\nContinuing with analysis using {chosen_file}.")
return os.path.join(input_path, chosen_file)
else:
print("\nProcess aborted\n")
sys.exit()
def experimental_parameter_check():
"""Sets variables for drug, doses, and units depending on the drug used for the experiment."""
drug = None
drug_list = ["A-1155463", "AMG-176", "Venetoclax"]
while True:
# Set up options for choosing drug used in experiment
drug_option_dict = {}
for i, drug_used in enumerate(drug_list):
print(i, drug_used)
drug_option_dict[i] = drug_used
# Ask for input to select correct drug
drug_number = input(
"\nEnter the number above corresponding to the correct drug, or type 'exit' to abort.\n")
if drug_number.isnumeric():
if int(drug_number) in range(0, len(drug_list)):
drug = drug_option_dict[int(drug_number)]
break
elif drug_number.lower() == "exit":
print("Process aborted")
sys.exit()
# Set doses and unit for proper drug
if drug in ["AMG-176", "Venetoclax"]:
doses = [0, 5, 16, 48, 144, 432, 1296, 3888, 11666, 35000]
unit = "nM"
elif drug == "A-1155463":
doses = []
unit = "nM"
# Print experimental parameters for final check
print("\n" + "Experimental Parameters:")
print("Drug:", drug)
print("Unit:", unit)
print("Dose range:", doses)
experiment_information_check = input(
"\nIf the above information is correct, press 'enter.' Otherwise, type anything to abort.")
if experiment_information_check == "":
print("\nContinuing with analysis\n")
return doses, unit, drug
else:
print("Process aborted")
sys.exit()
def date_and_experimenter():
"""A simple function that asks for input to determine who generated the data and
when. This information will be added to the SQL table for each experiment."""
# Loop to get correct date
while True:
date = input('What date was the plate read? Format as yyyy-mm-dd.\n')
regex_pattern = '^20\d{2}-\d{2}-\d{2}$'
if re.search(regex_pattern, date):
break
else:
print("""\nThat is not a valid date.\n
Use this format: 2019-03-25\n""")
# Loop to get correct initials (2-3 characters)
while True:
experimenter = input('What are the initials of the person who generated the data?\n')
if len(experimenter) <= 3:
break
else:
print('Initials should not be longer than 3 letters.')
return date, experimenter.upper()
def make_mean_df(means, stdev, cell_line):
"""
means: The mean luminescence values from an xls document of a CTG experiment.
stdev: The standard deviation values from an xls document of a CTG experiment.
cell_line: The name of the cell line used for the experiment. Determined
from the sheet name using clean_sheet_name().
Generates a normalized mean column using means,
and concatenates the means, stdev, and normalized means
into a data frame for plotting.
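    For illustration (values assumed, not from real assay data): if the DMSO control
    mean is means[0] == 20000 and a treated condition has mean 5000, the corresponding
    normalized_mean entries become 1.0 and 0.25.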
"""
normalized_mean =
|
pd.Series(means/means[0], name="normalized_mean")
|
pandas.Series
|
import json
from pathlib import Path
import pandas as pd
root = Path(__file__).parents[1]
for gas in ["co2", "ch4", "n2o", "other"]:
data = json.load(open(root / f"{gas}.json"))
columns = ["Quarter"] + [
item["G1"]
for item in data["results"][0]["result"]["data"]["dsr"]["DS"][0]["SH"][0]["DM1"]
]
values = data["results"][0]["result"]["data"]["dsr"]["DS"][0]["PH"][0]["DM0"]
rows = [
[item["G0"]] + [float(i["M0"] if "M0" in i else 0) for i in item["X"]]
for item in values
]
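    # For reference (structure inferred from the lookups above, field values assumed): each
    # item under ...["SH"][0]["DM1"] carries a series label in "G1", and each item under
    # ...["PH"][0]["DM0"] carries a quarter label in "G0" plus a list "X" of cells whose
    # numeric values sit under "M0" (a missing "M0" is treated as 0).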
df =
|
pd.DataFrame(rows, columns=columns)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import torch
from collections import defaultdict
import random
import networkx as nx
import torch.nn as nn
from sklearn.metrics import f1_score
import scipy.sparse as sp
import itertools
'''
Implementing ClusterGraph Algorithm using backend PyTorch
@Author: <NAME>
Args:
node_df -> pd.DataFrame
Necessary Format:
--------------------------------------------------------
cust_id | is_driver | is_reported | feat_1 | feat_2 |...
--------------------------------------------------------
edge_df -> pd.DataFrame
Necessary Format:
------------------
cust_id | opp_id
------------------
hidden_dim -> list(int)
        list of the dimensions of each hidden layer
num_clusters -> int
number of subgraph
cluster_method -> string
        {'random', 'metis'}; if 'random', randomly cut the graph into n clusters;
        if 'metis', use PyMetis to do the clustering.
default: 'metis'
epochs -> int
default: 20
lr -> float
learning rate default: 0.01
residual_block -> boolean
model including residual block or not, default: True
save_path -> string
saving path, default: None
v1:
    1. update metis clustering method
    2. add adjacency normalization L = D^-0.5 * (A+I) * D^-0.5
'''
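# Illustrative usage sketch (column names and values assumed, not part of the original module):
#   node_df = pd.DataFrame({'cust_id': [0, 1, 2], 'is_driver': [True, False, True],
#                           'is_reported': [False, True, False], 'feat_1': [0.1, 0.3, 0.5]})
#   edge_df = pd.DataFrame({'cust_id': [0, 1], 'opp_id': [1, 2]})
#   graph, data_dict = preprocessing(node_df, edge_df, num_clusters=2, cluster_method='metis').run()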
class preprocessing():
def __init__(self,
node_df,
edge_df,
num_clusters,
cluster_method):
assert all(col in node_df.columns for col in ['cust_id','is_driver','is_reported'])
assert all(col in edge_df.columns for col in ['cust_id','opp_id'])
assert type(num_clusters) == int
self.node_df = node_df
self.edge_df = edge_df
self.num_clusters = num_clusters
self.cluster_method = cluster_method
self.data_dict = {}
def run(self):
self.delete_nodes()
graph = self.build_graph(self.edge_df)
if self.cluster_method == 'metis':
clusters, cluster_membership = self.metis_clustering()
elif self.cluster_method == 'random':
clusters, cluster_membership = self.random_clustering(graph)
self.adjacency = self.build_adjacency()
self.adjacency = self.normalization()
self.adjacency = self.adjacency2tensor()
self.feats_name = list(set(self.node_df.columns) - set(['cust_id','is_driver','is_reported']))
features = self.node_df[self.feats_name]
features = torch.from_numpy(np.array(features)).float()
features = torch.sparse.mm(self.adjacency,features)
self.node_df = pd.concat([self.node_df[['cust_id','is_driver','is_reported']],pd.DataFrame(data = np.array(features),columns = self.feats_name)], axis = 1)
self.data_dict['sg_nodes'], self.data_dict['sg_edges'], self.data_dict['sg_train_nodes'], self.data_dict['sg_test_nodes'], self.data_dict['sg_train_features'], self.data_dict['sg_test_features'], self.data_dict['sg_train_targets'], self.data_dict['sg_test_targets'] = self.build_membership_dict(graph,clusters, cluster_membership)
return graph, self.data_dict
def delete_nodes(self):
# node_lookup: store node index
node_lookup = pd.DataFrame({'node': self.node_df.index}, index=self.node_df.cust_id)
# delete no-edge-node
diff_node = list(set(self.node_df['cust_id'])-(set(self.node_df['cust_id']) - set(self.edge_df['cust_id']) - set(self.edge_df['opp_id'])))
self.node_df = self.node_df.iloc[node_lookup.iloc[diff_node]['node']].reset_index(drop=True)
def build_graph(self, edge_df):
# build up graph using networkx
graph = nx.from_edgelist([(cust, opp) for cust, opp in zip(edge_df['cust_id'], edge_df['opp_id'])])
return graph
def metis_clustering(self):
import pymetis
clusters = [cluster for cluster in range(self.num_clusters)]
node_lookup = pd.DataFrame({'node': self.node_df.index, }, index = self.node_df.cust_id)
self.adjacency_dict = defaultdict(list)
for cust, opp in zip(self.edge_df['cust_id'], self.edge_df['opp_id']):
self.adjacency_dict[node_lookup.loc[cust]['node']].append(node_lookup.loc[opp]['node'])
adjacency_list = []
for node in list(self.node_df['cust_id']):
adjacency_list.append(self.adjacency_dict[node])
_, membership= pymetis.part_graph(self.num_clusters, adjacency_list)
cluster_membership = {}
for node, member in zip(list(self.node_df['cust_id']), membership):
cluster_membership[node] = member
return clusters, cluster_membership
def random_clustering(self, graph):
# random_clustering
clusters = [cluster for cluster in range(self.num_clusters)]
cluster_membership = {node: random.choice(clusters) for node in graph.nodes()}
node_lookup = pd.DataFrame({'node': self.node_df.index, }, index = self.node_df.cust_id)
self.adjacency_dict = defaultdict(list)
for cust, opp in zip(self.edge_df['cust_id'], self.edge_df['opp_id']):
self.adjacency_dict[node_lookup.loc[cust]['node']].append(node_lookup.loc[opp]['node'])
return clusters, cluster_membership
def build_adjacency(self):
"""
build adjacency matrix according to adjacency dictionary
"""
edge_index = []
num_nodes = len(self.adjacency_dict)
for src, dst in self.adjacency_dict.items():
edge_index.extend([src, v] for v in dst)
edge_index.extend([v, src] for v in dst)
edge_index = list(k for k, _ in itertools.groupby(sorted(edge_index)))
edge_index = np.asarray(edge_index)
adjacency = sp.coo_matrix((np.ones(len(edge_index)),
(edge_index[:, 0], edge_index[:, 1])),
shape=(num_nodes, num_nodes), dtype="float32")
return adjacency
def normalization(self):
"""
calculate L=D^-0.5 * (A+I) * D^-0.5
"""
self.adjacency += sp.eye(self.adjacency.shape[0])
degree = np.array(self.adjacency.sum(1))
d_hat = sp.diags(np.power(degree, -0.5).flatten())
return d_hat.dot(self.adjacency).dot(d_hat).tocoo()
def adjacency2tensor(self):
"""
convert numpy.array adjacency matrix to torch.tensor
"""
num_nodes, input_dim = self.node_df.shape
input_dim -= 3
indices = torch.from_numpy(np.asarray([self.adjacency.row,
self.adjacency.col]).astype('int64')).long()
values = torch.from_numpy(self.adjacency.data.astype(np.float32))
tensor_adjacency = torch.sparse.FloatTensor(indices, values,
(num_nodes, num_nodes))
return tensor_adjacency
def build_membership_dict(self, graph, clusters, cluster_membership):
# build-up membership dict
sg_nodes = {}
sg_edges = {}
sg_train_nodes = {}
sg_test_nodes = {}
sg_train_features = {}
sg_test_features = {}
sg_train_targets = {}
sg_test_targets = {}
for cluster in clusters:
#print(cluster)
subgraph = graph.subgraph([node for node in sorted(graph.nodes()) if cluster_membership[node] == cluster])
sg_nodes[cluster] = [node for node in sorted(subgraph.nodes())]
mapper = {node: i for i, node in enumerate(sorted(sg_nodes[cluster]))}
sg_edges[cluster] = [[mapper[edge[0]], mapper[edge[1]]] for edge in subgraph.edges()] + [[mapper[edge[1]], mapper[edge[0]]] for edge in subgraph.edges()]
sg_train_nodes[cluster] = [node for node in self.node_df[self.node_df['is_driver'] == True]['cust_id'] if node in sg_nodes[cluster]]
sg_test_nodes[cluster] = [node for node in self.node_df[self.node_df['is_driver'] == False]['cust_id'] if node in sg_nodes[cluster]]
sg_test_nodes[cluster] = sorted(sg_test_nodes[cluster])
sg_train_nodes[cluster] = sorted(sg_train_nodes[cluster])
sg_train_features[cluster] = pd.concat([self.node_df[(self.node_df['cust_id'] == cust)&(self.node_df['is_driver'] == True)][self.feats_name] for cust in sg_nodes[cluster]],axis = 0)
sg_test_features[cluster] = pd.concat([self.node_df[(self.node_df['cust_id'] == cust)&(self.node_df['is_driver'] == False)][self.feats_name] for cust in sg_nodes[cluster]],axis = 0)
sg_train_targets[cluster] =
|
pd.concat([self.node_df[(self.node_df['cust_id'] == cust)&(self.node_df['is_driver'] == True)][['is_reported']] * 1 for cust in sg_nodes[cluster]],axis = 0)
|
pandas.concat
|
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(target):
# checks/conversions/roundings are delegated to general method
return super()._get_indexer(target, method=method, tolerance=tolerance)
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
)
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
|
nv.validate_min(args, kwargs)
|
pandas.compat.numpy.function.validate_min
|
import os
import requests
import pandas as pd
from datatable import dt, fread, f, g, join
from urllib3.exceptions import HTTPError
from .get_chembl_compound_targets import parallelize
from .combine_pset_tables import write_table
# -- Enable logging
from loguru import logger
import sys
logger_config = {
"handlers": [
{"sink": sys.stdout, "colorize": True, "format":
"<green>{time}</green> <level>{message}</level>"},
{"sink": f"logs/build_clinical_trails_tables.log",
"serialize": True, # Write logs as JSONs
"enqueue": True}, # Makes logging queue based and thread safe
]
}
logger.configure(**logger_config)
@logger.catch
def build_clinical_trial_tables(output_dir: str) -> pd.DataFrame:
"""
Build the clinical trial and compound trial tables by querying the
clinicaltrials.gov API. Queries are made by compound names from the compound
synonyms table.
@param output_dir: [`string`] The file path to the directory with all
PharmacoDB tables
@return: None
"""
# Load compound synonym table
compound_file = os.path.join(output_dir, 'compound_synonym.jay')
compound_df = fread(compound_file).to_pandas()[['compound_id', 'compound_name']]
# Query clinicaltrials.gov API to get clinical trials by compound name
logger.info('Getting clinical trials from clinicaltrials.gov...')
all_studies = parallelize(list(compound_df['compound_name']),
get_clinical_trials_by_compound_names, 50)
studies_df = pd.concat(all_studies)
# Explode list-like columns into separate rows, duplicating the index
# I only use this because all the fields are returned in arrays for some reason
object_columns = studies_df.dtypes[
studies_df.dtypes == 'object'
].index.values
for column in object_columns:
studies_df = studies_df.explode(column)
# Drop and rename columns
studies_df.drop(columns='Rank', inplace=True)
studies_df.rename(
columns={'NCTId': 'nct', 'SeeAlsoLinkURL': 'link',
'OverallStatus': 'status'},
inplace=True
)
# Build clinical trials table
clin_trial_df = studies_df[['nct', 'link', 'status']].copy()
clin_trial_df.drop_duplicates('nct', inplace=True)
clin_trial_df.reset_index(inplace=True, drop=True)
clin_trial_df['clinical_trial_id'] = clin_trial_df.index + 1
# Build compound trial table
compound_trial_df = studies_df[['nct', 'compound_name']].copy()
compound_trial_df.drop_duplicates(inplace=True)
compound_trial_df =
|
pd.merge(compound_trial_df, clin_trial_df, on='nct')
|
pandas.merge
|
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([
|
Timestamp("20130101 9:01")
|
pandas.Timestamp
|
#%% MORTALITY PREDICTOR
# This script takes in ONS male/female mortalities for 1981-2018, and uses
# exponential regression to predict male/female mortalities per age up to 2100.
#%% imports
import pandas as pd
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
#%% Preamble
# Import mortalities from ONS data. Drop the last entry as this is artificially
# set to 1 for an age of 101 as an artefact from using these datasets for
# a health economics model.
maleMortality = pd.read_csv('../data/ONS_mortalities_male_parsed.csv')
maleMortality = maleMortality.drop(101)
femaleMortality =
|
pd.read_csv('../data/ONS_mortalities_female_parsed.csv')
|
pandas.read_csv
|
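# Illustrative sketch (not part of the original records): the exponential
# regression described in the MORTALITY PREDICTOR header above could be fitted
# per age with scipy.optimize.curve_fit. The data below is synthetic and the
# model form is an assumption for demonstration only.
import numpy as np
import scipy.optimize


def exp_model(year, a, b):
    # mortality modelled as a * exp(b * (year - 1981))
    return a * np.exp(b * (year - 1981))


years = np.arange(1981, 2019)
rates = 0.01 * np.exp(-0.02 * (years - 1981))  # synthetic mortality for one age
params, _ = scipy.optimize.curve_fit(exp_model, years, rates, p0=(0.01, -0.01))
predicted_2100 = exp_model(2100, *params)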
import numpy as np
import pandas as pd
from dateutil import parser
from collections import OrderedDict
import glob, os, json
DIR = "./logs/v0"
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
write_timestamps = {}
read_timestamps = {}
log_entries = {}
files = []
os.chdir(DIR)
for file in glob.glob("*.log"):
files.append(file)
# from https://stackoverflow.com/a/2400875
def diffs(t):
return [j-i for i, j in zip(t[:-1], t[1:])]
def aggregate(d):
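# Returns run-length counts: the length of each run of consecutive equal
# values in d. Note that the first appended value is 0 (prev starts as None)
# and the final run is never appended.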
l = []
prev = None
count = 0
for entry in d:
if prev == entry:
count += 1
else:
prev = entry
l.append(count)
count = 1
return l
for file in files:
write_timestamp_list = []
read_timestamp_list = []
log_entry_dict = OrderedDict()
with open(file, "r") as output:
print(f"Parsing {file}")
num = 0
for line in output:
num += 1
if len(line.strip()) > 0:
timestamp_pos = line.find(" ")
timestamp = int(line[:timestamp_pos])
write_timestamp_list.append(timestamp)
content = line[timestamp_pos + 1:].strip()
try:
json_content = json.loads(content)
log_entry_dict[timestamp] = json_content
read_timestamp_list.append(parser.isoparse(json_content["read"]))
except json.decoder.JSONDecodeError as err:
print(content)
print(num)
print(err)
write_timestamps[file] = write_timestamp_list
read_timestamps[file] = read_timestamp_list
log_entries[file] = log_entry_dict
def getOr(d, key, fallback):
if key in d:
return d[key]
else:
return fallback
def get_deltas(ts):
sorted_ts = sorted(ts)
min_ts = min(sorted_ts)
relative = list(t - min_ts for t in sorted_ts)
return diffs(relative)
print(f"Done parsing {len(files)} files")
for file in files:
wdeltas = get_deltas(float(t) / 1E6 for t in write_timestamps[file])
rdeltas = get_deltas(dt.timestamp() * 1000 for dt in read_timestamps[file])
print(f'Deltas for {file}:')
deltas_df = pd.DataFrame({'Write Deltas (ms)': wdeltas, 'Read Deltas (ms)': rdeltas})
print(deltas_df.describe(include='all'))
print("\n")
data = log_entries[file]
df = {
'total_usage': [data[t]['cpu_stats']['cpu_usage']['total_usage'] for t in data],
'pre_total_usage': [data[t]['precpu_stats']['cpu_usage']['total_usage'] for t in data],
'usage': [getOr(data[t]['memory_stats'], 'usage', 0) for t in data]
}
aggregate_df = {k: aggregate(diffs(d)) for k, d in df.items()}
for k, l in aggregate_df.items():
df_dict = {}
df_dict[f"average time (*period) between change {k}"] = l
specific_df =
|
pd.DataFrame(df_dict)
|
pandas.DataFrame
|
import streamlit as st
import pandas as pd
import numpy as np
from st_aggrid import GridOptionsBuilder, AgGrid, GridUpdateMode, DataReturnMode
import statsapi
# data= pd.read_csv('df_sample_data.csv', index_col=0)
def rookie_hr_leader_dict():
rookie_hr_leaders_d = statsapi.league_leader_data('homeRuns', season=2021, playerPool='rookies', limit= 15)
# print(rookie_hr_leaders_d)
return rookie_hr_leaders_d
def hr_leader_pandas(hr_list):
df =
|
pd.DataFrame(hr_list)
|
pandas.DataFrame
|
import umap
import hdbscan
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import pandas as pd
from sentence_transformers import SentenceTransformer
from listener.config import conf
class TopicDiscoveryBert:
def __init__(self, data):
self._data = data
self._prepared_data = None
self._model = SentenceTransformer(conf['DEFAULT']['BERT_MODEL_LOCAL_PATH'])
self._cluster = None
self._embeddings = None
def get_data(self):
#assert data is in a particular format
if self._prepared_data is None:
self._data.RESPONSE = self._data.RESPONSE.astype(str)
self._data['clean_comment'] = self._data['RESPONSE'].apply(lambda x: x.lower())
self._prepared_data = self._data
def get_embeddings(self):
self.get_data()
self._embeddings = self._model.encode(
self._prepared_data['clean_comment']
,show_progress_bar=True)
def reduce_dims(self, n_components: int=2):
if self._embeddings is None:
self.get_embeddings()
return umap.UMAP(n_neighbors=3,
n_components=n_components,
metric='cosine').fit_transform(self._embeddings)
def cluster_dims(self):
self._cluster = hdbscan.HDBSCAN(min_cluster_size=3
).fit(self.reduce_dims())
def get_cluster_labels(self):
if self._cluster is None:
self.cluster_dims()
return self._cluster.labels_
def plot_clustering(self):
# reduce data to 2 dimensions for plotting
x = self.reduce_dims()
result =
|
pd.DataFrame(x, columns=['x', 'y'])
|
pandas.DataFrame
|
"""
This code is not meant to produce a good model,
but to share with my teammates for other competitions, within the rules of this competition.
In this script, I give a suggestion for extending or wrapping transformers, pipelines and estimators.
As you know, the sklearn API with its estimators, transformers and pipelines is very nice,
but it lacks the flexibility needed in a competition.
For example,
- it can't update pandas or dask DataFrames.
- it can't change the number of rows (i.e. filter, reduce etc.)
- input validation (of course, this is a good feature when writing production models.)
I recommend
- wrapping
- extending
Caution!
- This code doesn't run to its end on this kernel because it exceeds the memory limit.
- It is only a tutorial implementation.
- This code requires more refactoring.
"""
import gc
import itertools
import logging
import os
import re
import sys
from abc import abstractmethod, ABCMeta, ABC
from multiprocessing.pool import Pool
from pathlib import Path
from time import perf_counter
from typing import Union
import keras
import lightgbm as lgb
import numpy as np
import pandas as pd
import pandas.tseries.offsets as offsets
import torch
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import Sequence
from scipy import sparse
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler, OneHotEncoder
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm
RANDOM_SEED = 10
NEXT_MKTRES_10 = "returnsOpenNextMktres10"
CATEGORY_START_END_PATTERN = r"[\{\}\']"
SPLIT_PATTERN = r"[{}']"
logger = logging.getLogger(__name__)
# logger.addHandler(logging.StreamHandler(sys.stdout))
# logger.addHandler(logging.FileHandler("main.log"))
try:
TEST_MARKET_DATA = Path(__file__).parent.joinpath("data/test/marketdata_sample.csv")
TEST_NEWS_DATA = Path(__file__).parent.joinpath("data/test/news_sample.csv")
except NameError as e:
TEST_MARKET_DATA = "data/test/marketdata_sample.csv"
TEST_NEWS_DATA = "data/test/news_sample.csv"
# MODEL_TYPE = "mlp"
# MODEL_TYPE = "lgb"
MODEL_TYPE = "sparse_mlp"
MARKET_ID = "market_id"
NEWS_ID = "news_id"
np.random.seed(10)
class FeatureSetting(object):
"""
Gather the notable settings in one place.
If not in a kernel competition, they could be loaded from a configuration file.
"""
# remove_news_features = ["headline", "subjects", "headlineTag", "provider"]
remove_news_features = []
should_use_news_feature = True
remove_raw_for_lag = True
scale = True
scale_type = "standard"
# max_shift_date = 14
max_shift_date = 10
# since = date(2010, 1, 1)
since = None
should_use_prev_news = False
def main():
"""
Don't
:return:
"""
logger.info("This model type is {}".format(MODEL_TYPE))
    # I don't recommend loading the data here, because if you want to gc by yourself you have to return to the main function.
    # Please make an object to store the dfs instead.
env, market_train_df, news_train_df = load_train_dfs()
market_preprocess = MarketPreprocess()
market_train_df = market_preprocess.fit_transform(market_train_df)
news_preprocess = NewsPreprocess()
news_train_df = news_preprocess.fit_transform(news_train_df)
features = Features()
market_train_df, news_train_df = features.fit_transform(market_train_df, news_train_df)
logger.info("First feature extraction has done")
max_day_diff = 3
gc.collect()
if FeatureSetting.should_use_news_feature:
linker = MarketNewsLinker(max_day_diff)
linker.link(market_train_df, news_train_df)
del news_train_df
del market_train_df
gc.collect()
market_train_df = linker.create_new_market_df()
linker.clear()
gc.collect()
else:
linker = None
model = ModelWrapper.generate(MODEL_TYPE)
market_train_df, _ = model.create_dataset(market_train_df, features, train_batch_size=1024,
valid_batch_size=1024)
gc.collect()
model.train(sparse_input=True)
model.clear()
days = env.get_prediction_days()
predictor = Predictor(linker, model, features, market_preprocess, news_preprocess)
predictor.predict_all(days, env)
logger.info('Done!')
env.write_submission_file()
logger.info([filename for filename in os.listdir('.') if '.csv' in filename])
def measure_time(func):
def inner(*args, **kwargs):
start = perf_counter()
result = func(*args, **kwargs)
duration = perf_counter() - start
logger.info("%s took %.6f sec", func.__name__, duration)
return result
return inner
class UnionFeaturePipeline(object):
def __init__(self, *args):
if args is None:
self.transformers = []
else:
self.transformers = list(args)
def transform(self, df, include_sparse=True):
feature_columns = []
for transformer in self.transformers:
if isinstance(transformer, NullTransformer):
transformer.transform(df)
elif isinstance(transformer, DfTransformer):
df = transformer.transform(df)
else:
feature_columns.append(transformer.transform(df))
if include_sparse:
return df, sparse.hstack(feature_columns, format="csr")
if len(feature_columns) == 0:
return df, None
return df, np.hstack(feature_columns)
def add(self, transformer):
self.transformers.append(transformer)
def is_not_empty(list_like):
if list_like is None:
return False
if isinstance(list_like, np.ndarray) or sparse.issparse(list_like):
return list_like.shape[0] > 0
return len(list_like) > 0
class MarketNewsLinker(object):
"""
For complex join of dataframes,
It would be better two dataframe transformer class
"""
def __init__(self, max_day_diff):
self.market_df = None
self.news_df = None
self.market_columns = None
self.max_day_diff = max_day_diff
self.datatypes_before_aggregation = None
# self.concatable_features = concatable_fields
self.news_columns = None
def link_market_assetCode_and_news_assetCodes(self):
assetCodes_in_markests = self.market_df.assetCode.unique().tolist()
logger.info("assetCodes pattern in markets: {}".format(len(assetCodes_in_markests)))
assetCodes_in_news = self.news_df.assetCodes.unique()
assetCodes_in_news_size = len(assetCodes_in_news)
logger.info("assetCodes pattern in news: {}".format(assetCodes_in_news_size))
parse_multiple_codes = lambda codes: re.sub(SPLIT_PATTERN, "", str(codes)).split(", ")
parsed_assetCodes_in_news = [parse_multiple_codes(str(codes)) for codes in assetCodes_in_news]
# len(max(parsed_assetCodes_in_news, key=lambda x: len(x)))
# all_assetCode_type_in_news = list(set(itertools.chain.from_iterable(assetCodes_in_news)))
# check linking
links_assetCodes = [[[raw_codes, market_assetCode] for parsed_codes, raw_codes in
zip(parsed_assetCodes_in_news, assetCodes_in_news) if
str(market_assetCode) in parsed_codes] for market_assetCode in assetCodes_in_markests]
links_assetCodes = list(itertools.chain.from_iterable(links_assetCodes))
logger.info("links for assetCodes: {}".format(len(links_assetCodes)))
links_assetCodes = pd.DataFrame(links_assetCodes, columns=["newsAssetCodes", "marketAssetCode"],
dtype='category')
logger.info(links_assetCodes.shape)
# self.market_df = self.market_df.merge(links_assetCodes, left_on="assetCode", right_on="marketAssetCode",
# copy=False, how="left", left_index=True)
self.market_df = self.market_df.merge(links_assetCodes, left_on="assetCode", right_on="marketAssetCode",
copy=False, how="left")
logger.info(self.market_df.shape)
self.market_df.drop(["marketAssetCode"], axis=1, inplace=True)
def append_working_date_on_market(self):
self.market_df["date"] = self.market_df.time.dt.date
self.news_df["firstCreatedDate"] = self.news_df.firstCreated.dt.date
self.news_df.firstCreatedDate = self.news_df.firstCreatedDate.astype(np.datetime64)
working_dates = self.news_df.firstCreatedDate.unique()
working_dates.sort()
market_dates = self.market_df.date.unique().astype(np.datetime64)
market_dates.sort()
def find_prev_date(date):
for diff_day in range(1, self.max_day_diff + 1):
prev_date = date - np.timedelta64(diff_day, 'D')
if len(np.searchsorted(working_dates, prev_date)) > 0:
return prev_date
return None
prev_news_days_for_market_day = np.apply_along_axis(arr=market_dates, func1d=find_prev_date, axis=0)
date_df = pd.DataFrame(columns=["date", "prevDate"])
date_df.date = market_dates
date_df.prevDate = prev_news_days_for_market_day
self.market_df.date = self.market_df.date.astype(np.datetime64)
self.market_df = self.market_df.merge(date_df, left_on="date", right_on="date", how="left")
def link_market_id_and_news_id(self):
logger.info("linking ids...")
self.news_columns = self.news_df.columns.tolist()
# merge market and news
market_link_columns = [MARKET_ID, "time", "newsAssetCodes", "date", "prevDate"]
news_link_df = self.news_df[["assetCodes", "firstCreated", "firstCreatedDate", NEWS_ID]]
self.news_df.drop(["assetCodes", "firstCreated", "firstCreatedDate"], axis=1, inplace=True)
link_df = self.market_df[market_link_columns].merge(news_link_df, left_on=["newsAssetCodes", "date"],
right_on=["assetCodes", "firstCreatedDate"], how='left')
link_df = link_df[link_df["time"] > link_df["firstCreated"]]
link_df.drop(["time", "newsAssetCodes", "date", "prevDate"], axis=1, inplace=True)
if FeatureSetting.should_use_prev_news:
prev_day_link_df = self.market_df[market_link_columns].merge(
news_link_df, left_on=["newsAssetCodes", "prevDate"],
right_on=["assetCodes", "firstCreatedDate"])
prev_day_link_df = prev_day_link_df[
prev_day_link_df["time"] - pd.Timedelta(days=1) < prev_day_link_df["firstCreated"]]
prev_day_link_df = prev_day_link_df.drop(
["time", "newsAssetCodes", "date", "prevDate"], axis=1, inplace=True)
del news_link_df
gc.collect()
if FeatureSetting.should_use_prev_news:
# link_df = pd.concat([link_df, prev_day_link_df])
link_df = link_df.append(prev_day_link_df)
del prev_day_link_df
gc.collect()
self.market_df = self.market_df.merge(link_df, on=MARKET_ID, how="left", copy=False)
# self.market_df = self.market_df.merge(link_df, on=MARKET_ID, how="left")
del link_df
gc.collect()
logger.info("shape after append news" + str(self.market_df.shape))
def aggregate_day_asset_news(self):
logger.info("aggregating....")
agg_func_map = {column: "mean" for column in self.market_df.columns.tolist()
if column == "marketCommentary" or column not in self.market_columns}
agg_func_map.update({col: "first"
for col in self.market_columns})
agg_func_map[NEWS_ID] = lambda x: x.tolist()
logger.info(agg_func_map)
logger.info(self.market_df.dtypes)
self.market_df = self.market_df.groupby(MARKET_ID).agg(agg_func_map)
logger.info("the aggregation for each group has done")
self._update_inner_data()
def _update_inner_data(self):
self.market_columns = self.market_df.columns.tolist()
@measure_time
def link(self, market_df, news_df, pool=4):
self.market_df = market_df
self.news_df = news_df
self.pool = pool
self.market_columns = self.market_df.columns.tolist()
self.datatypes_before_aggregation = {col: t for col, t in zip(self.market_columns, self.market_df.dtypes)}
self.datatypes_before_aggregation.update(
{col: t for col, t in zip(self.news_df.columns, self.news_df.dtypes)}
)
self.link_market_assetCode_and_news_assetCodes()
self.append_working_date_on_market()
return self.link_market_id_and_news_id()
@measure_time
def create_new_market_df(self):
logger.info("updating market df....")
dropped_columns = ["date", "prevDate", "newsAssetCodes",
"assetCodes",
"firstCreated", "firstCreatedDate"]
logger.info(self.market_df.columns)
self.market_df.drop(dropped_columns, axis=1, inplace=True)
self.market_df.sort_values(by=MARKET_ID, inplace=True)
self.aggregate_day_asset_news()
logger.info("linking done")
return self.market_df
def clear(self):
del self.market_df
self.market_df = None
self.news_df = None
self.market_columns = None
self.datatypes_before_aggregation = None
def compress_dtypes(news_df):
for col, dtype in zip(news_df.columns, news_df.dtypes):
if dtype == np.dtype('float64'):
news_df[col] = news_df[col].astype("float32")
if dtype == np.dtype('int64'):
news_df[col] = news_df[col].astype("int32")
def load_train_dfs():
"""
A switchable loader so the script can be debugged locally with the sample data.
:return:
"""
try:
from kaggle.competitions import twosigmanews
env = twosigmanews.make_env()
(market_train_df, news_train_df) = env.get_training_data()
except:
market_train_df = pd.read_csv(TEST_MARKET_DATA, encoding="utf-8", engine="python")
news_train_df =
|
pd.read_csv(TEST_NEWS_DATA, encoding="utf-8", engine="python")
|
pandas.read_csv
|
import logging
import os
from datetime import timedelta
from timeit import default_timer as timer
import pandas as pd
from augmentation.neo4j_utils import get_nodes_with_pk_from_table
from augmentation.train_algorithms import train_CART, train_ID3, train_XGBoost
datasets = {
"other-data/decision-trees-split/football/football.csv": ["win", "id"],
"other-data/decision-trees-split/kidney-disease/kidney_disease.csv": ["classification", "id"],
"other-data/decision-trees-split/steel-plate-fault/steel_plate_fault.csv": ["Class", "index"],
"other-data/decision-trees-split/titanic/titanic.csv": ["Survived", "PassengerId"]
}
folder_name = os.path.abspath(os.path.dirname(__file__))
algorithms = ['CART', 'ID3', 'XGBoost']
def join_all(dataset_path: str) -> ():
next_nodes = []
visited = []
ids = []
id_map = {}
filename = dataset_path.split('/')[-1]
join_all_filename = f"join-all-{filename}"
joined_path = f"../joined-df/{join_all_filename}"
joined_df = None
next_nodes.append(dataset_path)
while len(next_nodes) > 0:
path = next_nodes.pop()
join_path = get_nodes_with_pk_from_table(path)
from_source = join_path[0][0]
visited.append(from_source)
reduced_join_path = []
for source, target in join_path:
reduced_join_path.append(target)
print(reduced_join_path)
for target in reduced_join_path:
base_source = '%s' % join_all_filename
filepath = os.path.join(folder_name, f"../joined-df/{base_source}")
if joined_df is None:
base_source = '%s' % from_source
filepath = os.path.join(folder_name, f"../{base_source}")
base_df = pd.read_csv(filepath, header=0, engine="python", encoding="utf8", quotechar='"',
escapechar='\\')
from_id = target[0]
to_source = target[1]
to_id = target[2]
if to_source in visited:
continue
if from_source in id_map:
from_ids = list(filter(lambda x: x[0] == from_id, id_map[from_source]))
if len(from_ids) > 0:
from_id = from_ids[0][1]
print(from_id)
else:
id_map[from_source] = []
if to_source in id_map:
to_ids = list(filter(lambda x: x[0] == to_id, id_map[to_source]))
if len(to_ids) > 0:
to_id = to_ids[0][1]
print(to_id)
else:
id_map[to_source] = []
next_nodes.append(to_source)
neighbour_filepath = os.path.join(folder_name, f"../{to_source}")
neighbour_df = pd.read_csv(neighbour_filepath, header=0, engine="python", encoding="utf8",
quotechar='"',
escapechar='\\')
logging.info(
f'\rJoin dataset {base_source} and {to_source} on left column: {from_id} and right column: {to_id}')
joined_df = pd.merge(base_df, neighbour_df, how="left", left_on=from_id, right_on=to_id,
suffixes=("", f"/{to_source}"))
for col in joined_df.columns:
if f"{to_source}" in col:
id_map[to_source].append((col.split('/')[0], col))
joined_df.to_csv(joined_path, index=False)
ids.append(from_id)
ids.append(to_id)
return ids, joined_path
def curate_df(ids, joined_path):
joined_df = pd.read_csv(joined_path, header=0, engine="python", encoding="utf8",
quotechar='"',
escapechar='\\')
drop_columns = list(set(joined_df.columns) & set(ids))
for col in ids:
drop_columns = drop_columns + list(filter(lambda x: col in x, joined_df.columns))
joined_df.drop(columns=drop_columns, inplace=True)
joined_df.to_csv(joined_path, index=False)
return joined_df
def join_all_baseline(data):
results = []
for path, features in data.items():
label = features[0]
ids = features[1]
start_join = timer()
ids, path = join_all(path)
dataset = curate_df(ids, path)
print("\tEncoding data")
df = dataset.apply(lambda x: pd.factorize(x)[0])
X = df.drop(columns=[label])
print(X.columns)
y = df[label]
end_join = timer()
start_train = timer()
accuracy, params = train_CART(X, y)
end_train = timer()
res = {
'dataset': path,
'accuracy': accuracy,
'algorithm': 'CART',
'runtime': timedelta(seconds=((end_join - start_join) + (end_train - start_train)))
}
res.update(params)
results.append(res)
start_train = timer()
accuracy = train_ID3(X, y)
end_train = timer()
res = {
'dataset': path,
'accuracy': accuracy,
'algorithm': 'ID3',
'runtime': timedelta(seconds=((end_join - start_join) + (end_train - start_train)))
}
results.append(res)
start_train = timer()
accuracy, params = train_XGBoost(X, y)
end_train = timer()
res = {
'dataset': path,
'accuracy': accuracy,
'algorithm': 'XGBoost',
'runtime': timedelta(seconds=((end_join - start_join) + (end_train - start_train)))
}
res.update(params)
results.append(res)
print(results)
df =
|
pd.DataFrame(results)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from scipy.stats import norm
from src.models import double_exponential_smoothing as holt
from src.models import k_means
from src.models import seasonal_hmm as hmm
from src.helpers import data_helpers as helper
class SeasonalSwitchingModelResults:
def __init__(self, df, endog, date_header, trend, level, seasonal_info,
impacts, fitted_values, actuals, residuals):
self.df = df
self.endog = endog
self.date_header = date_header
self.trend = trend
self.level = level
self.seasonal_info = seasonal_info
self.impacts = impacts
self.fitted_values = fitted_values
self.actuals = actuals
self.residuals = residuals
def plot_seasonal_structures(self):
"""
Function for plotting the seasonal components.
"""
import matplotlib.pyplot as plt
plt.style.use('ggplot')
fig, axs = plt.subplots(self.seasonal_info['profile_count'])
seasonality_features = self.seasonal_info['seasonal_feature_sets']
i = 1
day_keys = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday',
6: 'Sunday'}
for state in seasonality_features:
cycle_points = []
seasonal_effects = []
upper_bounds = []
lower_bounds = []
state_features = seasonality_features[state]
for ind, row in state_features.iterrows():
cycle_points.append(ind)
point = row['SEASONALITY_MU']
upper_bound = norm.ppf(0.9, loc=row['SEASONALITY_MU'], scale=row['SEASONALITY_SIGMA'])
lower_bound = norm.ppf(0.1, loc=row['SEASONALITY_MU'], scale=row['SEASONALITY_SIGMA'])
seasonal_effects.append(point)
upper_bounds.append(upper_bound)
lower_bounds.append(lower_bound)
weekdays = [day_keys[day] for day in cycle_points]
axs[i-1].plot(weekdays, seasonal_effects, color='blue')
axs[i-1].plot(weekdays, upper_bounds, color='powderblue')
axs[i-1].plot(weekdays, lower_bounds, color='powderblue')
axs[i-1].fill_between(weekdays, lower_bounds, upper_bounds, color='powderblue')
axs[i-1].set_title('Seasonal Effects of State {}'.format(i))
i += 1
plt.show()
def predict(self, n_steps, from_date=None, exog=None):
"""
Predict function for fitted seasonal switching model
Args:
- n_steps: int, horizon to forecast over
Returns:
- predictions: list, predicted values
"""
# set up the prediction data frame
pred_df = self.create_predict_df(n_steps, from_date)
if from_date is None:
# extract the level
level = self.level
else:
level = self.level + (from_date - max(self.df[self.date_header])).days * self.trend
trend = self.trend
predictions = []
# generate level and trend predictions
for pred in range(n_steps):
level = level+trend
predictions.append(level)
if self.seasonal_info['level'] is not None:
# set secondary containers to store predicted seasonal values
seasonal_values = []
state_mu_params = []
cycle_states = pred_df['CYCLE'].tolist()
seasonality_features = self.seasonal_info['seasonal_feature_sets']
# extract state parameter sets
for state in seasonality_features:
param_set = seasonality_features[state]
state_mu_param_set = param_set['SEASONALITY_MU'].tolist()
state_mu_params.append(state_mu_param_set)
# get the initial state probability (last observation in the fitted model), the transition matrix,
# and set the mu parameters up for linear algebra use
state_mu_params = np.array(state_mu_params).T
observation_probabilities = self.seasonal_info['profile_observation_probabilities']
initial_prob = observation_probabilities[-1]
transition_matrix = self.seasonal_info['profile_transition_matrix']
# predict seasonal steps
for pred in range(n_steps):
cycle = cycle_states[pred]
future_state_probs = sum(np.multiply(initial_prob, transition_matrix.T).T) / np.sum(
np.multiply(initial_prob, transition_matrix.T).T)
weighted_mu = np.sum(np.multiply(future_state_probs, state_mu_params[cycle, :]))
seasonal_values.append(weighted_mu)
initial_prob = future_state_probs
else:
seasonal_values = [1]*n_steps
# generate final predictions by multiplying level+trend*seasonality
predictions = np.multiply(predictions, seasonal_values).tolist()
if self.impacts is not None and exog is not None:
predictions += np.dot(exog, self.impacts)
return predictions
def create_predict_df(self, n_steps, from_date):
"""
Set up DF to run prediction on
Args:
- n_steps: int, horizon to forecast over
Returns:
- pred_df: df, prediction horizon
"""
if from_date is None:
pred_start = max(self.df[self.date_header]) + pd.DateOffset(days=1)
else:
pred_start = from_date
pred_end = pred_start + pd.DateOffset(days=n_steps-1)
pred_df = pd.DataFrame({self.date_header: pd.date_range(pred_start, pred_end)})
if self.seasonal_info['level'] == 'weekly':
pred_df['CYCLE'] = pred_df[self.date_header].dt.weekday
return pred_df
class SeasonalSwitchingModel:
def __init__(self,
df,
endog,
date_header,
initial_level,
level_smoothing,
initial_trend,
trend_smoothing,
seasonality='weekly',
max_profiles=10,
anomaly_detection=True,
exog=None,
_lambda=0.1):
self.df = df
self.endog = endog
self.date_header = date_header
self.initial_level = initial_level
self.level_smoothing = level_smoothing
self.initial_trend = initial_trend
self.trend_smoothing = trend_smoothing
self.max_profiles = max_profiles
self.seasonality = seasonality
self.anomaly_detection = anomaly_detection
self.exog = exog
self._lambda = _lambda
def fit(self):
'''
Parent function for fitting the seasonal switching model to a timeseries.
The seasonal switching model is designed specifically to model timeseries with multiple seasonal
states which are "hidden" via an HMM, while modeling trend and level components through double exponential
smoothing.
Users can also include exogenous regressors to capture the impacts of events.
Returns:
- SeasonalSwitchingModelResults: a class housing the fitted results
'''
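        # Fit pipeline, step by step: (1) optional anomaly filtering, (2) Holt double
        # exponential smoothing for level/trend, (3) k-means clustering to seed the
        # seasonal profiles, (4) an HMM over those seasonal states, (5) multiplicative
        # recombination of the pieces, (6) optional ridge-fitted event impacts.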
# extract the timeseries specifically from the df provided
timeseries = self.df[self.endog].tolist()
# pass the time series through an anomaly filter
if self.anomaly_detection:
timeseries_df = self.df[[self.endog, self.date_header]].copy()
timeseries_df[self.endog] = helper.Helper().anomaly_filter(timeseries_df[self.endog])
else:
timeseries_df = self.df[[self.endog, self.date_header]].copy()
# decompose trend and level components using double exponential smoothing
decomposition_df, trend, level = self.fit_trend_and_level(timeseries_df)
try:
# estimate the seasonal profiles to the partially decomposed timeseries via an cluster analysis
seasonal_profiles = self.estimate_seasonal_profiles(decomposition_df)
# extract the observed seasonality (decomposed timeseries) and the cycle (a point in the seasonal cycle)
seasonal_observations = decomposition_df['OBSERVED_SEASONALITY'].tolist()
cycle_states = decomposition_df['CYCLE'].tolist()
# fit the seasonal switching HMM
fitted_seasonal_values, seasonality_transition_matrix, seasonality_features, observation_probabilities = \
hmm.SeasonalHiddenMarkovModel(self.n_profiles).fit(seasonal_profiles, seasonal_observations,
cycle_states)
# create dict with seasonal components
seasonal_components = {'level': self.seasonality,
'profile_count': self.n_profiles,
'seasonal_feature_sets': seasonality_features,
'profile_transition_matrix': seasonality_transition_matrix,
'profile_observation_probabilities': observation_probabilities,
'seasonal_fitted_values': fitted_seasonal_values}
except Exception as e:
print('Failure fitting seasonal components, reverting to double exponential smoothing')
print('Error was {}'.format(e))
fitted_seasonal_values = [1]*len(decomposition_df)
seasonal_components ={'level': None}
# perform a final fit as a multiplicative model, between the HMM and the trend/level fit
fitted_values = np.multiply(decomposition_df['LEVEL_TREND_DECOMPOSITION'], fitted_seasonal_values).tolist()
residuals = np.subtract(timeseries, fitted_values).tolist()
if self.exog is not None:
impacts = self.fit_event_impacts(residuals)
fitted_values += np.dot(self.exog, impacts)
residuals = np.subtract(timeseries, fitted_values).tolist()
else:
impacts = None
# store and return class
results = SeasonalSwitchingModelResults(self.df, self.endog, self.date_header, trend,
level, seasonal_components, impacts,
fitted_values, timeseries, residuals)
return results
def fit_event_impacts(self, endog):
"""
Fit event impacts using ridge regression.
Args:
- endog: series, dependent variable
- _lambda: float, shrinkage parameter (increasing = stronger shrinkage of the coefficients)
Returns:
- coefficients: array, coefficient set
"""
exog = self.exog.copy()
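        # Closed-form ridge solution written in its "dual" form:
        # beta = X.T @ inv(X @ X.T + lambda * I_n) @ y,
        # algebraically equivalent to the usual (X.T @ X + lambda * I_p)^-1 @ X.T @ y.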
coefficients = np.dot(np.dot(np.linalg.inv(np.dot(exog, exog.T)+self._lambda*np.identity(exog.shape[0])),
exog).T, endog)
return coefficients
def fit_trend_and_level(self, df):
"""
Fit the trend and level to the timeseries using double exponential smoothing
Args:
- df: dataframe, containing data for fit
Returns:
- decomposition_df: dataframe, containing level and trend decomposition fit
- trend: float, current trend
- level: float, current level
"""
# extract the timeseries and begin forming the decomposition data frame
decomposition_df = df.copy()
# establish the "grain" (which cycle we're in) and the "cycle" (which point in the seasonal cycle)
if self.seasonality == 'weekly':
decomposition_df['GRAIN'] = decomposition_df.index//7
decomposition_df['ROLLING_GRAIN_MEAN'] = decomposition_df[self.endog].rolling(
7, min_periods=0).mean().tolist()
decomposition_df['CYCLE'] = decomposition_df[self.date_header].dt.weekday
else:
print("Seasonal profile not set to 'weekly', unable to fit seasona profiling")
# extract the training timeseries specifically
training_data = decomposition_df['ROLLING_GRAIN_MEAN']
projected, trend, level = holt.double_exponential_smoothing(training_data, self.initial_level, self.initial_trend,
self.level_smoothing, self.trend_smoothing)
# apply fit to the fit_df
decomposition_df['LEVEL_TREND_DECOMPOSITION'] = projected
# get the observed seasonality using the filtered values
decomposition_df['OBSERVED_SEASONALITY'] = (decomposition_df[self.endog]/
decomposition_df['LEVEL_TREND_DECOMPOSITION'])
return decomposition_df, trend, level
def estimate_seasonal_profiles(self, decomposition_df):
"""
This function estimates the seasonal profiles within our timeseries. These serve as the initial
estimates of the state-space parameters fed to the HMM.
Args:
- decomposition_df: a decomposed timeseries into level, trend, seasonality
Returns:
- seasonal_profiles: dict, a dictionary containing the seasonal profiles and their state space params
"""
# extract needed vars to create a cluster df
clustering_df = decomposition_df[['GRAIN', 'CYCLE', 'OBSERVED_SEASONALITY']]
# do a group by to ensure grain-cycle pairings
clustering_df = clustering_df.groupby(['GRAIN', 'CYCLE'], as_index=False)['OBSERVED_SEASONALITY'].agg('mean')
# Normalize the seasonal affects, reducing the impact of relatively large or small values on the search space
clustering_df['NORMALIZED_SEASONALITY'] = (clustering_df['OBSERVED_SEASONALITY']-
clustering_df['OBSERVED_SEASONALITY'].mean()
)/clustering_df['OBSERVED_SEASONALITY'].std()
# Remove any outliers from the cluster fit df. Given we are attempting to extract common seasonality, outliers
# simply inhibit the model
clustering_df['NORMALIZED_SEASONALITY'] = np.where(clustering_df['NORMALIZED_SEASONALITY']<-3, -3,
clustering_df['NORMALIZED_SEASONALITY'])
clustering_df['NORMALIZED_SEASONALITY'] = np.where(clustering_df['NORMALIZED_SEASONALITY']>3, 3,
clustering_df['NORMALIZED_SEASONALITY'])
# pivot the original timeseries to create a feature set for cluster analysis
cluster_fit_df = clustering_df.pivot(index='GRAIN', columns='CYCLE', values='NORMALIZED_SEASONALITY').reset_index()
cluster_fit_df.dropna(inplace=True)
cluster_fit_data = cluster_fit_df.iloc[:, 1:]
# do the same on the un-processed df, which will be used to ensure classification of all observations
cluster_pred_df = clustering_df.pivot(index='GRAIN', columns='CYCLE', values='NORMALIZED_SEASONALITY').reset_index()
cluster_pred_df.dropna(inplace=True)
cluster_pred_data = cluster_pred_df.iloc[:,1:]
# Fit the clustering model to extract common seasonal shapes
clusterer, self.n_profiles = k_means.run_kmeans_clustering(cluster_fit_data, self.max_profiles)
# apply a final predict to the un-processed df, assigning initial shapes to all observations
cluster_pred_df['CLUSTER'] = clusterer.predict(cluster_pred_data).tolist()
cluster_pred_df = cluster_pred_df[['GRAIN', 'CLUSTER']]
decomposition_df = decomposition_df.merge(cluster_pred_df, how='inner', on='GRAIN')
# store the initial seasonal profiles (assuming normal distribution of observations) in a dictionary to be used
# as state-space parameters in the HMM
seasonal_profiles = {}
for profile in range(self.n_profiles):
profile_df = decomposition_df[decomposition_df['CLUSTER'] == profile]
weekly_profile_mu = profile_df.groupby('CYCLE', as_index=False)['OBSERVED_SEASONALITY'].agg('mean')
weekly_profile_mu.rename(columns={'OBSERVED_SEASONALITY': 'SEASONALITY_MU'}, inplace=True)
weekly_profile_sigma = profile_df.groupby('CYCLE', as_index=True
)['OBSERVED_SEASONALITY'].agg('std').reset_index()
weekly_profile_sigma.rename(columns={'OBSERVED_SEASONALITY': 'SEASONALITY_SIGMA'}, inplace=True)
seasonal_profile = weekly_profile_mu.merge(weekly_profile_sigma, how='inner', on='CYCLE')
seasonal_profiles.update({'PROFILE_{}'.format(profile): seasonal_profile})
return seasonal_profiles
if __name__ == '__main__':
# Running main will run a single fit and predict step on a subset of the "nfl_timeseries_test_data.csv" data set
data = pd.read_csv('../nfl_timeseries_test_data.csv', parse_dates=['DATE'])
exog = pd.get_dummies(data['DATE'].dt.weekday, prefix='weekday')
print(max(data['DATE']))
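# flag the (assumed) Super Bowl date as a one-off exogenous indicator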
exog['super_bowl'] = np.where(data['DATE'].isin([pd.to_datetime('2/8/2016')]), 1, 0)
exog_2 = pd.get_dummies(data['DATE'].dt.month, prefix='month')
exog = exog.merge(exog_2, left_index=True, right_index=True)
data.columns = data.columns.str.upper().str.strip()
data.sort_values('DATE', inplace=True)
fit_df = data
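# seed the level state with the mean of the first weekly cycle of observations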
initial_level = fit_df['QUERIES'][:7].mean()
forecaster = SeasonalSwitchingModel(fit_df, 'QUERIES', 'DATE', initial_level, .2, 0, .2,
max_profiles=10, seasonality='weekly', anomaly_detection=True,
exog=exog, _lambda=25)
fitted_switching_model = forecaster.fit()
predictions = fitted_switching_model.predict(10, from_date=
|
pd.to_datetime('1/8/2017')
|
pandas.to_datetime
|
import inspect
import os
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_convert_to_woodwork_structure,
_convert_woodwork_types_wrapper,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
infer_feature_types,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should wrap (modulo) within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
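# helper that dispatches to the matching pandas equality assertion based on the input type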
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(data, expected, check_index_type=check_index_type)
else:
pd.testing.assert_frame_equal(data, expected, check_index_type=check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3])),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2, pd.Series([np.nan, np.nan, 1, 2, 3, 4])),
(pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]}), 0,
pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]})),
(pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "c"]}), 1,
pd.DataFrame({"a": [np.nan, 4, 5, 6], "b": [np.nan, "a", "b", "c"]})),
(pd.DataFrame({"a": [1, 0, 1]}), 2,
pd.DataFrame({"a": [np.nan, np.nan, 1, 0, 1]}))])
def test_pad_with_nans(data, num_to_pad, expected):
padded = pad_with_nans(data, num_to_pad)
_check_equality(padded, expected)
def test_pad_with_nans_with_series_name():
name = "data to pad"
data = pd.Series([1, 2, 3], name=name)
padded = pad_with_nans(data, 1)
_check_equality(padded, pd.Series([np.nan, 1, 2, 3], name=name))
@pytest.mark.parametrize("data, expected",
[([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [1., 2., 3, None]})],
[pd.Series([1., 2.], index=pd.Int64Index([1, 2])),
pd.DataFrame({"a": [2., 3.]}, index=pd.Int64Index([1, 2]))]),
([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [3., 4., None, None]})],
[pd.Series([1.], index=pd.Int64Index([1])),
pd.DataFrame({"a": [4.]}, index=pd.Int64Index([1]))]),
([pd.DataFrame(), pd.Series([None, 1., 2., 3.])],
[pd.DataFrame(), pd.Series([1., 2., 3.], index=pd.Int64Index([1, 2, 3]))]),
([pd.DataFrame({"a": [1., 2., None]}), pd.Series([])],
[pd.DataFrame({"a": [1., 2.]}), pd.Series([])])
])
def test_drop_nan(data, expected):
no_nan_1, no_nan_2 = drop_rows_with_nans(*data)
_check_equality(no_nan_1, expected[0], check_index_type=False)
_check_equality(no_nan_2, expected[1], check_index_type=False)
def test_rename_column_names_to_numeric():
X = np.array([[1, 2], [3, 4]])
pd.testing.assert_frame_equal(_rename_column_names_to_numeric(X), pd.DataFrame(X))
X = pd.DataFrame({"<>": [1, 2], ">>": [2, 4]})
pd.testing.assert_frame_equal(_rename_column_names_to_numeric(X), pd.DataFrame({0: [1, 2], 1: [2, 4]}))
X = ww.DataTable(pd.DataFrame({"<>": [1, 2], ">>": [2, 4]}), logical_types={"<>": "categorical", ">>": "categorical"})
X_renamed = _rename_column_names_to_numeric(X)
X_expected = pd.DataFrame({0: pd.Series([1, 2], dtype="category"), 1: pd.Series([2, 4], dtype="category")})
pd.testing.assert_frame_equal(X_renamed.to_dataframe(), X_expected)
assert X_renamed.logical_types == {0: ww.logical_types.Categorical, 1: ww.logical_types.Categorical}
def test_convert_woodwork_types_wrapper_with_nan():
y = _convert_woodwork_types_wrapper(pd.Series([1, 2, None], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, np.nan], dtype="float64"))
y = _convert_woodwork_types_wrapper(pd.array([1, 2, None], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, np.nan], dtype="float64"))
y = _convert_woodwork_types_wrapper(pd.Series(["a", "b", None], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", np.nan], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.array(["a", "b", None], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", np.nan], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.Series([True, False, None], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, np.nan]))
y = _convert_woodwork_types_wrapper(pd.array([True, False, None], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, np.nan]))
def test_convert_woodwork_types_wrapper():
y = _convert_woodwork_types_wrapper(pd.Series([1, 2, 3], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, 3], dtype="int64"))
y = _convert_woodwork_types_wrapper(pd.array([1, 2, 3], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, 3], dtype="int64"))
y = _convert_woodwork_types_wrapper(pd.Series(["a", "b", "a"], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", "a"], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.array(["a", "b", "a"], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", "a"], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.Series([True, False, True], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, True], dtype="bool"))
y = _convert_woodwork_types_wrapper(pd.array([True, False, True], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, True], dtype="bool"))
def test_convert_woodwork_types_wrapper_series_name():
name = "my series name"
series_with_name = pd.Series([1, 2, 3], name=name)
y = _convert_woodwork_types_wrapper(series_with_name)
assert y.name == name
def test_convert_woodwork_types_wrapper_dataframe():
X = pd.DataFrame({"Int series": pd.Series([1, 2, 3], dtype="Int64"),
"Int array": pd.array([1, 2, 3], dtype="Int64"),
"Int series with nan": pd.Series([1, 2, None], dtype="Int64"),
"Int array with nan": pd.array([1, 2, None], dtype="Int64"),
"string series": pd.Series(["a", "b", "a"], dtype="string"),
"string array": pd.array(["a", "b", "a"], dtype="string"),
"string series with nan": pd.Series(["a", "b", None], dtype="string"),
"string array with nan": pd.array(["a", "b", None], dtype="string"),
"boolean series": pd.Series([True, False, True], dtype="boolean"),
"boolean array": pd.array([True, False, True], dtype="boolean"),
"boolean series with nan": pd.Series([True, False, None], dtype="boolean"),
"boolean array with nan": pd.array([True, False, None], dtype="boolean")
})
X_expected = pd.DataFrame({"Int series": pd.Series([1, 2, 3], dtype="int64"),
"Int array": pd.array([1, 2, 3], dtype="int64"),
"Int series with nan": pd.Series([1, 2, np.nan], dtype="float64"),
"Int array with nan": pd.array([1, 2, np.nan], dtype="float64"),
"string series": pd.Series(["a", "b", "a"], dtype="object"),
"string array": pd.array(["a", "b", "a"], dtype="object"),
"string series with nan": pd.Series(["a", "b", np.nan], dtype="object"),
"string array with nan": pd.array(["a", "b", np.nan], dtype="object"),
"boolean series": pd.Series([True, False, True], dtype="bool"),
"boolean array": pd.array([True, False, True], dtype="bool"),
"boolean series with nan":
|
pd.Series([True, False, np.nan], dtype="object")
|
pandas.Series
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, then ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# these now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fall back
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
|
tm.assert_series_equal(result, expected)
|
pandas.util.testing.assert_series_equal
|
"""
Source file that holds the controller of the application. All of the application logic and the
manipulation of the GUI elements lives here.
Classes in the source file:
* :class:`Controller`: Class that holds all the logic of the application and the manipulation of the GUI elements
that the :mod:`FPLViewer` source file holds.
"""
import datetime
import json
import logging
import os
import numpy as np
import pandas as pd
import requests
import best_15_optimisation as opt
class Controller(object):
"""
Class that holds all the logic of the application and the manipulation of the GUI elements
that the :mod:`FPLViewer` source file holds.
"""
def __init__(self, main_window):
self.logger = logging.getLogger(__name__)
self.main_window = main_window
self.popup = None
self.fpl_database_in_json = None
self.all_elements_df = None
self.element_types_df = None
self.teams_df = None
self.events_df = None
self.useful_player_attributes = None
self.columns_for_sorting = None
self.df_for_view = None
self.model = None
self.last_process = None
self.columns_for_sorting = ['total_points', 'now_cost', 'value', 'position', 'team_name', 'form', 'minutes',
'ict_index', 'ict_index_rank', 'goals_scored', 'assists', 'clean_sheets',
'bonus', 'selected_by_percent', 'transfer_diff', 'transfers_in', 'transfers_out']
self.columns_for_optimisation = ['total_points', 'value', 'form', 'ict_index']
# Populate the sort_value_button
self.main_window.select_sort_value_button.addItems(self.columns_for_sorting)
# Populate the find_best_15_button
self.main_window.select_best_15_value_button.addItems(self.columns_for_optimisation)
# Connections
self.main_window.menu.addAction('&Save Database', self.save_database_to_file)
self.main_window.menu.addAction('&Load Database from file', self.load_database_from_file)
self.main_window.menu.addAction('&Exit', self.main_window.close)
self.main_window.download_database_button.clicked.connect(self.get_fpl_database_in_json)
self.main_window.process_data_button.clicked.connect(self.process_data)
self.main_window.show_player_statistics_button.clicked.connect(self.show_player_statistics)
self.main_window.select_sort_value_button.activated.connect(self.display_sorted_statistics)
self.main_window.select_best_15_value_button.activated.connect(self.calculate_best_15_players)
self.main_window.most_valuable_position_button.clicked.connect(self.display_most_valuable_position)
self.main_window.most_valuable_teams_button.clicked.connect(self.display_most_valuable_teams)
self.main_window.save_useful_player_attributes_df_to_csv.clicked.connect(
self.save_useful_player_attributes_df_to_csv)
self.main_window.save_df_for_view_to_csv.clicked.connect(self.save_df_for_view_to_csv)
def get_fpl_database_in_json(self):
"""
Get the FPL database using the FPL's API.
"""
fpl_api_url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
try:
the_whole_db = requests.get(fpl_api_url)
except requests.RequestException as e:
self.main_window.set_status_display_text("An error has occurred while trying to download the database. "
"Please consult the log for details.")
self.logger.error("An error has occurred while trying to download the database.", exc_info=True)
else:
self.fpl_database_in_json = the_whole_db.json()
self.main_window.set_status_display_text("Database has been downloaded successfully.")
self.main_window.process_data_button.setDisabled(False)
def process_data(self):
"""
Extract the parts that we want to keep from the downloaded data and process them.
"""
try:
# Keep the data pieces that are needed for our application in pandas DataFrame format
self.all_elements_df = pd.DataFrame(self.fpl_database_in_json['elements'])
self.element_types_df =
|
pd.DataFrame(self.fpl_database_in_json['element_types'])
|
pandas.DataFrame
|
import pandas as pd
import random
import time
df = pd.DataFrame([])
total_rows = 0
def read_data(src):
"""
:param src: system path of the CSV file to read
:return:
"""
global df, total_rows
df = pd.read_csv(src)
total_rows = len(df.index)
def indices_lottery():
""" Randomly picks which rows to add for the 70% (major_df) data and
30% (minor_df)
:return:
"""
minor_num = int(total_rows * 0.30)
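# draw the 30% subset of row indices without replacement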
minor_indices = random.sample(range(0, total_rows), minor_num)
minor_indices = set(minor_indices)
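# every index not drawn for the minor split falls into the major (70%) split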
major_indices = set()
for index in range(0, total_rows):
if index not in minor_indices:
major_indices.add(index)
return major_indices, minor_indices
def split_data():
"""Splits the main data into 2 parts. One part(70%) goes into major_df
and the other (30%) into minor_df
:return:
"""
major_indices, minor_indices = indices_lottery()
column_titles = list(df.columns.values)
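# start the 70% split as an empty frame that keeps the original column order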
major_df =
|
pd.DataFrame([], columns=column_titles)
|
pandas.DataFrame
|
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
(
2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31),
},
),
(
-CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31),
},
),
(
-2 *
|
CBMonthEnd()
|
pandas._libs.tslibs.offsets.CBMonthEnd
|
# -*- coding: utf-8 -*-
import dask.bag as db
import pandas as pd
import pytest
from kartothek.core.cube.cube import Cube
from kartothek.io.dask.bag_cube import build_cube_from_bag
from kartothek.io.eager_cube import build_cube
__all__ = (
"test_fail_blocksize_negative",
"test_fail_blocksize_wrong_type",
"test_fail_blocksize_zero",
"test_fail_no_store_factory",
"test_multifile",
"test_simple",
)
def test_simple(driver, function_store, function_store_rwro):
df_seed = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
df_enrich = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "foo": [10, 11, 12, 13]}
)
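# "x" is the dimension column, "p" the partition column; "cube" prefixes the stored dataset UUIDs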
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
build_cube(
data={cube.seed_dataset: df_seed, "enrich": df_enrich},
cube=cube,
store=function_store,
)
result = driver(cube=cube, store=function_store_rwro)
assert set(result.keys()) == {cube.seed_dataset, "enrich"}
stats_seed = result[cube.seed_dataset]
assert stats_seed["partitions"] == 2
assert stats_seed["files"] == 2
assert stats_seed["rows"] == 4
assert stats_seed["blobsize"] > 0
stats_enrich = result["enrich"]
assert stats_enrich["partitions"] == stats_seed["partitions"]
assert stats_enrich["files"] == stats_seed["files"]
assert stats_enrich["rows"] == stats_seed["rows"]
assert stats_enrich["blobsize"] != stats_seed["blobsize"]
def test_multifile(driver, function_store):
dfs = [
|
pd.DataFrame({"x": [i], "p": [0], "v1": [10]})
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 2 12:11:07 2017
@author: leo
Take table data and infer which values represent missing data, so that they can be
replaced by standard missing-data markers.
Examples of missing data that can be found:
- 'XXX'
- 'NO_ADDR'
- '999999'
- 'NONE'
- '-'
TODO:
- For probable missing values, check entire file
"""
import string
import numpy as np
import pandas as pd
DEFAULT_THRESH = 0.6
def mv_from_letter_repetition(top_values, score=0.7):
"""Checks for unusual repetition of characters as in XX or 999999"""
# Compute number of unique characters for each value
num_unique_chars = pd.Series([len(set(list(x))) for x in top_values.index], index=top_values.index)
# Check that we have at least 3 distinct values
if len(num_unique_chars) >= 3:
# Keep as NaN candidate if value has only 1 unique character and
# at least two characters and other values have at least two distinct
# characters
num_unique_chars.sort_values(inplace=True)
if (len(num_unique_chars.index[0]) > 1) \
and (num_unique_chars.iloc[0] == 1) \
and (num_unique_chars.iloc[1] >= 2):
return [(num_unique_chars.index[0], score, 'letter_repeted')]
return []
def mv_from_usual_forms(top_values, probable_missing_values, score=0.5):
"""Compares top values to common expressions for missing values"""
to_return = []
for val in top_values.index:
if val.lower() in [x.lower() for x in probable_missing_values]:
to_return.append((val, score, 'usual'))
return to_return
def mv_from_len_diff(top_values, score=0.3):
"""Check if all values have the same length except one"""
# Compute lengths of values
lengths = pd.Series([len(x) for x in top_values.index], index=top_values.index)
# Check if all values have the same length except one:
if (lengths.nunique() == 2) & (len(top_values) >= 4): # TODO: why ???
if lengths.value_counts().iloc[-1] == 1:
abnormal_length = lengths.value_counts().index[-1]
mv_value = lengths[lengths == abnormal_length].index[0]
return [(mv_value, score, 'diff')]
return []
def mv_from_len_ratio(top_values, score=0.2):
"""Check if one value is much shorter than others"""
# Compute lengths of values
lengths = pd.Series([len(x) for x in top_values.index], index=top_values.index)
if len(top_values) >= 4:
lengths.sort_values(inplace=True)
length_ratio = 2.9
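# flag the shortest value when it is less than ~1/2.9 of the length of the next shortest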
if length_ratio * lengths.iloc[0] < lengths.iloc[1]:
mv_value = lengths.index[0]
return [(mv_value, score, 'len_ratio')]
return []
def mv_from_not_digit(top_values, score=1):
"""Check if one value is the only not digit (and is only text)"""
is_digit = pd.Series([x.replace(',', '').replace('.', '').isdigit()
for x in top_values.index], index=top_values.index)
if len(top_values) >= 3:
if (~is_digit).sum() == 1:
mv_value = is_digit[is_digit == False].index[0]
if mv_value.isalpha():
return [(mv_value, score/2. + score/2.*(len(top_values) >= 4),
'not_digit')]
return []
def mv_from_punctuation(top_values, score=1):
"""Check if value is only one with only punctuation"""
punct = string.punctuation + ' '
is_punct = pd.Series([all(y in punct for y in x) for x in top_values.index], index=top_values.index)
if (is_punct).sum() == 1:
mv_value = is_punct[is_punct].index[0]
return [(mv_value, score, 'punctuation')]
return []
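# Illustrative sketch (not part of the original module): in a mostly numeric column,
# a lone alphabetic value is caught by mv_from_not_digit and a lone punctuation-only
# value by mv_from_punctuation. The toy frequencies below are invented.
def _demo_numeric_column_heuristics():
    mostly_digits = pd.Series([0.4, 0.3, 0.2, 0.1], index=['12', '7', '3', 'NONE'])
    alpha_candidates = mv_from_not_digit(mostly_digits)   # flags 'NONE'
    with_dash = pd.Series([0.4, 0.3, 0.2, 0.1], index=['12', '7', '3', '-'])
    punct_candidates = mv_from_punctuation(with_dash)     # flags '-'
    return alpha_candidates + punct_candidates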
def mv_from_common_values(all_top_values, score=0.5):
'''Looks for values common in at least two columns'''
# Create dict with: {value: set_of_columns_where_common} with values present in at least two columns
popular_values = dict()
for col_1, top_values_1 in all_top_values.items():
for col_2, top_values_2 in all_top_values.items():
if col_1 != col_2:
common_values = [x for x in top_values_1.index if x in top_values_2.index]
for val in common_values:
if val not in popular_values:
popular_values[val] = set([col_1, col_2])
else:
popular_values[val].add(col_1)
popular_values[val].add(col_2)
if popular_values:
        # Questionable heuristic: return the value shared by the largest number of columns
temp = [(val, len(cols)) for val, cols in popular_values.items()]
temp.sort(key=lambda x: x[1], reverse=True)
mv_value = temp[0][0]
return [(mv_value, score, 'common_values')]
return []
def mv_from_common_values_2(col_mvs, score=1):
"""
Return mv candidates for missing values that are already candidates in
at least two columns.
"""
# Make dict with key: mv_candidate value: list of columns where applicable
val_mvs = dict()
    for col, tuples in col_mvs.items():
        for (val, _score, _origin) in tuples:
            if val not in val_mvs:
                val_mvs[val] = [col]
            else:
                val_mvs[val].append(col)
    # Use the function-level score rather than the per-candidate score from the loop
    return [(val, score, 'common_values') for val, cols in val_mvs.items() if len(cols) >= 2]
def compute_all_top_values(tab, num_top_values):
    '''
    Returns a dict mapping each column of the table to a pandas Series of its
    `num_top_values` most frequent values (normalized value counts)
    '''
all_top_values = dict()
for col in tab.columns:
all_top_values[col] = tab[col].value_counts(True).head(num_top_values)
return all_top_values
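# Illustrative sketch (not part of the original module): compute_all_top_values on a
# tiny invented table; each column maps to its normalized value counts.
def _demo_compute_all_top_values():
    toy = pd.DataFrame({'city': ['Paris', 'XXX', 'Paris', 'XXX', 'XXX'],
                        'dist': ['12', '7', '-', '3', '-']})
    # e.g. result['city'] is a Series like {'XXX': 0.6, 'Paris': 0.4}
    return compute_all_top_values(toy, num_top_values=5)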
def correct_score(list_of_possible_mvs, probable_mvs):
"""
Corrects original scores by comparing string distance to probable_mvs
INPUT:
list_of_possible_mvs: ex: [(mv, 0.3), (branch, 0.2)]
probable_mvs: ex ['nan', 'none']
OUTPUT:
list_of_possible_mvs: ex[(nan, 0.9), (branch, 0.1)]
"""
# Sum scores for same values detected by different methods in same column
new_list_of_possible_mvs_tmp = dict()
for (val, coef, orig) in list_of_possible_mvs:
if val not in new_list_of_possible_mvs_tmp:
new_list_of_possible_mvs_tmp[val] = dict()
new_list_of_possible_mvs_tmp[val]['score'] = coef
new_list_of_possible_mvs_tmp[val]['origin'] = [orig]
else:
new_list_of_possible_mvs_tmp[val]['score'] += coef
new_list_of_possible_mvs_tmp[val]['origin'].append(orig)
# NB: Taken care of in mv_from_usual_forms
# # If the value is a known form of mv, increase probability
# if val.lower() in [x.lower() for x in probable_mvs]:
# new_list_of_possible_mvs_tmp[val] += 0.5
# Reformat output like input
new_list_of_possible_mvs = []
for val, _dict in new_list_of_possible_mvs_tmp.items():
new_list_of_possible_mvs.append((val, _dict['score'], _dict['origin']))
return new_list_of_possible_mvs
def infer_mvs(tab, params=None):
"""
API MODULE
Run mv inference processes for each column and for the entire table
"""
PROBABLE_MVS = ['nan', 'none', 'na', 'n/a', '\\n', ' ', 'non renseigne', \
'nr', 'no value', 'null', 'missing value']
ALWAYS_MVS = ['']
if params is None:
params = {}
# Set variables and replace by default values
PROBABLE_MVS.extend(params.get('probable_mvs', []))
ALWAYS_MVS.extend(params.get('always_mvs', []))
num_top_values = params.get('num_top_values', 10)
# Compute most frequent values per column
all_top_values = compute_all_top_values(tab, num_top_values)
col_mvs = dict()
# Look at each column and infer mv
for col, top_values in all_top_values.items():
col_mvs[col] = []
if (not top_values.any()) or (top_values.iloc[0] == 1):
continue
col_mvs[col].extend(mv_from_len_diff(top_values))
col_mvs[col].extend(mv_from_len_ratio(top_values))
col_mvs[col].extend(mv_from_not_digit(top_values))
col_mvs[col].extend(mv_from_punctuation(top_values))
col_mvs[col].extend(mv_from_usual_forms(top_values, PROBABLE_MVS))
col_mvs[col].extend(mv_from_usual_forms(top_values, ALWAYS_MVS, 10**3))
col_mvs[col].extend(mv_from_letter_repetition(top_values))
col_mvs[col] = correct_score(col_mvs[col], PROBABLE_MVS)
col_mvs[col].sort(key=lambda x: x[1], reverse=True)
    # Convert the output to the API format (dicts with 'val', 'score', 'origin')
def triplet_to_dict(val):
return {'val': val[0], 'score': val[1], 'origin': val[2]}
common_mvs = [triplet_to_dict(val) for val in mv_from_common_values_2(col_mvs)]
columns_mvs = {key:[triplet_to_dict(val) for val in vals] for key, vals in col_mvs.items()}
    inferred_mvs = {'columns': columns_mvs, 'all': common_mvs}
    return {'mvs_dict': inferred_mvs, 'thresh': DEFAULT_THRESH}
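# Illustrative sketch (not part of the original module): the structure returned by
# infer_mvs on an invented toy table. Exact scores depend on the heuristics above.
def _demo_infer_mvs():
    toy = pd.DataFrame({'city': ['Paris', 'Lyon', 'XXX', 'Nice', 'XXX'],
                        'dist': ['12', '7', '-', '3', '9']})
    result = infer_mvs(toy)
    # result has the form:
    # {'mvs_dict': {'columns': {'city': [{'val': 'XXX', 'score': ..., 'origin': [...]}],
    #                           'dist': [{'val': '-', 'score': ..., 'origin': [...]}, ...]},
    #               'all': []},
    #  'thresh': 0.6}
    return result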
def replace_mvs(tab, params):
"""
API MODULE
Replace the values that should be mvs by actual np.nan. Values in 'all'
will be replaced in the entire table whereas values in 'columns' will only
be replaced in the specified columns.
INPUT:
tab: pandas DataFrame to modify
params:
        mvs_dict: dict indicating mv values with scores. For example:
                {
                 'all': [],
                 'columns': {'dech': [{'val': '-', 'score': 2.0, 'origin': 'unknown'}],
                             'distance': [{'val': '-', 'score': 1, 'origin': 'unknown'}]}
                }
        thresh: minimum score required to treat a candidate as a missing value
    OUTPUT:
        tab: same table with the detected values replaced by np.nan
        modified: boolean DataFrame indicating which cells were modified
"""
# Set variables and replace by default values
mvs_dict = params['mvs_dict']
thresh = params.get('thresh', DEFAULT_THRESH)
# Replace
assert sorted(list(mvs_dict.keys())) == ['all', 'columns']
# Run information
modified = pd.DataFrame(False, index=tab.index, columns=tab.columns)
for mv in mvs_dict['all']:
val, score = mv['val'], mv['score']
# run_info['replace_num']['all'][val] = 0
if score >= thresh:
# Metrics
modified = modified | (tab == val)
# Do transformation
tab.replace(val, np.nan, inplace=True)
for col, mv_values in mvs_dict['columns'].items():
for mv in mv_values:
val, score = mv['val'], mv['score']
if score >= thresh:
# Metrics
if tab[col].notnull().any():
modified[col] = modified[col] | (tab[col] == val)
# Do transformation
                tab[col] = tab[col].replace(val, np.nan)
return tab, modified
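# Illustrative sketch (not part of the original module): chaining infer_mvs and
# replace_mvs on an invented toy table. High-scoring candidates ('XXX', '-') are
# replaced by np.nan; low-scoring ones are left untouched.
def _demo_infer_and_replace():
    toy = pd.DataFrame({'city': ['Paris', 'Lyon', 'XXX', 'Nice', 'XXX'],
                        'dist': ['12', '7', '-', '3', '9']})
    params = infer_mvs(toy)  # {'mvs_dict': ..., 'thresh': ...}
    cleaned, modified = replace_mvs(toy, params)
    # cleaned now holds np.nan where 'XXX' and '-' appeared, and modified is a
    # boolean DataFrame marking the replaced cells
    return cleaned, modified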
def sample_mvs_ilocs(tab, params, sample_params):
'''
    Selects interesting rows to display following inference
INPUT:
- tab: the pandas DataFrame on which inference was performed
- params: the result of infer_mvs
- sample_params:
- randomize: (default: True)
- num_per_missing_val_to_display: for each missing value found,
how many examples to display
OUTPUT:
        - row_idxs: positional indices (ilocs) of rows to display
'''
# Select rows to display based on result
num_per_missing_val_to_display = sample_params.get('num_per_missing_val_to_display', 4)
randomize = sample_params.get('randomize', True)
thresh = params.get('thresh', DEFAULT_THRESH)
# TODO: add for ALL
row_idxs = []
for col, mvs in params['mvs_dict']['columns'].items():
if col in tab.columns:
for mv in mvs:
if mv['score'] >= thresh:
sel = (tab[col] == mv['val']).astype(int).diff().fillna(1).astype(bool)
sel.index = range(len(sel))
row_idxs.extend(list(sel[sel].index)[:num_per_missing_val_to_display])
    for mv in params['mvs_dict']['all']:
        if mv['score'] >= thresh:
            # Mark transitions into rows containing the candidate value
            sel = (tab == mv['val']).any(axis=1).astype(int).diff().fillna(1).astype(bool)
            sel.index = range(len(sel))
            if randomize:
                new_indexes = np.random.permutation(list(sel[sel].index))[:num_per_missing_val_to_display]
            else:
                new_indexes = list(sel[sel].index)[:num_per_missing_val_to_display]
            row_idxs.extend(new_indexes)
return row_idxs
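# Illustrative sketch (not part of the original module): selecting rows to display
# after inference, reusing the same invented toy table as the sketches above.
def _demo_sample_mvs_ilocs():
    toy = pd.DataFrame({'city': ['Paris', 'Lyon', 'XXX', 'Nice', 'XXX'],
                        'dist': ['12', '7', '-', '3', '9']})
    params = infer_mvs(toy)
    # Returns positional indices of rows where a high-scoring candidate appears
    return sample_mvs_ilocs(toy, params, {'num_per_missing_val_to_display': 2})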
if __name__ == '__main__':
file_paths = ['../../data/test_dedupe/participants.csv',
'../../data/test/etablissements/bce_data_norm.csv',
'local_test_data/source.csv',
'local_test_data/emmanuel_1/equipe.csv',
'local_test_data/emmanuel_1/doctorale.csv',
'local_test_data/emmanuel_1/laboratoire.csv',
'local_test_data/integration_4/hal2.csv']
file_path = file_paths[-1] # Path to file to test
nrows = 100000 # How many lines of the file to read for inference
encoding = 'utf-8' # Input encoding
tab =
|
pd.read_csv(file_path, nrows=nrows, encoding=encoding, dtype='unicode')
|
pandas.read_csv
|
import pandas as pd
import sys
from Plotting.plots import Plots
from pathlib import Path
import logging
import umap
def start(args):
plots = Path("results", "plots")
plots.mkdir(parents=True, exist_ok=True)
if args.r2score is True:
logging.info("Creating r2 score plots")
frames = []
if args.files is not None and args.legend is not None:
for i in range(len(args.files)):
data = pd.read_csv(args.files[i], sep=",")
if "Model" not in data.columns:
data["Model"] = args.legend[i]
frames.append(data)
else:
try:
linear_data = pd.read_csv(Path("results", "lr", "r2_scores.csv"), sep=",")
frames.append(linear_data)
except:
logging.info("Could not find linear regression r2 scores. Skipping...")
try:
ae_data = pd.read_csv(Path("results", "ae", "r2_scores.csv"), sep=",")
ae_data["Model"] = "AE"
frames.append(ae_data)
except:
logging.info("Could not find auto encoder regression r2 scores. Skipping...")
try:
dae_data = pd.read_csv(Path("results", "dae", "r2_scores.csv"), sep=",")
dae_data["Model"] = "DAE"
frames.append(dae_data)
except:
logging.info("Could not find denoising auto encoder regression r2 scores. Skipping...")
try:
dae_data = pd.read_csv(Path("results", "vae", "r2_scores.csv"), sep=",")
dae_data["Model"] = "VAE"
frames.append(dae_data)
except:
logging.info("Could not find denoising auto encoder regression r2 scores. Skipping...")
if len(frames) == 0:
print("No data found. Stopping.")
sys.exit()
r2_scores = pd.concat(frames)
Plots.r2_scores_combined(r2_scores, args.name)
if args.reconstructed is True:
print("Generating reconstructed markers plots.")
input_data = pd.read_csv(args.files[0], sep=",")
reconstructed_data = pd.read_csv(args.files[1], sep=",")
# Create individual heatmap
Plots.plot_reconstructed_markers(input_data, reconstructed_data, args.names[0])
if args.corr is True:
print("Generating correlation heatmaps.")
frames = []
for i in range(len(args.files)):
input_data = pd.read_csv(args.files[i], sep=",")
# Create individual heatmap
Plots.plot_corr_heatmap(input_data, f"{args.names[i]}")
input_data["File"] = args.names[i]
frames.append(input_data)
combined_correlations =
|
pd.concat(frames)
|
pandas.concat
|
"""Read any number of files and write a single merged and ordered file"""
import time
import fastparquet
import numpy as np
import pandas as pd
def generate_input_files(input_filenames, n, max_interval=0):
for i_fn in input_filenames:
df = _input_data_frame(n,
max_interval=max_interval,
relative_time_period=1000)
df.to_csv(i_fn)
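# Illustrative sketch (not part of the original script): writing two small CSV files
# with slightly jittered timestamps. The file names and sizes are invented.
def _demo_generate_input_files():
    generate_input_files(['input_a.csv', 'input_b.csv'], n=100, max_interval=5)
    # Each CSV now holds 100 rows indexed by near-monotonic 'timestamp' values,
    # with occasional adjacent swaps where gaps are smaller than max_interval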
def _input_data_frame(n, max_interval, relative_time_period):
"""
:param n: Number of timestamp entries
:param max_interval: Maximum time difference between non-monotonically
increasing entries
:param relative_time_period: Maximum time period between first and last
timestamp entries
    :return: pandas DataFrame of random data indexed by (possibly jittered) timestamps
"""
ts = 'timestamp'
# Timestamp (int) index
now = int(time.time())
low = now - relative_time_period
high = now
rel_time = np.random.randint(low=low, high=high, size=n)
rel_time.sort()
# Generate jitter in output: swap some times if < max_interval
# Do not swap consecutive pairs
one_diff = np.diff(rel_time, n=1)
one_diff = np.insert(one_diff, [0], max_interval)
two_diff = np.diff(rel_time, n=2)
two_diff = np.concatenate(([max_interval, max_interval], two_diff))
# Time difference less than max_interval
diff_lt_lbl = (one_diff < max_interval) & (two_diff < max_interval)
# Do not swap consecutive pairs
swap_lbl = np.random.rand(n) >= 0.5
lst_nonswap_lbl = ~np.roll(swap_lbl, shift=1)
nonconsec_swap_lbl = swap_lbl & lst_nonswap_lbl
# Randomly choose swaps among time difference less than max_interval
swap_diff_lt_lbl = nonconsec_swap_lbl & diff_lt_lbl
# Swap
for i, swap in enumerate(swap_diff_lt_lbl):
if swap:
rel_time[i-1], rel_time[i] = rel_time[i], rel_time[i-1]
index = pd.Index(data=rel_time, name=ts)
# Random data
data = {
'a': list(range(n))
}
return
|
pd.DataFrame(data=data, index=index)
|
pandas.DataFrame
|
'''Trains a simple deep NN (multilayer perceptron) on the MNIST dataset of 28x28 digit images.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from keras.layers import Dense
from keras.models import Model, Sequential
from keras import initializers
import os  # needed by symbol_to_path below
from pandas_datareader import data as web  # assumed source of `web` used in get_data_from_web
def kpi_returns(prices):
return ((prices-prices.shift(-1))/prices)[:-1]
def kpi_sharpeRatio():
risk_free_rate = 2.25 # 10 year US-treasury rate (annual) or 0
sharpe = 2
# ((mean_daily_returns[stocks[0]] * 100 * 252) - risk_free_rate ) / (std[stocks[0]] * 100 * np.sqrt(252))
return sharpe
def kpi_commulativeReturn():
return 2
def kpi_risk(df):
return df.std()
def kpi_sharpeRatio():
return 2
def softmax(z):
assert len(z.shape) == 2
s = np.max(z, axis=1)
s = s[:, np.newaxis]
e_x = np.exp(z - s)
div = np.sum(e_x, axis=1)
div = div[:, np.newaxis]
return e_x / div
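# Illustrative sketch (not part of the original script): the softmax above operates
# row-wise on a 2-D array, so every output row sums to 1. The input values are invented.
def _demo_softmax():
    logits = np.array([[1.0, 2.0, 3.0],
                       [1.0, 1.0, 1.0]])
    probs = softmax(logits)
    # probs.sum(axis=1) is approximately [1.0, 1.0]
    return probs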
def loss_log():
return 2
def loss_mse():
return 2
def loss_gdc():
return 2
def activation_sigmoid():
return 2
def plot_selected(df, columns, start_index, end_index):
"""Plot the desired columns over index values in the given range."""
# TODO: Your code here
# Note: DO NOT modify anything else!
#df = df[columns][start_index:end_index]
    df = df.loc[start_index:end_index, columns]
df = normalize(df)
plot_data(df)
def plot_data(df, title="normalized Stock prices"):
"""Plot stock prices with a custom title and meaningful axis labels."""
ax = df.plot(title=title, fontsize=12)
ax.set_xlabel("Date")
ax.set_ylabel("Price")
plt.show()
def plot_image(df, title):
plt.figure()
plt.imshow(df[0])#, cmap=plt.cm.binary)
plt.colorbar()
plt.gca().grid(False)
plt.title(title)
plt.show()
def plot_images(x,y, title):
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(x[i], cmap=plt.cm.binary)
plt.xlabel(y[i])
plt.show()
def plot_stat_loss_vs_time(history_dict) :
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss , 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss over time')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
def plot_stat_accuracy_vs_time(history_dict) :
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc , 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b' , label='Validation acc')
plt.title('Training and validation accuracy over time')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
def plot_stat_train_vs_test(history):
hist = history.history
plt.xlabel('Epoch')
plt.ylabel('Error')
plt.plot(hist['loss'])
plt.plot(hist['val_loss'])
plt.title ('model loss')
plt.legend (['train Error', 'test Error'], loc='upper right')
plt.show()
# normalize to first row
def normalize(df):
    return df / df.iloc[0, :]
def normalize(x):
train_stats = x_train.describe()
    return (x - train_stats.loc['mean']) / train_stats.loc['std']
def symbol_to_path(symbol, base_dir=""):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data_from_disc(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'GOOG' not in symbols: # add GOOG for reference, if absent
symbols.insert(0, 'GOOG')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
print(df_temp.head())
df = df.join(df_temp)
if symbol == 'GOOG': # drop dates GOOG did not trade
df = df.dropna(subset=["GOOG"])
return df
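# Illustrative sketch (not part of the original script): reading adjusted-close prices
# for a few tickers. Assumes '<SYMBOL>.csv' files with 'Date' and 'Adj Close' columns
# exist in the working directory (including 'GOOG.csv', which the function adds as a
# reference); the symbols and date range are invented.
def _demo_get_data_from_disc():
    dates = pd.date_range('2016-01-01', '2016-12-31')
    return get_data_from_disc(['AAPL', 'MSFT'], dates)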
def get_data_from_web(symbol):
start, end = '2007-05-02', '2016-04-11'
data = web.DataReader(symbol, 'yahoo', start, end)
data=
|
pd.DataFrame(data)
|
pandas.DataFrame
|