| prompt | completion | api |
| --- | --- | --- |
| string (lengths 19 to 1.03M) | string (lengths 4 to 2.12k) | string (lengths 8 to 90) |
import numpy as np
import pandas as pd
from bach import Series, DataFrame
from bach.operations.cut import CutOperation, QCutOperation
from sql_models.util import quote_identifier
from tests.functional.bach.test_data_and_utils import assert_equals_data
PD_TESTING_SETTINGS = {
'check_dtype': False,
'check_exact': False,
'atol': 1e-3,
}
def compare_boundaries(expected: pd.Series, result: Series) -> None:
for exp, res in zip(expected.to_numpy(), result.to_numpy()):
if not isinstance(exp, pd.Interval):
assert res is None or np.isnan(res)
continue
np.testing.assert_almost_equal(exp.left, float(res.left), decimal=2)
np.testing.assert_almost_equal(exp.right, float(res.right), decimal=2)
if exp.closed_left:
assert res.closed_left
if exp.closed_right:
assert res.closed_right
def test_cut_operation_pandas(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=10)
result = CutOperation(series=series, bins=10)()
compare_boundaries(expected, result)
expected_wo_right = pd.cut(p_series, bins=10, right=False)
result_wo_right = CutOperation(series, bins=10, right=False)()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_bach(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
ranges = [
pd.Interval(0, 9.9, closed='both'),
pd.Interval(9.9, 19.8, closed='right'),
pd.Interval(19.8, 29.7, closed='right'),
pd.Interval(29.7, 39.6, closed='right'),
pd.Interval(39.6, 49.5, closed='right'),
pd.Interval(49.5, 59.4, closed='right'),
pd.Interval(59.4, 69.3, closed='right'),
pd.Interval(69.3, 79.2, closed='right'),
pd.Interval(79.2, 89.1, closed='right'),
pd.Interval(89.1, 99, closed='right'),
]
expected = pd.Series({num: ranges[int(num / 10)] for num in range(100)})
result = CutOperation(series=series, bins=10, method='bach')().sort_index()
compare_boundaries(expected, result)
ranges_wo_right = [
pd.Interval(0, 9.9, closed='left'),
pd.Interval(9.9, 19.8, closed='left'),
pd.Interval(19.8, 29.7, closed='left'),
pd.Interval(29.7, 39.6, closed='left'),
pd.Interval(39.6, 49.5, closed='left'),
pd.Interval(49.5, 59.4, closed='left'),
pd.Interval(59.4, 69.3, closed='left'),
pd.Interval(69.3, 79.2, closed='left'),
pd.Interval(79.2, 89.1, closed='left'),
pd.Interval(89.1, 99, closed='both'),
]
expected_wo_right = pd.Series({num: ranges_wo_right[int(num / 10)] for num in range(100)})
result_wo_right = CutOperation(series=series, bins=10, method='bach', right=False)().sort_index()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_boundary(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 2, 3, 4], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=bins, right=True)
result = CutOperation(series=series, bins=bins, right=True)()
compare_boundaries(expected, result)
def test_cut_w_ignore_index(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 2, 3, 4], name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
result = CutOperation(series=series, bins=bins, right=True, ignore_index=False)()
assert ['_index_0', 'a'] == list(result.index.keys())
result_w_ignore = CutOperation(series=series, bins=bins, right=True, ignore_index=True)()
assert ['a'] == list(result_w_ignore.index.keys())
def test_cut_w_include_empty_bins(engine) -> None:
bins = 3
p_series = pd.Series(data=[1, 1, 2, 3, 6, 7, 8], name='a')
import datetime, os, pathlib, platform, pprint, sys
import fastai
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
#import sdv
import sklearn
import yellowbrick as yb
import imblearn
from imblearn.over_sampling import SMOTE
from fastai.tabular.data import TabularPandas
from fastai.tabular.all import FillMissing, Categorify, Normalize, tabular_learner, accuracy, ClassificationInterpretation, ShowGraphCallback, RandomSplitter, range_of
#from sdv.tabular import TVAE
from sklearn.base import BaseEstimator
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from yellowbrick.model_selection import CVScores, LearningCurve, ValidationCurve
from collections import Counter
from matplotlib import pyplot
seed: int = 14
# set up pretty printer for easier data evaluation
pretty = pprint.PrettyPrinter(indent=4, width=30).pprint
# set up pandas display options
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
"""
:Authors: <NAME>
:Date: 11/24/2016
:TL;DR: this module is responsible for categorical and numerical columns transformations
"""
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
class TrainTransformations:
col_to_scaler, col_to_encoder, col_to_width_edges, col_to_depth_edges = ({} for _ in range(4))
def __init__(self, col_to_scaler, col_to_encoder, col_to_width_edges, col_to_depth_edges):
self.col_to_scaler = col_to_scaler
self.col_to_encoder = col_to_encoder
self.col_to_width_edges = col_to_width_edges
self.col_to_depth_edges = col_to_depth_edges
def _calc_optimal_num_of_bins(col):
"""
given a collection of numerical values returns the optimal num of bins according to Freedman-Diaconis rule
:param col: collection of numerical values
:return: optimal num of bins according to Freedman-Diaconis rule
"""
iqr = np.subtract(*np.percentile(col, [75, 25]))
h = int(np.ceil((2 * iqr) / (len(col) ** (1 / 3)))) + 1
optimal_n = int(np.round((max(col) - min(col)) / h))
return optimal_n
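# Illustrative sketch (added; not part of the original module): what the helper above
# returns for a synthetic normal sample, next to the textbook Freedman-Diaconis width
# 2*IQR/n**(1/3). The sample size, seed, and the helper name _demo_fd_bins are
# assumptions made only for this example.
def _demo_fd_bins():
    rng = np.random.RandomState(0)
    col = pd.Series(rng.normal(size=1000))
    iqr = np.subtract(*np.percentile(col, [75, 25]))
    textbook_width = 2 * iqr / len(col) ** (1 / 3)  # un-rounded FD bin width
    return _calc_optimal_num_of_bins(col), textbook_width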
def _pct_rank_qcut(series, n, edges=None):
if edges is None:
edges = pd.Series([float(i) / n for i in range(n + 1)])
return series.rank(pct=1).apply(lambda x: (edges >= x).argmax()), edges
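# Usage sketch (added for illustration): equal-depth (quantile) binning of a small
# series with the helper above. The input values are made up for the example.
def _demo_pct_rank_qcut():
    labels, edges = _pct_rank_qcut(pd.Series([1, 2, 3, 4, 5, 6, 7, 8]), n=4)
    # labels holds a 1-based quartile index per row; edges are the cumulative
    # probability cut points [0.0, 0.25, 0.5, 0.75, 1.0], reusable on unseen data
    return labels, edges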
def _encode_categorical_columns(encode_df, expand_fit_df=None, col_to_encoder=None):
"""
given a pandas dataframe with categorical attributes returns encoded dataframe, dictionary mapping column to encoder
:param encode_df: pandas dataframe with categorical attributes
:param expand_fit_df: optional dataframe to expand encode_df labels
:param col_to_encoder: dictionary mapping each column to a transformer used when you want prefitted transformers
:return: encoded dataframe, dictionary mapping column to encoder
"""
# if another df is passed we take its labels into consideration
# so the label encoder won't be tripped up by unseen observations
if expand_fit_df is not None:
assert set(encode_df.columns).issubset(expand_fit_df.columns)
encode_df = encode_df.apply(
lambda col: col.cat.add_categories(
set(expand_fit_df[col.name].cat.categories).difference(col.cat.categories)))
if not col_to_encoder:
col_to_encoder = defaultdict(LabelEncoder)
encode_df.apply(
lambda col: col_to_encoder[col.name].fit(col.cat.as_ordered().cat.categories))
label_encoded_df = encode_df.apply(
lambda col: col_to_encoder[col.name].transform(col.cat.as_ordered().sort_values().values))
label_encoded_df.columns = ['ordered_%s' % col for col in label_encoded_df.columns]
return label_encoded_df, col_to_encoder
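# Usage sketch (added for illustration): encoding one small categorical frame. The
# column name and its values are assumptions made only for this example.
def _demo_encode_categorical():
    df = pd.DataFrame({'size': ['s', 'm', 'l', 'm']}).astype('category')
    label_encoded_df, col_to_encoder = _encode_categorical_columns(encode_df=df)
    # label_encoded_df exposes a single 'ordered_size' column of integer codes and
    # col_to_encoder['size'] is the fitted LabelEncoder, reusable on later data
    return label_encoded_df, col_to_encoder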
def _transform_categorical_columns(train_categorical_df, test_categorical_df=None, col_to_encoder=None):
"""
given a categorical dataframe returns transformed categorical dataframe based on col_to_encoder transformations
:param train_categorical_df: pandas dataframe with categorical attributes
:param test_categorical_df: pandas dataframe with categorical attributes
:param col_to_encoder: dictionary mapping each column to a transformer
:return: transformed categorical dataframe
"""
# assume there's an order - encode according to sort values
label_encoded_df, col_to_encoder = _encode_categorical_columns(encode_df=train_categorical_df,
expand_fit_df=test_categorical_df,
col_to_encoder=col_to_encoder)
# assume there is no order - dummify categorical data
dummiefied_categorical_df = pd.get_dummies(train_categorical_df,
prefix=train_categorical_df.columns.tolist())
dummiefied_categorical_df = dummiefied_categorical_df.apply(lambda col: col.astype('category'))
return label_encoded_df, dummiefied_categorical_df, col_to_encoder
def _transform_numerical_columns(train_numerical_df, col_to_scaler=defaultdict(MinMaxScaler)):
"""
given a numerical dataframe returns transformed numerical dataframe based on col_to_scaler transformations
:param train_numerical_df: pandas dataframe with numerical attributes
:param col_to_scaler: dictionary mapping each column to a transformer
:return: transformed numerical dataframe
"""
transformed_numerical_df = train_numerical_df.apply(
lambda col: col_to_scaler[col.name].fit_transform(col))
transformed_numerical_df = pd.DataFrame(data=transformed_numerical_df, index=train_numerical_df.index,
columns=train_numerical_df.columns)
return transformed_numerical_df, col_to_scaler
def discretize(numerical_df, col_to_width_edges=None, col_to_depth_edges=None, name_labels=False):
"""
given a numerical dataframe returns equal width and equal depth labeled dataframes and their bins dict
:param name_labels: boolean indicates whether to put string labels or int labels
:param numerical_df: pandas DataFrame of numerical attributes
:param col_to_width_edges: used when you want preset bins
:param col_to_depth_edges: used when you want preset bins
:return: equal_width_num_df, col_to_width_edges, equal_depth_num_df, col_to_depth_edges
"""
assert (isinstance(numerical_df, pd.DataFrame)) and (not numerical_df.empty), \
'numerical_df should be a valid pandas DataFrame'
is_edges_recieved = True
if (not col_to_width_edges) and (not col_to_depth_edges):
col_to_width_edges, col_to_depth_edges = {}, {}
is_edges_recieved = False
equal_width_num_df, equal_depth_num_df = pd.DataFrame(), pd.DataFrame()
for col_name, col in numerical_df.iteritems():
num_of_bins = _calc_optimal_num_of_bins(col)
if is_edges_recieved and (col_name in col_to_width_edges.keys()) and (col_name in col_to_depth_edges.keys()):
equal_width_col = (pd.cut(col, bins=col_to_width_edges[col_name]) if name_labels
                   else pd.cut(col, bins=col_to_width_edges[col_name], labels=False))
equal_width_col.name = 'equal_w_%s' % col_name
equal_width_num_df.loc[:, equal_width_col.name] = equal_width_col
equal_depth_col, _ = _pct_rank_qcut(col, num_of_bins, edges=col_to_depth_edges[col_name])
equal_depth_col.name = 'equal_d_%s' % col_name
equal_depth_num_df.loc[:, equal_depth_col.name] = equal_depth_col
else:
if num_of_bins > 1:
equal_width_col, col_to_width_edges[col_name] = (
    pd.cut(col, num_of_bins, retbins=True) if name_labels
    else pd.cut(col, num_of_bins, labels=False, retbins=True))
equal_width_col.name = 'equal_w_%s' % col_name
equal_width_num_df.loc[:, equal_width_col.name] = equal_width_col
equal_depth_col, col_to_depth_edges[col_name] = _pct_rank_qcut(col, num_of_bins)
equal_depth_col.name = 'equal_d_%s' % col_name
equal_depth_num_df.loc[:, equal_depth_col.name] = equal_depth_col
return equal_width_num_df, col_to_width_edges, equal_depth_num_df, col_to_depth_edges
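# Usage sketch (added for illustration): discretizing a single numerical column both
# ways. The column name 'age' and its values are assumptions; the sketch relies on the
# iteritems-era pandas API this module was written against.
def _demo_discretize():
    num_df = pd.DataFrame({'age': [21, 25, 31, 35, 44, 52, 63, 70]})
    eq_width_df, width_edges, eq_depth_df, depth_edges = discretize(num_df)
    # eq_width_df carries 'equal_w_age' (fixed-width bins) and eq_depth_df carries
    # 'equal_d_age' (quantile bins); the two edge dicts can be reused on test data
    return eq_width_df, width_edges, eq_depth_df, depth_edges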
def preprocess_train_columns(X_train, col_to_scaler=defaultdict(MinMaxScaler), X_test=None):
"""
given a pandas DataFrame returns a dataframe with columns ready for an ML model,
categorical transformations list, numerical transformations list
:param X_train: pandas DataFrame
:param col_to_scaler: numerical scaler to apply on each of the numerical columns
:param X_test: optional pandas DataFrame whose categories expand the fitted encoders
:return: dataframe with columns ready for an ML model, categorical transformations list,
numerical transformations list
"""
assert (isinstance(X_train, pd.DataFrame)) and (not X_train.empty), 'X_train should be a valid pandas DataFrame'
col_to_width_edges, col_to_depth_edges = None, None
numerical_cols = X_train.select_dtypes(include=[np.number]).columns
categorical_cols = X_train.select_dtypes(include=['category']).columns
is_numerical = len(numerical_cols) > 0
is_categorical = len(categorical_cols) > 0
label_encoded_df, dummiefied_categorical_df, scaled_numerical_df, col_to_encoder = pd.DataFrame()
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize("vals", [
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
# imports
import csv
import functools
import hashlib
import logging
import warnings
from os.path import isfile as isfile
import click
import fbprophet
import mlflow
import mlflow.pyfunc
import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation, performance_metrics
ES_URL = "http://192.168.122.3:9200"
ES_INDEX = "logs-endpoint-winevent-security-*"
FILTER = {"winlog.task": ":Logon"}
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
MODEL_PARAMS = {}
conda_env = "conda_running.yaml"
class FbProphetWrapper(mlflow.pyfunc.PythonModel):
def __init__(self, model):
self.model = model
super(FbProphetWrapper, self).__init__()
def load_context(self, context):
from fbprophet import Prophet
return
def predict(self, context, model_input):
model_input["ds"] = pd.to_datetime(model_input["ds"]).dt.tz_convert(None)
prediction = self.model.predict(model_input)
actual = model_input["y"]
merged = pd.concat([prediction, actual], axis=1)
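# Illustrative sketch (added; not shown in this excerpt of the script): how a wrapper
# like FbProphetWrapper is typically fitted and logged through mlflow.pyfunc. The
# function name and the 'ds'/'y' training-frame layout are assumptions for the example.
def train_and_log_example(df_train: pd.DataFrame) -> None:
    model = Prophet(**MODEL_PARAMS)
    model.fit(df_train)  # fbprophet expects 'ds' (datestamp) and 'y' (target) columns
    with mlflow.start_run():
        mlflow.pyfunc.log_model(
            artifact_path="model",
            python_model=FbProphetWrapper(model),
            conda_env=conda_env,
        )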
import anndata
import gzip
import os
import pandas as pd
import scipy.io
import tarfile
def load(data_dir, **kwargs):
fn = os.path.join(data_dir, "GSE164378_RAW.tar")
adatas = []
with tarfile.open(fn) as tar:
samples = ['GSM5008737_RNA_3P', 'GSM5008738_ADT_3P']
for sample in samples:
with gzip.open(tar.extractfile(sample + '-matrix.mtx.gz'), 'rb') as mm:
x = scipy.io.mmread(mm).T.tocsr()
obs = pd.read_csv(tar.extractfile(sample + '-barcodes.tsv.gz'), compression='gzip',
header=None, sep='\t', index_col=0)
obs.index.name = None
var = pd.read_csv(tar.extractfile(sample + '-features.tsv.gz'), compression='gzip',
header=None, sep='\t').iloc[:, :1]
var.columns = ['names']
var.index = var['names'].values
adata = anndata.AnnData(X=x, obs=obs, var=var)
adata.var_names_make_unique()
adatas.append(adata)
tar.close()
adata = adatas[0]
protein = adatas[1]
meta = pd.read_csv(os.path.join(data_dir, 'GSE164378_sc.meta.data_3P.csv.gz'), index_col=0)
adata.obs = adata.obs.join(meta)
adata.obsm['protein_expression'] = pd.DataFrame(protein.X.A, columns=protein.var_names, index=protein.obs_names)
import string
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
import pandas as pd
import requests as req
from bs4 import BeautifulSoup as bs
from tqdm import tqdm
BASE_LINK = 'https://www.nasdaq.com/screening/companies-by-name.aspx?{}&pagesize=200&page={}'
# can get all stocks at once by setting pagesize to a huge number
full_link = 'https://www.nasdaq.com/screening/companies-by-name.aspx?pagesize=20000'
def add_stocks(letter, page, get_last_page=False):
"""
goes through each row in table and adds to df if it is a stock
returns the appended df
"""
df = pd.DataFrame()
res = req.get(BASE_LINK.format(letter, page))
soup = bs(res.content, 'lxml')
table = soup.find('table', {'id': 'CompanylistResults'})
stks = table.findAll('tr')
stocks_on_page = (len(stks) - 1) / 2
for stk in stks[1:]:
deets = stk.findAll('td')
if len(deets) != 7:
continue
company_name = deets[0].text.strip()
ticker = deets[1].text.strip()
market_cap = deets[2].text.strip()
# 4th entry is blank
country = deets[4].text.strip()
ipo_year = deets[5].text.strip()
subsector = deets[6].text.strip()
df = df.append(pd.Series({'company_name': company_name,
'market_cap': market_cap,
'country': country,
'ipo_year': ipo_year,
'subsector': subsector},
name=ticker))
if get_last_page:
# get number of pages
lastpage_link = soup.find('a', {'id': 'two_column_main_content_lb_LastPage'})
last_page_num = int(lastpage_link['href'].split('=')[-1])
return df, stocks_on_page, last_page_num
return df, stocks_on_page
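# Usage sketch (added for illustration): fetching one page of tickers with the helper
# above. The exact query key ('letter=A') is an assumption about how callers fill the
# first slot of BASE_LINK.
def _demo_first_page():
    page_df, stocks_on_page = add_stocks('letter=A', 1)
    return page_df, stocks_on_page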
def get_all_stocks():
"""
goes through each row in table and adds to df if it is a stock
returns the appended df
"""
df = pd.DataFrame()
import argparse
import os
from dataclasses import dataclass
from functools import lru_cache
import socket
from urllib.parse import parse_qsl, urlencode, urlparse
import flask
from cached_property import cached_property
from pathlib import Path
from typing import Dict, List, Optional, Union
import cv2
import pandas as pd
import numpy as np
import dash
from dash import Dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
from dash.exceptions import PreventUpdate
from flask import make_response
parser = argparse.ArgumentParser(
description="Run web-based ESV visualisation tool",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("esvs_pkl", type=Path, help="Path to extracted ESVs")
parser.add_argument("dataset_root", type=Path, help="Path dataset folder of videos")
parser.add_argument(
"classes_csv", type=Path, help="Path to CSV containing name,id entries"
)
parser.add_argument(
"--debug", action="store_true", help="Enable Dash debug capabilities"
)
parser.add_argument(
"--port", default=8080, type=int, help="Port for webserver to listen on"
)
parser.add_argument("--host", default="localhost", help="Host to bind to")
def load_video(video_path: Union[str, Path]) -> np.ndarray:
capture = cv2.VideoCapture(str(video_path))
frames = []
while capture.isOpened():
success, frame = capture.read()
if success:
frames.append(frame[..., ::-1]) # BGR -> RGB
else:
break
if len(frames) == 0:
raise ValueError(f"Could not load video from {video_path}")
return np.stack(frames)
@dataclass
class Result:
esvs: List[np.ndarray] # [n_frames_idx][frame_idx, class_idx]
scores: np.ndarray # [n_frames_idx, class_idx]
uid: str
label: int
sequence_idxs: List[np.ndarray] # [n_frames_idx][frame_idx]
results_idx: int
@property
def max_n_frames(self):
return max([len(s) for s in self.sequence_idxs])
class ShapleyValueResults:
def __init__(self, results):
self._results = results
@property
def uids(self) -> List[str]:
return list(self._results["uids"])
@property
def shapley_values(self) -> List[np.ndarray]:
# shapley_values[n_frames_idx][example_idx, frame_idx, class_idx]
return self._results["shapley_values"]
@property
def sequence_idxs(self) -> np.ndarray:
# sequence_idxs[n_frames_idx][example_idx]
return self._results["sequence_idxs"]
@property
def labels(self) -> np.ndarray:
return self._results["labels"]
@property
def scores(self) -> np.ndarray:
# sequence_idxs[n_frames_idx, example_idx, class_idx]
return self._results["scores"]
@property
def max_n_frames(self) -> int:
return len(self._results["scores"])
@cached_property
def available_classes(self) -> List[int]:
return sorted(np.unique(self.labels))
@cached_property
def class_counts(self) -> Dict[int, int]:
return pd.Series(self.labels).value_counts().to_dict()
@cached_property
def class_example_idxs_lookup(self) -> Dict[int, np.ndarray]:
return {
cls: np.nonzero(self.labels == cls)[0] for cls in self.available_classes
}
def __getitem__(self, idx: Union[int, str]):
if isinstance(idx, (int, np.int32, np.int64)):
example_idx = idx
elif isinstance(idx, str):
example_idx = self.uids.index(idx)
else:
raise ValueError(f"Cannot handle idx type: {idx.__class__.__name__}")
return Result(
esvs=[esvs[example_idx] for esvs in self.shapley_values],
scores=self.scores[:, example_idx],
uid=self.uids[example_idx],
label=self.labels[example_idx],
sequence_idxs=[
sequence_idxs[example_idx] for sequence_idxs in self.sequence_idxs
],
results_idx=example_idx,
)
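# Usage sketch (added for illustration): pulling a single example out of the container
# above, by uid or by position; `results` is assumed to be an already-constructed
# ShapleyValueResults instance.
def _demo_lookup(results: ShapleyValueResults) -> Result:
    single = results[results.uids[0]]  # equivalent to results[0]
    # single.esvs[-1] is an array of shape [n_frames, n_classes] for the largest
    # frame budget, and single.scores[-1] holds the matching class scores
    return single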
def get_triggered_props():
ctx = dash.callback_context
return {trigger["prop_id"] for trigger in ctx.triggered}
class Visualisation:
def __init__(
self,
results: ShapleyValueResults,
class2str: Dict[int, str],
dataset_dir: Path,
title: str = "ESV Dashboard",
):
self.results = results
self.class2str = class2str
self.str2class = {v: k for k, v in class2str.items()}
self.dataset_dir = dataset_dir
self.title = title
def decode_other_classes(classes_str):
return list(map(int, classes_str.split(":")))
self.default_state = {
"n-frames": self.results.max_n_frames,
"uid": self.results.uids[0],
"selected-classes": [],
}
self.state_types = {
"uid": str,
"n-frames": int,
"selected-classes": decode_other_classes,
}
def parse_state_from_url(self, url):
components = urlparse(url)
query_string = parse_qsl(components.query)
state = self.default_state.copy()
for k, v in query_string:
state[k] = self.state_types[k](v)
return state
def get_result(self, cls, example_idx):
return self.results[self.results.class_example_idxs_lookup[cls][example_idx]]
def attach_to_app(self, app: Dash):
def app_layout():
return html.Div(
[dcc.Location(id="url", refresh=False), self.render_layout()]
)
app.layout = app_layout
self.attach_callbacks(app)
self.attach_routes(app)
def attach_routes(self, app: Dash):
@app.server.route("/videos/<uid>")
def load_video(uid: str):
path = self.dataset_dir / f"{uid}.webm"
return flask.send_from_directory(self.dataset_dir.absolute(), f"{uid}.webm")
@app.server.route("/frames/<uid>/<int:frame_idx>")
def load_frame(uid: str, frame_idx: int):
vid = self.load_video(uid)
frame = vid[frame_idx]
success, frame_jpeg = cv2.imencode(".jpg", frame[..., ::-1])
response = make_response(frame_jpeg.tobytes())
response.headers.set("Content-Type", "image/jpeg")
response.headers.set(
"Content-Disposition", "attachment", filename=f"{uid}-{frame_idx}.jpg"
)
return response
def get_cls_and_example_idx_for_uid(self, uid):
cls = self.results.labels[self.results.uids.index(uid)]
uids = np.array(self.results.uids)
class_uids = self.results.class_example_idxs_lookup[cls]
example_idx = list(uids[class_uids]).index(uid)
return cls, example_idx
def get_uid_from_cls_and_example_idx(self, cls, example_idx):
return np.array(self.results.uids)[self.results.class_example_idxs_lookup[cls]][
example_idx
]
def get_preds_df(self, result: Result, n_frames: int):
scores = result.scores[n_frames - 1]
classes = list(scores.argsort()[::-1][:10])
if result.label not in classes:
classes = classes[:-1] + [result.label]
entries = []
for i, cls in enumerate(classes):
class_name = (
self.class2str[cls]
.replace("something", "[...]")
.replace("Something", "[...]")
)
# We have to truncate labels on the x-axis so that they fit without all
# getting horribly cut off
max_len = 33
truncated_class_name = class_name
if len(class_name) >= max_len:
truncated_class_name = class_name[: max_len - len("...")] + "..."
entries.append(
{
"Idx": i,
"Class": class_name,
"TruncatedClass": truncated_class_name,
"ClassId": cls,
"Score": scores[cls],
}
)
return pd.DataFrame(entries)
def attach_callbacks(self, app: Dash):
@app.callback(
Output("class-dropdown", "value"),
Input("url", "href"),
)
def update_class_dropdown_value(href):
state = self.parse_state_from_url(href)
if "uid" not in state:
raise PreventUpdate
cls, _ = self.get_cls_and_example_idx_for_uid(state["uid"])
return cls
@app.callback(
Output("n-frames-slider", "value"),
Input("url", "href"),
)
def update_n_frames(href):
state = self.parse_state_from_url(href)
if "n-frames" not in state:
raise PreventUpdate
return state["n-frames"]
@app.callback(
Output("example-idx-slider", "value"),
Input("class-dropdown", "value"),
Input("url", "href"),
)
def update_example_slider_value(cls, href):
ctx = dash.callback_context
url_trigger = "url.href" in get_triggered_props()
state = self.parse_state_from_url(href)
if url_trigger and "uid" in state:
_, example_idx = self.get_cls_and_example_idx_for_uid(state["uid"])
return example_idx
return 0
@app.callback(
Output("example-idx-slider", "max"),
Output("example-idx-slider", "disabled"),
Output("example-idx-slider", "marks"),
Input("class-dropdown", "value"),
)
def update_example_slider(cls):
max_index = self.results.class_counts[cls] - 1
marks = {i: str(i) for i in range(max_index + 1)}
return max_index, max_index == 0, marks
@app.callback(
Output("model-preds-bar", "clickData"),
Output("model-preds-bar", "figure"),
Input("class-dropdown", "value"),
Input("example-idx-slider", "value"),
Input("n-frames-slider", "value"),
)
def update_scores(cls, example_idx, n_frames):
result = self.get_result(cls, example_idx)
return None, self.plot_preds(self.get_preds_df(result, n_frames))
@app.callback(
Output("state-uid", "children"),
Input("class-dropdown", "value"),
Input("example-idx-slider", "value"),
)
def update_uid(cls, example_idx):
idx = self.results.class_example_idxs_lookup[cls][example_idx]
return self.results.uids[idx]
@app.callback(
Output("esv-scatter", "figure"),
Input("state-uid", "children"),
Input("n-frames-slider", "value"),
Input("state-alt-class", "children"),
)
def update_esvs(uid, n_frames, alt_class_str):
try:
alt_class = int(alt_class_str)
except ValueError:
alt_class = None
result = self.results[uid]
return self.plot_esvs(result, n_frames, alt_class=alt_class)
@app.callback(
Output("esv-scatter", "hoverData"), Input("n-frames-slider", "value")
)
def update_esv_scatter_hover_data(_):
return None
@app.callback(
Output("state-alt-class", "children"),
Input("model-preds-bar", "clickData"),
Input("state-uid", "children"),
)
def update_selected_classes(clickData, uid):
if "state-uid" in get_triggered_props():
return ""
if clickData is not None:
cls = clickData["points"][0]["customdata"][0]
return str(cls)
return dash.no_update
@app.callback(
Output("current-frame-container", "children"),
Input("state-uid", "children"),
Input("esv-scatter", "hoverData"),
)
def update_selected_frame(uid, hoverData):
result = self.results[uid]
if hoverData is None or "state-uid.children" in get_triggered_props():
frame_index = 0
else:
frame_index = hoverData["points"][0]["x"]
return html.Img(src=f"/frames/{result.uid}/{frame_index}")
@app.callback(
Output("video-container", "children"),
Input("state-uid", "children"),
)
def update_video(uid):
return html.Video(src=f"/videos/{uid}", loop=True, autoPlay=True)
@app.callback(
Output("url", "search"),
[
Input("example-idx-slider", "value"),
Input("class-dropdown", "value"),
Input("n-frames-slider", "value"),
],
)
def update_url_params(example_idx, cls, n_frames):
state = {
"uid": self.get_uid_from_cls_and_example_idx(cls, example_idx),
"n-frames": n_frames,
}
params = urlencode(state)
return f"?{params}"
def render_layout(self):
idx = self.results.uids.index(self.default_state["uid"])
cls = self.results.labels[idx]
available_example_idxs = list(self.results.class_example_idxs_lookup[cls])
example_idx = available_example_idxs.index(idx)
return html.Div(
[
html.Div(html.H1(self.title)),
html.Div(
[
html.Div(
[
html.Label("Class: "),
dcc.Dropdown(
id="class-dropdown",
options=[
{
"label": self.class2str[cls],
"value": cls,
}
for cls in self.results.available_classes
],
value=cls,
),
],
className="control-element",
),
html.Div(
[
html.Label("Example: "),
dcc.Slider(
id="example-idx-slider",
min=0,
max=len(available_example_idxs),
disabled=False,
value=example_idx,
),
],
className="control-element",
),
html.Div(
[
html.Label("Frames fed to model: "),
dcc.Slider(
id="n-frames-slider",
min=1,
max=self.results.max_n_frames,
marks={
i: str(i)
for i in range(1, self.results.max_n_frames + 1)
},
value=self.results.max_n_frames,
),
],
className="control-element",
),
],
className="controls",
),
html.Hr(),
html.Div(
[
html.Div(
[
html.H2("Model Predictions"),
dcc.Graph(
id="model-preds-bar",
config={"displayModeBar": False},
responsive=True,
),
],
id="model-preds-bar-container",
),
html.Div(
[
html.H2("ESV Values"),
dcc.Graph(
id="esv-scatter",
config={"displayModeBar": False},
responsive=True,
# if we don't set the initial height of the graph it
# gets a height of 0 before it is updated when
# the user clicks on an alternate class which
# refreshes the height attribute of the Graph div.
style={"height": "450px"},
),
],
id="esv-scatter-container",
),
],
id="graph-pane",
),
html.Div(
[
html.Div(
[
html.Span("Hovered Frame:"),
html.Div(
id="current-frame-container",
),
]
),
html.Div(
[
html.Span("Orignal Video:"),
html.Div(
id="video-container",
),
]
),
],
id="video-pane",
),
html.A(
target="_blank",
href="https://www.youtube.com/watch?v=zoUJi6L6z0M&feature=youtu.be",
children=html.Div(id="help-btn", children=html.Div("?")),
),
html.Div(
id="state-uid",
children=self.default_state["uid"],
style={"display": "none"},
),
html.Div(id="state-alt-class", children="", style={"display": "none"}),
],
id="visualisation",
)
def plot_esvs(self, result: Result, n_frames: int, alt_class: Optional[int] = None):
classes = [result.label]
if alt_class is not None and alt_class != result.label:
classes.append(alt_class)
entries = []
for cls in classes:
for i in range(n_frames):
entries.append(
{
"Segment": i + 1,
"Frame": result.sequence_idxs[n_frames - 1][i],
"ESV": result.esvs[n_frames - 1][i, cls],
"Class": self.class2str[cls]
+ ("" if cls != result.label else " (GT)"),
}
)
df = pd.DataFrame(entries)
import pandas as pd
import numpy as np
import tensorflow as tf
import functools
'''
DATA FORMAT
- Dates: YEAR-MONTH-DAY
'''
# Define the unique key for all dataset entries
dataset_key = 'object_id'
# Rename a column label of the given dataframe
def rename_id_label(dataframe, old_label,new_label):
dataframe.rename(columns={old_label: new_label}, inplace=True)
# Convert key column to string and remove the first two characters (a letter followed by ":")
def standardize_heading(dataframe):
dataframe[dataset_key] = dataframe[dataset_key].astype(str).str[2:]
def update_columns(dataframe):
'''
New dataframe where we have one object_id per row
'''
# updated_active_startups = pd.DataFrame(columns=active_startups.columns)
dataframe.drop(columns=['pre_money_valuation_usd', 'pre_money_valuation', 'pre_money_currency_code',
'post_money_valuation_usd', 'post_money_valuation', 'post_money_currency_code'],
inplace=True)
# new columns for each fund round
fund_rounds_cols = list(
['funding_round_id', 'funded_at', 'funding_round_type', 'funding_round_code', 'raised_amount_usd',
'raised_amount',
'raised_currency_code', 'is_first_round', 'is_last_round', 'participants', 'source_description'])
fund_rounds_cols = fund_rounds_cols * 9
old_col_index = list(dataframe.columns)
new_col_index = old_col_index[1:22] + old_col_index[29:36] + old_col_index[37:39] + old_col_index[
36:37] + old_col_index[
39:40] + fund_rounds_cols + old_col_index[
43:49] + old_col_index[
22:29]
return new_col_index
# updated_active_startups.to_csv('../datasets/updated_active_startups.csv',encoding='utf-8')
def main():
csv_files = ['acquisitions', 'degrees', 'funding_rounds', 'funds', 'investments', 'ipos', 'milestones', 'startups', 'offices', 'people']
acquistions = pd.read_csv("../datasets/CrunchBase_MegaDataset/acquisitions.csv")
degrees = pd.read_csv("../datasets/CrunchBase_MegaDataset/degrees.csv")
funding_rounds = pd.read_csv("../datasets/CrunchBase_MegaDataset/funding_rounds.csv")
funds = pd.read_csv("../datasets/CrunchBase_MegaDataset/funds.csv")
investments = pd.read_csv("../datasets/CrunchBase_MegaDataset/investments.csv")
ipos = pd.read_csv("../datasets/CrunchBase_MegaDataset/ipos.csv")
milestones = pd.read_csv("../datasets/CrunchBase_MegaDataset/milestones.csv")
#!/usr/bin/env python3
import argparse
import math
import pandas as pd
import sys
from collections import namedtuple
from datetime import date
from enum import Enum
from pathlib import Path
from time import localtime, strftime
class Verbosity(Enum):
LOW=1
HIGH=2
Settings=namedtuple('Settings', ['datapath','verbosity'])
InvestmentDataDetails=namedtuple('InvestmentDataDetails', ['filename','columns','description'])
RRSPIncomeAccountTypes=['sdrsp','locked_sdrsp']
NonRRSPIncomeAccountTypes=['margin','tfsa']
IncomeAccountTypes=RRSPIncomeAccountTypes+NonRRSPIncomeAccountTypes
AccountTypes=IncomeAccountTypes+['resp']
ReportTypes=['monthly_income','monthly_income_growth','monthly_income_actual','monthly_income_schedule','tfsa_summary']
ReportFormats=['csv']
TransactionTypes=['buy','sell','xfer','cont','cont_limit','div','withdraw']
investment_data={'assets': InvestmentDataDetails(filename=Path('assets.csv'),
columns=['name','market','type','subtype','income_per_unit_period','sdrsp','locked_sdrsp','margin','tfsa','resp','income_freq_months','income_first_month','income_day_of_month'],
description='ledger of owned financial instruments'),
'monthly_income': InvestmentDataDetails(filename=Path('income_monthly.csv'),
columns=['name','sdrsp','locked_sdrsp','margin','tfsa','total_rrsp','total_nonrrsp','monthly_total','yearly_total'],
description='projected monthly income by account, including overall & RRSP and non-registered totals'),
'monthly_income_growth': InvestmentDataDetails(filename=Path('income_monthly_growth.csv'),
columns=['name','sdrsp','locked_sdrsp','margin','tfsa'],
description='monthly income growth by account'),
'monthly_income_schedule': InvestmentDataDetails(filename=Path('income_monthly_sched.csv'),
columns=['name','jan_rrsp','jan_nonrrsp','feb_rrsp','feb_nonrrsp','mar_rrsp','mar_nonrrsp','apr_rrsp','apr_nonrrsp','may_rrsp','may_nonrrsp','jun_rrsp','jun_nonrrsp','jul_rrsp','jul_nonrrsp','aug_rrsp','aug_nonrrsp','sep_rrsp','sep_nonrrsp','oct_rrsp','oct_nonrrsp','nov_rrsp','nov_nonrrsp','dec_rrsp','dec_nonrrsp'],
description='projected monthly income schedule'),
'monthly_income_actual': InvestmentDataDetails(filename=Path('income_monthly_actual.csv'),
columns=['name','sdrsp','locked_sdrsp','margin','tfsa','total_rrsp','total_nonrrsp','monthly_total','yearly_total'],
description='actual monthly income by account, including overall & RRSP and non-registered totals'),
'tfsa_summary': InvestmentDataDetails(filename=Path('tfsa_summary.csv'),
columns=['num','total'],
description='summarization of tfsa transactions'),
'transactions': InvestmentDataDetails(filename=Path('transactions.csv'),
columns=['date','type','name','account','xfer_account','units','unit_amount','fees','total'],
description='record of all asset transactions'),
}
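# Illustrative helper (added; not part of the original tool): one way the registry
# above can be consumed, resolving a table's csv path and declared column order.
# The helper name and the default key are assumptions for the example.
def _load_table_example(data_type: str = 'assets') -> pd.DataFrame:
    details = investment_data[data_type]
    df = pd.read_csv(details.filename)
    return df[list(details.columns)]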
def build_cmdline_parser():
clp_parser = argparse.ArgumentParser(prog='',
description='Command-line tool for investment management',
add_help=False)
clp_commands = clp_parser.add_subparsers(title='Portfolio management commands',
description='Execute transactions, create/print reports',
dest='command')
clp_command_list = clp_commands.add_parser('list',
help='display details of investment data')
clp_command_list.add_argument('list',
choices=investment_data.keys(),
help='display details of specified investment data')
clp_command_list.add_argument('--filter',
help="e.g. ((df['name']=='TD')|(df['name']=='ENB'))&(~(df['account']=='margin'))")
clp_command_list.add_argument('--tail',
type=int,
help="show only the specified number of rows of the end of the table")
clp_command_transact = clp_commands.add_parser('transact',
help='perform a transaction on an asset')
clp_command_transact.add_argument('type',
choices=TransactionTypes,
help='type of transaction')
clp_command_transact.add_argument('account',
choices=AccountTypes,
help='account in which transaction was executed')
clp_command_transact.add_argument('--xfer_account',
choices=AccountTypes,
help='the target account of a xfer transaction')
clp_command_transact.add_argument('name',
help='name of asset being transacted, or "cash"')
clp_command_transact.add_argument('units',
type=int,
help='number of units participating in transaction')
clp_command_transact.add_argument('amount',
type=float,
help='price of a stock unit, total dividend income, or contribution amount per unit')
clp_command_transact.add_argument('--date',
type=date.fromisoformat,
default=date.today(),
help='transaction date (e.g. "2021-03-31")')
clp_command_transact.add_argument('--fees',
type=float,
default=0.00,
help='total transaction fees (default: "0.00")')
clp_command_report = clp_commands.add_parser('report',
help='generate a report on investment data')
clp_command_report.add_argument('type',
choices=ReportTypes,
help='type of report to generate')
clp_command_report.add_argument('--format',
choices=ReportFormats,
default='csv',
help='how to format the report')
clp_command_datapath = clp_commands.add_parser('datapath',
help='location of the data files')
clp_command_datapath.add_argument('--set',
type=Path,
dest='path',
help='directory path for the data files')
clp_command_verbosity = clp_commands.add_parser('verbosity',
help='level of detail printed')
clp_command_verbosity.add_argument('--toggle',
action='store_true',
help='change from low to high, or vice versa')
clp_commands.add_parser('help', help='print help overview')
clp_commands.add_parser('quit', help='exit the command-line tool')
return clp_parser
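# Usage sketch (added for illustration): how the parser above resolves a 'transact'
# command line; the ticker and prices are made-up values.
def _demo_parse_transact():
    parser = build_cmdline_parser()
    args = parser.parse_args(['transact', 'buy', 'tfsa', 'TD', '10', '82.50'])
    # args.type == 'buy', args.account == 'tfsa', args.units == 10, args.amount == 82.5
    return args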
def write_data_file(file_type, df, data_type, output_index):
if file_type=='csv':
fname=investment_data[data_type].filename
fname_ts=fname.with_stem(fname.stem+'_'+strftime("%Y-%m-%d-%H_%M_%S",localtime()))
df.to_csv(fname_ts, index=output_index)
df.to_csv(fname, index=output_index)
print(f'Data written to "{fname_ts}", and "{fname}" updated accordingly')
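# For example, with data_type='transactions' a call made at 14:05:09 on
# 2021-03-31 writes 'transactions_2021-03-31-14_05_09.csv' and then refreshes
# 'transactions.csv' itself (timestamp shown is illustrative; Path.with_stem
# requires Python 3.9+).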
def gen_report_monthly_income():
report=pd.DataFrame()
output_index=False
float_format=lambda x: '$%.2f'%x
assets=pd.read_csv(investment_data['assets'].filename)
report_series={'name': assets['name']}
for account in IncomeAccountTypes:
report_series[account]=assets[account].mul(assets['income_per_unit_period']).divide(assets['income_freq_months'])
report_series['total_rrsp']=report_series['sdrsp'].add(report_series['locked_sdrsp'])
report_series['total_nonrrsp']=report_series['margin'].add(report_series['tfsa'])
report=pd.DataFrame(report_series)
monthly_by_account=pd.DataFrame([['TOTAL MONTHLY']+[series.sum() for label,series in report.items() if label!='name']],
columns=investment_data['monthly_income'].columns[:-2])
report=pd.concat([report,monthly_by_account],ignore_index=True)
report['monthly_total']=report['total_rrsp'].add(report['total_nonrrsp'])
monthly_totals=report[report['name']=='TOTAL MONTHLY']
report['yearly_total']=report['monthly_total'].mul(12)
monthly_totals=pd.DataFrame([['TOTAL YEARLY']+[series.sum()*12 for label,series in monthly_totals.items() if label!='name' and label!='yearly_total']],
columns=investment_data['monthly_income'].columns[:-1])
report=pd.concat([report,monthly_totals],ignore_index=True)
report.at[report.shape[0]-1,'yearly_total']=0
return report, output_index, float_format
def gen_report_monthly_income_sched():
report=pd.DataFrame()
output_index=True
float_format=lambda x: '$%.2f'%x
assets=pd.read_csv(investment_data['assets'].filename, index_col=0)
income={}
for account in AccountTypes:
income[account]=assets[account].mul(assets['income_per_unit_period'])
income['rrsp']=income['sdrsp'].add(income['locked_sdrsp'])
income['nonrrsp']=income['margin'].add(income['tfsa'])
for account in AccountTypes:
del income[account]
sched={col: [] for col in investment_data['monthly_income_schedule'].columns[1:]}
for row, (name, details) in enumerate(assets.iterrows()):
freq=details['income_freq_months']
starting_month=details['income_first_month']
for month in range(1,13):
if month>=starting_month:
inc_rrsp=income['rrsp'][row] if ((month-starting_month)%freq==0) else 0.0
inc_nonrrsp=income['nonrrsp'][row] if ((month-starting_month)%freq==0) else 0.0
else:
inc_rrsp=inc_nonrrsp=0.0
sched[investment_data['monthly_income_schedule'].columns[month*2-1]].append(inc_rrsp)
sched[investment_data['monthly_income_schedule'].columns[month*2]].append(inc_nonrrsp)
report=pd.DataFrame(data=sched,index=assets.index)
monthly_totals=pd.DataFrame([[series.sum() for label,series in report.items()]],
columns=investment_data['monthly_income_schedule'].columns[1:],
index=pd.Series(data={name:'TOTAL'},name='name'))
report=pd.concat([report,monthly_totals])
return report, output_index, float_format
def gen_report_monthly_income_growth():
report=pd.DataFrame()
output_index=True
float_format=lambda x: '%.4f%%'%x
income_files=investment_data['monthly_income'].filename
income_files=income_files.parent/income_files.stem
income_files=sorted(income_files.parent.glob(income_files.stem+'_2*.csv'), reverse=True)
if len(income_files)<2:
print('ERROR: No previous income file with which to compare for growth rates.')
        return report, output_index, float_format
previous_income_file=income_files[1]
print(f'Growth rate as compared with: {previous_income_file}\n')
    previous_income_df=pd.read_csv(previous_income_file, index_col=0)
import os
import fnmatch
import calendar
import numpy as np
import pandas as pd
import xarray as xr
from itertools import product
from util import month_num_to_string
import xesmf as xe
"""
Module contains several functions for preprocessing S2S hindcasts.
Author: <NAME>, NCAR (<EMAIL>)
Contributions from <NAME>, NCAR
"""
def regrid_mask(ds, variable, reuse_weights=False):
"""
Function to regrid mcs obs mask onto coarser ERA5 grid (0.25-degree).
Args:
ds (xarray dataset): Mask file.
variable (str): variable.
reuse_weights (boolean): Whether to use precomputed weights to speed up calculation.
Defaults to ``False``.
Returns:
Regridded mask file for use with machine learning model.
"""
ds_out = xe.util.grid_2d(lon0_b=0-0.5, lon1_b=360-0.5, d_lon=1.,
lat0_b=-90-0.5, lat1_b=90, d_lat=1.)
regridder = xe.Regridder(ds, ds_out, method='nearest_s2d', reuse_weights=reuse_weights)
return regridder(ds[variable])
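# Minimal usage sketch (file and variable names here are hypothetical):
#     ds_mask = xr.open_dataset('mcs_mask.nc')
#     mask_coarse = regrid_mask(ds_mask, 'mcs_mask')   # nearest_s2d regrid onto the grid above
#     mask_coarse.to_netcdf('mcs_mask_regridded.nc')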
def create_cesm2_folders(variable, parent_directory, start='1999-01-01', end='2019-12-31', freq='W-MON'):
"""
Create folders to place new variable files that were not preprocessed p1 (or other SubX priority).
Args:
variable (str): Name of variable (e.g., 'sst').
parent_directory (str): Directory to place files (e.g., '/glade/scratch/$USER/s2s/').
start (str): Start of hindcasts. Defaults to '1999-01-01' for CESM2.
end (str): End of hindcasts. Defaults to '2019-12-31' for CESM2.
freq (str): Frequency of hindcast starts. Defaults to 'W-MON' for CESM2.
"""
d1 = pd.date_range(start=start, end=end, freq=freq)
if os.path.exists(parent_directory):
for yr, mo in product(np.unique(d1.strftime("%Y")), np.unique(d1.strftime("%m"))):
new_directory = 'CESM2/'+variable+'/'+yr+'/'+mo
path = os.path.join(parent_directory, new_directory)
try:
os.makedirs(path, exist_ok = True)
print("Directory '%s' created successfully" % new_directory)
except OSError as error:
print("Directory '%s' cannot be created" % new_directory)
if not os.path.exists(parent_directory):
print('Parent directory does not exist.')
return
def create_cesm2_files(variable, parent_directory, ensemble, start='1999-01-01', end='2019-12-31', freq='W-MON'):
"""
Create CESM2 variable files that were not preprocessed p1 (or other SubX priority) variables.
Here we extract variable from daily file containing many variables to reduce memory usage.
Contain daily files in ``/temp/`` sub-folder.
Args:
variable (str): Name of variable in lower case (e.g., 'sst').
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
ensemble (str): Two digit ensemble member of hindcast (e.g., '09').
start (str): Start of hindcasts. Defaults to '1999-01-01' for CESM2.
end (str): End of hindcasts. Defaults to '2019-12-31' for CESM2.
freq (str): Frequency of hindcast starts. Defaults to 'W-MON' for CESM2.
"""
d1 = pd.date_range(start=start, end=end, freq=freq)
for root, dirnames, filenames in os.walk(f'{parent_directory}CESM2/temp/'):
for num, (yr, mo, dy) in enumerate(zip(d1.strftime("%Y"), d1.strftime("%m"), d1.strftime("%d"))):
if yr == '2016' and mo == '02' and dy == '29':
dy = '28'
for filename in fnmatch.filter(filenames, f'cesm2cam6v2*{yr}-{mo}-{dy}.{ensemble}.cam.h2.{yr}-{mo}-{dy}-00000.nc'):
ds = xr.open_dataset(root+filename)[variable.upper()]
ds.to_dataset(name=variable.upper()).to_netcdf(
f'{parent_directory}CESM2/{variable}/{yr}/{mo}/{variable}_cesm2cam6v2_{dy}{month_num_to_string(mo)}{yr}_00z_d01_d46_m{ensemble}.nc')
return
def create_cesm2_pressure_files(filelist, variable, pressure=300.):
"""
Create CESM2 variable files that were not preprocessed p1 (or other SubX priority) variables.
Here we extract variables on a pressure level from files containing many pressure levels
to reduce memory usage.
Args:
filelist (list of str): List of file names and directory locations.
variable (str): Name of variable in lower case (e.g., 'sst').
pressure (float): Pressure level. Defaults to ``300.``
"""
for fil in filelist:
ds = xr.open_dataset(fil).sel(lev_p=pressure).drop('lev_p')
ds.to_netcdf(f"{fil.split(variable)[0]}{variable}_temp{fil.split(variable)[1]}{fil.split('/')[-1]}")
return
def gpcp_filelist(parent_directory, variable='precip', start='1999-01-01', end='2019-12-31', freq='D'):
"""
Create list of daily GPCP Version 2.3 Combined Precipitation Data Set files.
https://www.ncei.noaa.gov/data/global-precipitation-climatology-project-gpcp-daily/access/
Args:
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
variable (str): Name of GPCP variable (e.g., 'precip').
start (str): Start of hindcasts. Defaults to '1999-01-01' for CESM2.
end (str): End of hindcasts. Defaults to '2019-12-31' for CESM2.
freq (str): Frequency of hindcast starts. Defaults to 'D' for daily.
"""
d1 = pd.date_range(start=start, end=end, freq=freq)
matches = []
for num, (yr, mo, dy) in enumerate(zip(d1.strftime("%Y"), d1.strftime("%m"), d1.strftime("%d"))):
if mo == '02' and dy == '29':
            continue # skip Feb 29 (leap day)
for root, dirnames, filenames in os.walk(f'{parent_directory}/'):
for filename in fnmatch.filter(filenames, f'*_daily_d{yr}{mo}{dy}_c*.nc'):
thefile = os.path.join(root, filename)
if os.access(thefile, os.R_OK):
matches.append(thefile)
if not os.access(thefile, os.R_OK):
matches.append(np.nan)
return matches
def cesm2_filelist(variable, parent_directory, ensemble, start='1999-01-01', end='2019-12-31', freq='W-MON'):
"""
Create list of variable files.
Args:
variable (str): Name of variable (e.g., 'zg_200').
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
ensemble (str or list of str): Two digit ensemble member of hindcast (e.g., '09') or list (e.g., ['00', '01']).
start (str): Start of hindcasts. Defaults to '1999-01-01' for CESM2.
end (str): End of hindcasts. Defaults to '2019-12-31' for CESM2.
freq (str): Frequency of hindcast starts. Defaults to 'W-MON' for CESM2.
"""
d1 = pd.date_range(start=start, end=end, freq=freq)
matches = []
for num, (yr, mo, dy) in enumerate(zip(d1.strftime("%Y"), d1.strftime("%m"), d1.strftime("%d"))):
if mo == '02' and dy == '29':
dy = '28'
for root, dirnames, filenames in os.walk(f'{parent_directory}CESM2/{variable}/{yr}/{mo}/'):
if isinstance(ensemble, str):
for filename in fnmatch.filter(filenames, f'*_cesm2cam6v2_{dy}*_m{ensemble}.nc'):
thefile = os.path.join(root, filename)
if os.access(thefile, os.R_OK):
matches.append(thefile)
if not os.access(thefile, os.R_OK):
matches.append(np.nan)
if isinstance(ensemble, list):
for ens in ensemble:
for filename in fnmatch.filter(filenames, f'*_cesm2cam6v2_{dy}*_m{ens}.nc'):
thefile = os.path.join(root, filename)
if os.access(thefile, os.R_OK):
matches.append(thefile)
if not os.access(thefile, os.R_OK):
matches.append(np.nan)
return matches
def gpcp_climatology(filelist, variable='precip', save=False, author=None, parent_directory=None):
"""
Create GPCP Version 2.3 Combined Precipitation Data Set climatology.
Args:
filelist (list of str): List of file names and directory locations.
save (boolean): Set to True if want to save climatology as netCDF. Defaults to False.
author (str): Author of file. Defaults to None.
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
Defaults to None.
"""
if save:
assert isinstance(author, str), "Please set author for file saving."
assert isinstance(parent_directory, str), "Please set parent_directory to save file to."
clim = np.zeros((int(len(filelist)/365), 365, 180, 360))
doy = 0
yr = 0
dates = []
years = []
for num, file in enumerate(filelist):
ds = xr.open_dataset(file)
ds = ds[variable].isel(time=0)
        dates.append(pd.Timestamp(ds.time.values))
import tensorflow as tf
import pandas as pd
import tensorflow_hub as hub
import os
import re
import numpy as np
from bert.tokenization import FullTokenizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from tqdm import tqdm
from tensorflow.keras import backend as K
# Initialize session
sess = tf.compat.v1.Session()
# Load all files from a directory in a DataFrame.
def load_directory_data(directory):
data = {}
data["sentence"] = []
data["sentiment"] = []
for file_path in os.listdir(directory):
with tf.gfile.GFile(os.path.join(directory, file_path), "r") as f:
data["sentence"].append(f.read())
data["sentiment"].append(re.match("\d+_(\d+)\.txt", file_path).group(1))
return pd.DataFrame.from_dict(data)
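# e.g. a review file named '42_8.txt' contributes its text as the sentence and
# '8' as the sentiment label (the group captured by the regex above).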
# Merge positive and negative examples, add a polarity column and shuffle.
def load_dataset(directory):
pos_df = load_directory_data(os.path.join(directory, "pos"))
neg_df = load_directory_data(os.path.join(directory, "neg"))
pos_df["polarity"] = 1
neg_df["polarity"] = 0
return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)
# Download and process the dataset files.
def load_datasets():
train = pd.read_csv('data/train.csv')
  test = pd.read_csv('data/test.csv')
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import NMF
from sklearn.preprocessing import MinMaxScaler
def add_team_postfix(input_df):
output_df = input_df.copy()
top = output_df['inning'].str.contains('表')
output_df.loc[top, 'batter'] = output_df.loc[top, 'batter'] + '@' + output_df.loc[top, 'topTeam'].astype(str)
output_df.loc[~top, 'batter'] = output_df.loc[~top, 'batter'] + '@' + output_df.loc[~top, 'bottomTeam'].astype(str)
output_df.loc[ top, 'pitcher'] = output_df.loc[ top, 'pitcher'] + '@' + output_df.loc[ top, 'bottomTeam'].astype(str)
output_df.loc[~top, 'pitcher'] = output_df.loc[~top, 'pitcher'] + '@' + output_df.loc[~top, 'topTeam'].astype(str)
return output_df
def add_batter_order(input_df, is_train=True):
pass
def fill_na(input_df):
output_df = input_df.copy()
output_df['pitcherHand'] = output_df['pitcherHand'].fillna('R')
output_df['batterHand'] = output_df['batterHand'].fillna('R')
output_df['pitchType'] = output_df['pitchType'].fillna('-')
output_df['speed'] = output_df['speed'].str.extract(r'(\d+)').fillna(method='ffill')
output_df['ballPositionLabel'] = output_df['ballPositionLabel'].fillna('中心')
output_df['ballX'] = output_df['ballX'].fillna(0).astype(int)
output_df['ballY'] = output_df['ballY'].map({chr(ord('A')+i):i+1 for i in range(11)})
output_df['ballY'] = output_df['ballY'].fillna(0).astype(int)
    output_df['dir'] = output_df['dir'].map({chr(ord('A')+i):i+1 for i in range(26)})
output_df['dir'] = output_df['dir'].fillna(0).astype(int)
output_df['dist'] = output_df['dist'].fillna(0)
output_df['battingType'] = output_df['battingType'].fillna('G')
output_df['isOuts'] = output_df['isOuts'].fillna('-1').astype(int)
return output_df
def get_base_features(input_df, train_pitcher, test_pitcher, train_batter, test_batter):
output_df = input_df.copy()
output_df['inning'] = 2 * (output_df['inning'].str[0].astype(int) - 1) + output_df['inning'].str.contains('裏')
output_df['pitcherCommon'] = output_df['pitcher']
output_df['batterCommon'] = output_df['batter']
output_df.loc[~(output_df['pitcherCommon'].isin(train_pitcher & test_pitcher)), 'pitcherCommon'] = np.nan
output_df.loc[~(output_df['batterCommon'].isin(train_batter & test_batter)), 'batterCommon'] = np.nan
# label encoding
cat_cols = output_df.select_dtypes(include=['object']).columns
for col in cat_cols:
f = output_df[col].notnull()
output_df.loc[f, col] = LabelEncoder().fit_transform(output_df.loc[f, col].values)
output_df.loc[~f, col] = -1
output_df[col] = output_df[col].astype(int)
output_df['inningHalf'] = output_df['inning'] % 2
output_df['inningNumber'] = output_df['inning'] // 2
output_df['outCount'] = output_df['inning'] * 3 + output_df['O']
output_df['B_S_O'] = output_df['B'] + 4 * (output_df['S'] + 3 * output_df['O'])
output_df['b1_b2_b3'] = output_df['b1'] * 1 + output_df['b2'] * 2 + output_df['b3'] * 4
    next_b = output_df.sort_values(['gameID', 'inning', 'O']).groupby(['gameID', 'inning'], group_keys=False)[['b1', 'b2', 'b3']].shift(-1).rename(columns={'b1': 'n_b1', 'b2': 'n_b2', 'b3': 'n_b3'})
output_df = pd.merge(output_df, next_b, left_index=True, right_index=True)
def replace_b1(x):
if pd.isnull(x['n_b1']):
return x['b1']
else:
return x['n_b1']
def replace_b2(x):
if pd.isnull(x['n_b2']):
return x['b2']
else:
return x['n_b2']
def replace_b3(x):
if pd.isnull(x['n_b3']):
return x['b3']
else:
return x['n_b3']
    output_df['n_b1'] = output_df.apply(replace_b1, axis=1)
output_df['n_b2'] = output_df.apply(replace_b2, axis=1)
output_df['n_b3'] = output_df.apply(replace_b3, axis=1)
output_df['plus_b1'] = output_df.apply(lambda x: x['b1'] < x['n_b1'], axis=1)
output_df['plus_b2'] = output_df.apply(lambda x: x['b2'] < x['n_b2'], axis=1)
output_df['plus_b3'] = output_df.apply(lambda x: x['b3'] < x['n_b3'], axis=1)
output_df['minus_b1'] = output_df.apply(lambda x: x['b1'] > x['n_b1'], axis=1)
output_df['minus_b2'] = output_df.apply(lambda x: x['b2'] > x['n_b2'], axis=1)
output_df['minus_b3'] = output_df.apply(lambda x: x['b3'] > x['n_b3'], axis=1)
return output_df
def aggregation(input_df, group_keys, group_values, agg_methods):
new_df = []
for agg_method in agg_methods:
for col in group_values:
if callable(agg_method):
agg_method_name = agg_method.__name__
else:
agg_method_name = agg_method
new_col = f'agg_{agg_method_name}_{col}_grpby_' + '_'.join(group_keys)
agg_df = input_df[[col]+group_keys].groupby(group_keys)[[col]].agg(agg_method)
agg_df.columns = [new_col]
new_df.append(agg_df)
new_df = pd.concat(new_df, axis=1).reset_index()
output_df = pd.merge(input_df, new_df, on=group_keys, how='left')
return output_df, list(new_df.columns)
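# For example, aggregation(df, ['subGameID'], ['S'], ['mean']) merges a column
# named 'agg_mean_S_grpby_subGameID' (the per-game mean of S) onto every row of
# df and also returns the resulting column names (group keys plus aggregates).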
def get_agg_gameID_inningHalf_features(input_df):
group_keys = ['subGameID', 'inningHalf']
group_values = ['S', 'B', 'b1', 'b2', 'b3']
agg_methods = ['mean', 'std']
output_df, cols = aggregation(
input_df, group_keys=group_keys, group_values=group_values, agg_methods=agg_methods)
return reduce_mem_usage(output_df)
'''
pivot table features
'''
def get_pivot_NMF9_features(input_df, n, value_col):
pivot_df = pd.pivot_table(input_df, index='subGameID', columns='outCount', values=value_col, aggfunc=np.median)
sc0 = MinMaxScaler().fit_transform(np.median(pivot_df.fillna(0).values.reshape(-1,54//3,3)[:,0::2,:], axis=-1))
sc1 = MinMaxScaler().fit_transform(np.median(pivot_df.fillna(0).values.reshape(-1,54//3,3)[:,1::2,:], axis=-1))
nmf = NMF(n_components=n, random_state=2021)
nmf_df0 = pd.DataFrame(nmf.fit_transform(sc0), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF9T={x:02}')
nmf_df1 = pd.DataFrame(nmf.fit_transform(sc1), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF9B={x:02}')
nmf_df = pd.concat([nmf_df0, nmf_df1], axis=1)
nmf_df = pd.merge(
input_df, nmf_df, left_on='subGameID', right_index=True, how='left')
return reduce_mem_usage(nmf_df)
# features built from a pivot table
def get_pivot_NMF27_features(input_df, n, value_col):
pivot_df = pd.pivot_table(input_df, index='subGameID', columns='outCount', values=value_col, aggfunc=np.median)
sc0 = MinMaxScaler().fit_transform(pivot_df.fillna(0).values.reshape(-1,54//3,3)[:,0::2].reshape(-1,27))
sc1 = MinMaxScaler().fit_transform(pivot_df.fillna(0).values.reshape(-1,54//3,3)[:,1::2].reshape(-1,27))
nmf = NMF(n_components=n, random_state=2021)
nmf_df0 = pd.DataFrame(nmf.fit_transform(sc0), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF27T={x:02}')
nmf_df1 = pd.DataFrame(nmf.fit_transform(sc1), index=pivot_df.index).rename(
columns=lambda x: f'pivot_{value_col}_NMF27B={x:02}')
nmf_df = pd.concat([nmf_df0, nmf_df1], axis=1)
nmf_df = pd.merge(
input_df, nmf_df, left_on='subGameID', right_index=True, how='left')
return reduce_mem_usage(nmf_df)
# features built from a pivot table
def get_pivot_NMF54_features(input_df, n, value_col):
    pivot_df = pd.pivot_table(input_df, index='subGameID', columns='outCount', values=value_col, aggfunc=np.median)
import pandas as pd
import csv
from itertools import zip_longest
import os
import math
def readFiles(tpath):
txtLists = os.listdir(tpath)
return txtLists
def atan(x):
b = []
for i in x:
bb = ( math.atan(i) * 2 / math.pi)
b.append(bb)
b = pd.Series(b)
return b
def log(x,maxx):
b = []
for i in x:
bb = math.log10(i+1)/math.log10(maxx)
b.append(bb)
b = pd.Series(b)
return b
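# Both helpers squash non-negative scores into [0, 1]; purely as illustration:
#     atan(pd.Series([0, 1])) -> [0.0, 0.5]
#     log(pd.Series([0, 9, 99]), 100) -> [0.0, 0.5, 1.0]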
def SaveFCAs(path):
errorlist = []
count = 0
FreqC, FreqCR, FreqE, FreqER = find_max_freq()
print(FreqC, FreqCR, FreqE, FreqER)
CueC, CueCR, CueE, CueER, mCueE, mCueER, mCueC, mCueCR = find_max_cue()
print(CueC, CueCR, CueE, CueER)
print( mCueC, mCueCR, mCueE, mCueER)
f = 1
sta_schemas = pd.DataFrame(
columns=["Schema", "nClass", "CueE_n", "CueER_n", "FreqE_n", "FreqER_n", 'search', 'share', 'overall'])
for i in readFiles(path):
if 'csv' not in i:
continue
count += 1
name = i[:-8]
add1 = 'Cue/%s_Cue.csv' %name
add2 = 'Freq/%s_Freq.csv' %name
cue = pd.read_csv(add1)
# cue = cue.sort_values("Class")
freq = pd.read_csv(add2)
# freq = freq.sort_values("Class")
overallS = pd.DataFrame(columns=["Class", "CueE", "CueE_n", "CueER", "CueER_n", 'FreqE', 'FreqE_n','FreqER', 'FreqER_n','search','share','overall'])
cue = cue.fillna(0)
freq = freq.fillna(0)
print(count)
print("process:", name)
try:
l = cue.shape[0]
overallS['Class'] = cue['Class'][0:-1]
overallS.at["overall", 'Class'] = "Overall"
overallS['CueE'] = cue['CueE']
overallS['CueE_n'] = (cue['CueE'][0:-1]-mCueE)/(CueE-mCueE) if f == 0 else log(cue['CueE'][0:-1],CueE)
overallS.at["overall", 'CueE_n'] = math.log10(cue['CueE'][l-1]+1)/math.log10(CueC)
overallS['CueER'] = cue['CueER']
overallS['CueER_n'] = (cue['CueER'][0:-1]-mCueER)/(CueER-mCueER)
overallS.at["overall", 'CueER_n'] = cue['CueER'][l-1]/CueCR
overallS['FreqE'] = freq['FreqE']
overallS['FreqE_n'] = freq['FreqE'][0:-1]/FreqE if f== 0 else log(freq['FreqE'][0:-1],FreqE)
overallS.at["overall", 'FreqE_n'] = math.log10(freq['FreqE'][l-1]+1)/math.log10(FreqC)
overallS['FreqER'] = freq['FreqER']
            overallS['FreqER_n'] = freq['FreqER'][0:-1]/FreqER
overallS.at["overall", 'FreqER_n'] = freq['FreqER'][l-1]/FreqCR
overallS['search'] = (overallS['CueER_n'] + overallS['CueE_n'])/2
overallS['share'] = (overallS['FreqER_n'] + overallS['FreqE_n'])/2
overallS['overall'] = (overallS['search'] + overallS['share'])/2
overallS.to_csv('OverallScore2/%s.csv' %name, index=0)
sta_schemas.at['%s' % name, 'Schema'] = name
sta_schemas.at['%s' % name, 'nClass'] = overallS.shape[0]-1
sta_schemas.at['%s' % name, 'CueE_n'] = overallS.at["overall", 'CueE_n']
sta_schemas.at['%s' % name, 'CueER_n'] = overallS.at["overall", 'CueER_n']
sta_schemas.at['%s' % name, 'FreqE_n'] = overallS.at["overall", 'FreqE_n']
sta_schemas.at['%s' % name, 'FreqER_n'] = overallS.at["overall", 'FreqER_n']
sta_schemas.at['%s' % name, 'search'] = overallS.at["overall", 'search']
sta_schemas.at['%s' % name, 'share'] = overallS.at["overall", 'share']
sta_schemas.at['%s' % name, 'overall'] = overallS.at["overall", 'overall']
except Exception as e:
print("Error:", name)
print(e)
errorlist.append([name, e])
sta_schemas.to_csv("sta_schemas.csv", index=0)
def find_max_freq():
FreqE, FreqER = 0, 0
FreqC, FreqCR = 0, 0
path1 = 'Freq/'
for i in readFiles(path):
if 'csv' not in i:
continue
add = path1 + i[0:-7] +'Freq.csv'
        a = pd.read_csv(add,index_col='Class')
#%%
import os
import glob
import itertools
import re
import numpy as np
import pandas as pd
import collections
import skbio
import git
#%%
# Find project parental directory
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Define data directory
datadir = f"{homedir}/data/processed_sequencing/20210507_lacI_negctrl_library_mapping/"
# Define output dir
outputdir = f"{homedir}/data/barcodes/20210507_lacI_negctrl_library_mapping/"
# List fastq.gz file
fastq_file = glob.glob(f"{datadir}*WT_Lac*.fastq.gz")[0]
#%%
# Define operator sequences
# Forward operators
O1_rev = skbio.DNA("aattgtgagcggataacaatt".upper())
O2_rev = skbio.DNA("aaatgtgagcgagtaacaacc".upper())
O3_rev = skbio.DNA("ggcagtgagcgcaacgcaatt".upper())
# Reverse complement
O1 = O1_rev.reverse_complement()
O2 = O2_rev.reverse_complement()
O3 = O3_rev.reverse_complement()
operators = {
"O1": str(O1),
"O2": str(O2),
"O3": str(O3),
"O1_rev": str(O1_rev),
"O2_rev": str(O2_rev),
"O3_rev": str(O3_rev),
}
# Forward primer
fwd_prim = skbio.DNA("GCTTATTCGTGCCGTGTTAT").reverse_complement()
# Reverse primer
rev_prim = skbio.DNA("GGGCACAGCAATCAAAAGTA").reverse_complement()
# Define RNAP binding site
rnap = str(
skbio.DNA("TTTACACTTTATGCTTCCGGCTCGTATAATGTGTGG").reverse_complement()
)
# Define clone binding site
clone = str(skbio.DNA("gctagcCAATGCGGgagctc".upper()).reverse_complement())
#%%
def op_match(seq):
"""
Function to match the operator sequences
"""
# Loop through operators
for key, item in operators.items():
# Find operator and return boolean if found
op_pos = re.search(item, seq)
# If found return the operator and break loop
if bool(op_pos):
return [key] + [*op_pos.span()]
break
# If none match, return none
if not bool(op_pos):
return ["None", 0, 0]
def rev_comp(seq):
"""
Function that takes a string, converts it into skbio.DNA
and takes the reverse complement
"""
return str(skbio.DNA(seq, validate=False).reverse_complement())
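# Quick sanity check of the two helpers above (the read below is made up):
#     read = 'AAAA' + operators['O1'] + 'TTTT'
#     op_match(read)    # -> ['O1', 4, 25]  (operator name, match start, match end)
#     rev_comp('ACGT')  # -> 'ACGT' (palindromic example)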
#%%
print("Reading WTlac sequences into memory")
# Use skbio to have a generator to iterate over fastq
seqs = skbio.io.read(
f"{fastq_file}", format="fastq", verify="false", variant="illumina1.8"
)
# Initialize list to save sequences
seq_list = list()
# Initialize counter
counter = 0
# Define number of samples
n_samples = 10000
# Iterate over sequences
for seq in seqs: # itertools.islice(seqs, n_samples):
if counter % 10000 == 0:
print(f"reading seq #{counter}")
# Extract sequence information
seq_id = seq.metadata["id"]
sequence = str(skbio.DNA(sequence=seq, validate=False))
# Append to list
seq_list.append([seq_id, sequence])
counter += 1
# Initialize dataframe to save sequences
names = ["id", "sequence"]
df_seq = pd.DataFrame.from_records(seq_list, columns=names)
# Add index and sequence length to dataframe
df_seq["seq_len"] = df_seq["sequence"].apply(len)
print("Done reading sequences...")
print("Mapping operator sequences")
# Map operators
op_map = list()
# Loop through rows
for seq in df_seq.sequence:
op_map.append(op_match(seq))
df_seq = pd.concat(
[
df_seq,
pd.DataFrame.from_records(
op_map, columns=["operator", "op_begin", "op_end"]
),
],
axis=1,
)
# Reverse complement sequences which had a reversed operator
# Find forward sequences
bool_forward = ["_rev" in x and x != "None" for x in df_seq.operator]
# Reverse complement forward sequences
df_seq.loc[bool_forward, "sequence"] = [
rev_comp(seq) for seq in df_seq[bool_forward]["sequence"]
]
# Remap operators after having reversed sequences
op_map = list()
# Loop through rows
for seq in df_seq.sequence:
op_map.append(op_match(seq))
df_seq[["operator", "op_begin", "op_end"]] = pd.DataFrame.from_records(
op_map, columns=["operator", "op_begin", "op_end"]
)
print("Done mapping operators...")
print("Mapping forward and reverse primers")
# Initialize array to save primer start position
prim_pos = np.zeros([len(df_seq), 2], dtype=int)
# Loop through sequences
for i, seq in df_seq.iterrows():
# Search forward primer
fwd_pos = re.search(str(fwd_prim), seq["sequence"])
# Save position
if bool(fwd_pos):
prim_pos[i, 0] = fwd_pos.span()[0]
# Search reverse primer
rev_pos = re.search(str(rev_prim), seq["sequence"])
# Save position
if bool(rev_pos):
prim_pos[i, 1] = rev_pos.span()[0]
# Assing columns with information
df_seq = df_seq.assign(
fwd_prim=prim_pos[:, 0],
rev_prim=prim_pos[:, 1],
prim_dist=np.abs(prim_pos[:, 0] - prim_pos[:, 1]),
)
print("Done mapping primers...")
#%%
# Filtering sequences
print("Filtering sequences by:")
print("1. Filtering by separation between primers")
# Save original dataframe length
len_df = len(df_seq)
# Filter by length
df_filt = df_seq[df_seq["prim_dist"] == 82]
# Save rejected sequences
df_reject = df_seq[df_seq["prim_dist"] != 82][["id", "sequence"]]
df_reject = df_reject.assign(reject_by="primers_distance")
# Reset index
df_filt.reset_index(inplace=True, drop=True)
df_reject.reset_index(inplace=True, drop=True)
# Print percentage of sequences removed
print(
f"""
Cumulative percentage of original sequences removed:
{100 - np.round(len(df_filt) / len_df * 100, 2)}%
"""
)
#%%
print("2. Filtering by operator sequence and position")
# Copy filtered step
df = df_filt
# Remove sequences with no mapped operator
df_filt = df[df["operator"] != "None"]
# Store rejected sequences
df_r = df[df["operator"] == "None"][["id", "sequence"]]
df_r = df_r.assign(reject_by="operator_seq")
df_reject = df_reject.append(df_r, ignore_index=True)
# Reset index
df_filt.reset_index(inplace=True, drop=True)
df_reject.reset_index(inplace=True, drop=True)
# Compute the distance between the end of the primer and the operator
op_dist = (df_filt["op_begin"] - df_filt["rev_prim"]) - len(rev_prim)
# Copy filtered step
df = df_filt
# Select sequences
df_filt = df[op_dist == 0]
# Store rejected sequences
df_r = df[op_dist != 0][["id", "sequence"]]
df_r = df_r.assign(reject_by="operator_pos")
df_reject = df_reject.append(df_r, ignore_index=True)
# Reset index
df_filt.reset_index(inplace=True, drop=True)
df_reject.reset_index(inplace=True, drop=True)
# Print percentage of sequences removed
print(
f"""
Cumulative percentage of original sequences removed:
{np.round(100 - len(df_filt) / len_df * 100, 2)}%
"""
)
#%%
print("3. Filtering by RNAP binding site sequence and position")
# Initialize array to save RNAP position
rnap_pos = np.zeros(len(df_filt))
# Loop through sequences
for i, seq in df_filt.iterrows():
# Search RNAP sequence
rnap_re = re.search(str(rnap), seq["sequence"])
# Save position
if bool(rnap_re):
rnap_pos[i] = rnap_re.span()[0]
# Add column to dataframe
df_filt = df_filt.assign(rnap=rnap_pos)
# Compute RNAP distance
rnap_dist = (
(df_filt["rnap"] - df_filt["rev_prim"])
- len(rev_prim)
- len(operators["O1"])
)
# Copy filtered step
df = df_filt
# Select sequences
df_filt = df[rnap_dist == 0]
# Store rejected sequences
df_r = df[rnap_dist != 0][["id", "sequence"]]
df_r = df_r.assign(reject_by="rnap_pos")
df_reject = df_reject.append(df_r, ignore_index=True)
# Reset index
df_filt.reset_index(inplace=True, drop=True)
df_reject.reset_index(inplace=True, drop=True)
# Print percentage of sequences removed
print(
f"""
Cumulative percentage of original sequences removed:
{np.round(100 - len(df_filt) / len_df * 100, 2)}%
"""
)
#%%
print("4. Filtering by cloning site sequence and position")
# Initialize array to save clone position
clone_pos = np.zeros(len(df_filt))
# Loop through sequences
for i, seq in df_filt.iterrows():
# Search clone sequence
clone_re = re.search(str(clone), seq["sequence"])
# Save position
if bool(clone_re):
clone_pos[i] = clone_re.span()[0]
# Add column to dataframe
df_filt = df_filt.assign(clone=clone_pos)
# Compute clone distance
clone_dist = (df_filt["clone"] - df_filt["rev_prim"]) + len(clone)
# Copy filtered step
df = df_filt
# Select sequences
df_filt = df[(clone_dist == 0) & (clone_pos == 20)]
# Store rejected sequences
df_r = df[(clone_dist != 0) | (clone_pos != 20)][["id", "sequence"]]
df_r = df_r.assign(reject_by="clone_pos")
df_reject = df_reject.append(df_r, ignore_index=True)
# Reset index
df_filt.reset_index(inplace=True, drop=True)
df_reject.reset_index(inplace=True, drop=True)
# Print percentage of sequences removed
print(
f"""
Cumulative percentage of original sequences removed:
{np.round(100 - len(df_filt) / len_df * 100, 2)}%
"""
)
# Group by operator
df_group = df_filt.groupby("operator")
# Initialize dataframe to save outcome
names = ["operator", "sequence", "barcode", "counts"]
df_counts = pd.DataFrame(columns=names)
import pandas as pd
from datetime import datetime
from sapextractor.utils import constants
def apply(dataframe, dt_column, tm_column, target_column):
try:
if str(dataframe[dt_column].dtype) != "object":
print("a")
dataframe[dt_column] = dataframe[dt_column].apply(lambda x: x.strftime(constants.DATE_FORMAT_INTERNAL))
if str(dataframe[tm_column].dtype) != "object":
print("b")
dataframe[tm_column] = dataframe[tm_column].apply(lambda x: x.strftime(constants.HOUR_FORMAT_INTERNAL))
dataframe[target_column] = dataframe[dt_column] + " " + dataframe[tm_column]
print("c")
dataframe[target_column] = pd.to_datetime(dataframe[target_column], format=constants.TIMESTAMP_FORMAT)
print("d")
dataframe = dataframe.sort_values("event_timestamp")
dataframe = dataframe.dropna(subset=["event_timestamp"], how="any")
except:
print("e")
dataframe[dt_column] = pd.to_datetime(dataframe[dt_column], format=constants.DATE_FORMAT_INTERNAL)
print("f")
        dataframe[tm_column] = pd.to_datetime(dataframe[tm_column], format=constants.HOUR_FORMAT_INTERNAL)
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(["a", "b"])
expected = pd.Series(["b", "a"])
result = s.replace({"a": "b", "b": "a"})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, "4", 4, 5])
result = s.replace([2, "4"], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"categorical, numeric",
[
(pd.Categorical(["A"], categories=["A", "B"]), [1]),
(pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
],
)
def test_replace_categorical(self, categorical, numeric):
# GH 24971, GH#23305
ser = pd.Series(categorical)
result = ser.replace({"A": 1, "B": 2})
expected = pd.Series(numeric).astype("category")
if 2 not in expected.cat.categories:
# i.e. categories should be [1, 2] even if there are no "B"s present
# GH#44940
expected = expected.cat.add_categories(2)
tm.assert_series_equal(expected, result)
def test_replace_categorical_single(self):
# GH 26988
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
s = pd.Series(dti)
c = s.astype("category")
expected = c.copy()
expected = expected.cat.add_categories("foo")
expected[2] = "foo"
expected = expected.cat.remove_unused_categories()
assert c[2] != "foo"
result = c.replace(c[2], "foo")
tm.assert_series_equal(expected, result)
assert c[2] != "foo" # ensure non-inplace call does not alter original
return_value = c.replace(c[2], "foo", inplace=True)
assert return_value is None
tm.assert_series_equal(expected, c)
first_value = c[0]
return_value = c.replace(c[1], c[0], inplace=True)
assert return_value is None
assert c[0] == c[1] == first_value # test replacing with existing value
def test_replace_with_no_overflowerror(self):
# GH 25616
# casts to object without Exception from OverflowError
s = pd.Series([0, 1, 2, 3, 4])
result = s.replace([3], ["100000000000000000000"])
expected = pd.Series([0, 1, 2, "100000000000000000000", 4])
tm.assert_series_equal(result, expected)
s = pd.Series([0, "100000000000000000000", "100000000000000000001"])
result = s.replace(["100000000000000000000"], [1])
expected = pd.Series([0, 1, "100000000000000000001"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, to_replace, exp",
[
([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),
(["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]),
],
)
def test_replace_commutative(self, ser, to_replace, exp):
# GH 16051
# DataFrame.replace() overwrites when values are non-numeric
series = pd.Series(ser)
expected = pd.Series(exp)
result = series.replace(to_replace)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])]
)
def test_replace_no_cast(self, ser, exp):
# GH 9113
# BUG: replace int64 dtype with bool coerces to int64
series = pd.Series(ser)
result = series.replace(2, True)
expected = pd.Series(exp)
tm.assert_series_equal(result, expected)
def test_replace_invalid_to_replace(self):
# GH 18634
# API: replace() should raise an exception if invalid argument is given
series = pd.Series(["a", "b", "c "])
msg = (
r"Expecting 'to_replace' to be either a scalar, array-like, "
r"dict or None, got invalid type.*"
)
with pytest.raises(TypeError, match=msg):
series.replace(lambda x: x.strip())
@pytest.mark.parametrize("frame", [False, True])
def test_replace_nonbool_regex(self, frame):
obj = pd.Series(["a", "b", "c "])
if frame:
obj = obj.to_frame()
msg = "'to_replace' must be 'None' if 'regex' is not a bool"
with pytest.raises(ValueError, match=msg):
obj.replace(to_replace=["a"], regex="foo")
@pytest.mark.parametrize("frame", [False, True])
def test_replace_empty_copy(self, frame):
obj = pd.Series([], dtype=np.float64)
if frame:
obj = obj.to_frame()
res = obj.replace(4, 5, inplace=True)
assert res is None
res = obj.replace(4, 5, inplace=False)
tm.assert_equal(res, obj)
assert res is not obj
def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
# GH#33340
ser = pd.Series([1, 2, "A", fixed_now_ts, True])
to_replace = {0: 1, 2: "A"}
value = "foo"
msg = "Series.replace cannot use dict-like to_replace and non-None value"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
to_replace = 1
value = {0: "foo", 2: "bar"}
msg = "Series.replace cannot use dict-value and non-None to_replace"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
def test_replace_extension_other(self, frame_or_series):
# https://github.com/pandas-dev/pandas/issues/34530
obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64"))
result = obj.replace("", "") # no exception
# should not have changed dtype
tm.assert_equal(obj, result)
def _check_replace_with_method(self, ser: pd.Series):
df = ser.to_frame()
res = ser.replace(ser[1], method="pad")
expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
tm.assert_series_equal(res, expected)
res_df = df.replace(ser[1], method="pad")
tm.assert_frame_equal(res_df, expected.to_frame())
ser2 = ser.copy()
res2 = ser2.replace(ser[1], method="pad", inplace=True)
assert res2 is None
tm.assert_series_equal(ser2, expected)
res_df2 = df.replace(ser[1], method="pad", inplace=True)
assert res_df2 is None
tm.assert_frame_equal(df, expected.to_frame())
def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
ser = pd.Series(arr)
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_interval_with_method(self, as_categorical):
# in particular interval that can't hold NA
idx = pd.IntervalIndex.from_breaks(range(4))
ser = pd.Series(idx)
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_period", [True, False])
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_datetimelike_with_method(self, as_period, as_categorical):
idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
if as_period:
idx = idx.tz_localize(None).to_period("D")
ser = pd.Series(idx)
ser.iloc[-2] = pd.NaT
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
def test_replace_with_compiled_regex(self):
# https://github.com/pandas-dev/pandas/issues/35680
s = pd.Series(["a", "b", "c"])
regex = re.compile("^a$")
result = s.replace({regex: "z"}, regex=True)
expected = pd.Series(["z", "b", "c"])
tm.assert_series_equal(result, expected)
def test_pandas_replace_na(self):
# GH#43344
ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string")
regex_mapping = {
"AA": "CC",
"BB": "CC",
"EE": "CC",
"CC": "CC-REPL",
}
result = ser.replace(regex_mapping, regex=True)
exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"dtype, input_data, to_replace, expected_data",
[
("bool", [True, False], {True: False}, [False, False]),
("int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
("Int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
("float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
("Float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
("string", ["one", "two"], {"one": "1", "two": "2"}, ["1", "2"]),
(
pd.IntervalDtype("int64"),
IntervalArray([pd.Interval(1, 2), pd.Interval(2, 3)]),
{pd.Interval(1, 2): pd.Interval(10, 20)},
IntervalArray([pd.Interval(10, 20), pd.Interval(2, 3)]),
),
(
pd.IntervalDtype("float64"),
                IntervalArray([pd.Interval(1.0, 2.7), pd.Interval(2.8, 3.1)]),
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import glob
import subprocess
from libraries.lib_percentiles import *
from libraries.lib_gtap_to_final import gtap_to_final
from libraries.lib_common_plotting_functions import greys, quint_colors, quint_labels
from libraries.lib_country_params import get_FD_scale_fac,iso_to_name
from libraries.lib_get_hh_survey import get_hh_survey#, get_miembros_hogar
from libraries.lib_survey_categories import get_dict_gtap_to_final
from libraries.lib_results_to_excel import save_to_results_file
from matplotlib.ticker import FormatStrFormatter
import matplotlib as mpl
mpl.rcParams['hatch.linewidth'] = 0.2
import seaborn as sns
div_pal = sns.color_palette('BrBG', n_colors=11)
def plot_expenditures_by_category(pais,hies_FD,hies_FD_tot):
out_dir = 'output/'
if pais == 'brb': out_dir = '/Users/brian/Desktop/Dropbox/IDB/Barbados/output/'
####################
# Plot expenditures by category
# --> as fraction of total expenditures
hies_FD = hies_FD.reset_index().set_index(['cod_hogar','quintile'])
hies_FD_tot = hies_FD_tot.reset_index().set_index(['cod_hogar','quintile'])
final_FD_quints = pd.DataFrame(index=hies_FD_tot.sum(level='quintile').index).sort_index()
# Reset df
do_not_plot = []
plt.figure(figsize=(6,6))
fdict = get_dict_gtap_to_final()
for _h in fdict:
hies_FD_tot[_h] = hies_FD[[fdict[_h][1]]].sum(axis=1)
final_FD_quints[_h] = 100.*(hies_FD_tot[['hhwgt',_h]].prod(axis=1)/hies_FD_tot['totex_hh']).sum(level='quintile')/hies_FD_tot['hhwgt'].sum(level='quintile')
_ = final_FD_quints.T.copy()
_.columns = ['Q1','Q2','Q3','Q4','Q5']
##########################################################################################
# Record sample (all countries) stats in out_dir+'all_countries/hh_expenditures_table.csv'
try: hhexp = pd.read_csv(out_dir+'all_countries/hh_expenditures_table.csv').set_index('category')
except: hhexp = pd.DataFrame({pais.upper():0,'category':[fdict[i][1] for i in fdict]},index=None).set_index('category')
for _ex in fdict:
hhexp.loc[fdict[_ex][1],pais.upper()] = _.loc[_ex].mean()
try: hhexp.to_csv(out_dir+'all_countries/hh_expenditures_table.csv')
except: pass
##########################################################################################
##########################################################################################
# Record sample (all countries) stats in out_dir+'all_countries/hh_regressivity_table.csv'
for _q in ['Q1','Q2','Q3','Q4']:
try: hhreg = pd.read_csv(out_dir+'all_countries/hh_regressivity_table_'+_q+'.csv').set_index('category')
except: hhreg = pd.DataFrame({pais.upper():0,'category':[fdict[i][1] for i in fdict]},index=None).set_index('category')
for _ex in fdict:
hhreg.loc[fdict[_ex][1],pais.upper()] = _.loc[_ex,'Q1']/_.loc[_ex,'Q5']
try: hhreg.to_csv(out_dir+'all_countries/hh_regressivity_table_'+_q+'.csv')
except: pass
##########################################################################################
_ = _[['Q1','Q5']].T.sort_values(by='Q1',axis=1)
null_col = []
for _c in _:
if round(_[_c].mean(),1)==0: null_col.append(_c)
if _[_c].mean()<0.1: do_not_plot.append(_c)
_ = _.drop(null_col,axis=1)
final_FD_quints.to_csv(out_dir+'expenditures/'+pais+'_gasto_by_cat_and_quint.csv')
col_wid=_.shape[1]/2
ax = plt.barh(np.arange(0,_.shape[1],1)*col_wid,_.iloc[0],color=sns.color_palette('BrBG', n_colors=11)[2],height=2.5)
plt.barh(np.arange(0,_.shape[1],1)*col_wid+2.5,_.iloc[1],color=sns.color_palette('BrBG', n_colors=11)[8],height=2.5)
plt.gca().grid(False)
sns.despine(bottom=True)
plt.gca().set_yticks(np.arange(0,_.shape[1],1)*col_wid+1)
plt.gca().set_yticklabels([fdict[_h][1] for _h in _.columns],ha='right',fontsize=10,weight='light',color=greys[7])
plt.gca().set_xticklabels([])
ax = plt.gca()
_y = [0.,0.]
rects = ax.patches
for rect in rects:
if (rect.get_y()+rect.get_height()/2.) > _y[0]:
_y.append(rect.get_y()+rect.get_height()/2.);_y.sort();_y.pop(0)
for rect in rects:
_w = rect.get_width()
pct = ''
if (rect.get_y()+rect.get_height()/2.) in _y: pct = '%'
ax.annotate(str(round(_w,1))+pct,xy=(rect.get_x()+rect.get_width()+0.5, rect.get_y()+rect.get_height()/2.-0.1),
ha='left', va='center',color=greys[7],fontsize=7,zorder=100,clip_on=False,style='italic')
ax.annotate('Wealthiest quintile',xy=(0.8,_y[1]),ha='left',va='center',color=greys[0],fontsize=7,zorder=100,style='italic')
ax.annotate('Poorest quintile',xy=(0.8,_y[0]),ha='left',va='center',color=greys[7],fontsize=7,zorder=100,style='italic')
plt.title('Household expenditures in '+iso_to_name[pais],weight='bold',color=greys[7],fontsize=12,loc='right')
plt.draw()
try:
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gastos_all_categories.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gastos_all_categories.png',format='png',bbox_inches='tight')
except: pass
plt.cla(); plt.close('all')
return hies_FD,hies_FD_tot,null_col
def plot_gtap_exp(pais,do_tax_food=True,verbose=False):
out_dir = 'output/'
if pais == 'brb': out_dir = '/Users/brian/Desktop/Dropbox/IDB/Barbados/output/'
############################
# Kuishuang's code (mostly):
# load household survey data
hh_hhsector = get_hh_survey(pais)
hh_hhsector = hh_hhsector.drop([i for i in hh_hhsector.columns if 'ing' in i or 'ict' in i],axis=1)
#hh_hhsector = hh_hhsector.fillna(1E5)#flag
if verbose: print(hh_hhsector.shape)
# load bridge matrix
xl = pd.ExcelFile('consumption_and_household_surveys/2017-10-13/Bridge_matrix_consumption_items_to_GTAP_power_sectors.xlsx')
if pais in xl.sheet_names: # all sheet names
print('using '+pais+' tab')
bridge_to_use = xl.parse(pais).fillna(0).drop(['Item_english'],axis = 1).set_index('Item') # read the specific sheet
else:
if verbose: print('using default tab')
bridge_to_use = xl.parse('nae_of_default_tab').fillna(0).drop(['Item_english'],axis = 1).set_index('Item')
cols_to_drop = []
for i in bridge_to_use.columns:
if verbose: print(i,bridge_to_use[i].sum())
if bridge_to_use[i].sum(axis=0)==0:
cols_to_drop.append(i)
bridge_to_use = bridge_to_use.drop(cols_to_drop,axis=1)
# household survey in GTAP sectors
hh_gtap_sector = hh_hhsector[bridge_to_use.index].fillna(0).dot(bridge_to_use)
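    # i.e. each household's spending vector over survey items is re-expressed in
    # GTAP sectors via a matrix product with the bridge matrix.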
hh_gtap_sector = hh_gtap_sector.reset_index()
try: hh_gtap_sector['cod_hogar'] = hh_gtap_sector['cod_hogar'].astype('int')
except: hh_gtap_sector['cod_hogar'] = hh_gtap_sector['cod_hogar'].astype('str')
hh_gtap_sector = hh_gtap_sector.reset_index().set_index('cod_hogar')
## Run test.
#print(hh_hhsector.columns)
#print(hh_hhsector.head())
#_hh_hhsector = hh_hhsector.copy()
#for _c in _hh_hhsector.columns:
# if _c != 'gasto_ali':#and _c != 'gasto_alihogar':
# _hh_hhsector[_c] = 0
#_hh_gtap_sector = _hh_hhsector[bridge_to_use.index].fillna(0).dot(bridge_to_use)
if verbose: print(hh_gtap_sector.head(8))
# calcuate each household's share of national consumption, by category
hh_share = (hh_gtap_sector.mul(hh_hhsector.factor_expansion, axis=0).fillna(0))/(hh_gtap_sector.mul(hh_hhsector.factor_expansion, axis=0).fillna(0).sum())
# Read household consumption vector from GTAP
_iot_code = pais if pais != 'brb' else 'xcb'
try:
hh_fd_file = 'GTAP_power_IO_tables_with_imports/Household_consumption_both_domestic_import.xlsx'
household_FD = get_FD_scale_fac(pais)*pd.read_excel(hh_fd_file,index_col=[0])[_iot_code].squeeze()
except:
if pais == 'brb': household_FD = get_FD_scale_fac(pais)*pd.read_excel('GTAP_power_IO_tables/xcbIOT.xlsx',sheet_name='Final_Demand',index_col=[0])['Hou'].squeeze()
else: assert(False)
# ^ get_FD_scale_fac(pais) != 1. ONLY IF pais == 'brb'
# Final demand matrix
hh_FD = household_FD*hh_share.fillna(0)
for i in hh_FD.columns: hh_FD[i]/=hh_hhsector['factor_expansion']
if verbose:
print(household_FD.head())
print(hh_FD.head(5))
####################
# Use gtap_to_final script to translate both expenditures & cc into HIES cats
hies_FD, hies_FD_tot, hies_sf = gtap_to_final(hh_hhsector,hh_FD,pais,verbose=True)
# Now, this df should be consistent with the FD vector
if verbose:
print((hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum())
print(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum())
print('FD:',round(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum(),3),round((hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),3))
assert(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum()/(hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum()>0.999)
assert(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum()/(hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum()<1.001)
####################
####################
if pais == 'brb':
energy_tax_total = get_FD_scale_fac(pais)*pd.read_csv('/Users/brian/Desktop/Dropbox/IDB/Barbados/output/tax_cost_to_hh_in_gtap_cats.csv').set_index('cod_hogar')
final_CC,wgts,_ = gtap_to_final(hh_hhsector,energy_tax_total,pais)
hhwgts = wgts[['pcwgt','hhwgt','hhsize']].copy().dropna()
final_CC_ind = final_CC.copy()
final_CC_CO2 = final_CC.copy()
final_CC_nonCO2 = final_CC.copy()
for col in final_CC_nonCO2.columns: final_CC_nonCO2[col].values[:] = 0
final_CC_dir = final_CC.copy()
for col in final_CC_dir.columns: final_CC_dir[col].values[:] = 0
#print(hhwgts.shape[0],hhwgts.dropna().shape[0])
# HACK: ^ should be no NAs in this df
else:
# Indirect carbon costs - CO2
ccdf_ind_CO2 = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_CO2.csv').set_index('cod_hogar')
# Indirect carbon costs - non-CO2
ccdf_ind_nonCO2 = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_nonCO2.csv').set_index('cod_hogar')
# Indirect carbon costs (allGHG)
ccdf_ind = get_FD_scale_fac(pais)* | pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_allGHG.csv') | pandas.read_csv |
"""
Tests shared for DatetimeIndex/TimedeltaIndex/PeriodIndex
"""
from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
PeriodIndex,
TimedeltaIndex,
date_range,
period_range,
)
import pandas._testing as tm
class EqualsTests:
def test_not_equals_numeric(self, index):
assert not index.equals(Index(index.asi8))
assert not index.equals(Index(index.asi8.astype("u8")))
assert not index.equals(Index(index.asi8).astype("f8"))
def test_equals(self, index):
assert index.equals(index)
assert index.equals(index.astype(object))
assert index.equals(CategoricalIndex(index))
assert index.equals(CategoricalIndex(index.astype(object)))
def test_not_equals_non_arraylike(self, index):
assert not index.equals(list(index))
def test_not_equals_strings(self, index):
other = Index([str(x) for x in index], dtype=object)
assert not index.equals(other)
assert not index.equals(CategoricalIndex(other))
def test_not_equals_misc_strs(self, index):
other = Index(list("abc"))
assert not index.equals(other)
class TestPeriodIndexEquals(EqualsTests):
@pytest.fixture
def index(self):
return period_range("2013-01-01", periods=5, freq="D")
# TODO: de-duplicate with other test_equals2 methods
@pytest.mark.parametrize("freq", ["D", "M"])
def test_equals2(self, freq):
# GH#13107
idx = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq)
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals( | pd.Series(idx) | pandas.Series |
import pandas as pd
import datetime as dtt
import numpy as np
import matplotlib.pyplot as plt
import copy
excel_path = "E:\\Desktop\\PyCode\\data.xlsx"
clean_excel_path = "E:\\Desktop\\uads\\AnalysisReport\\cleandata_basis.xlsx"
clean_excel_path2 = "E:\\Desktop\\PyCode\\cleandata_basis2.xlsx"
cornData_excel_path = "E:\\Desktop\\uads\\AnalysisReport\\CornData.xlsx"
# df = pd.read_excel(excel_path)
# print(df.shape)
# print(df.iloc[3062,6].date())
# if not pd.isnull(df.iloc[3062,0]):
# print(type(df.iloc[3062,0]))
# else:
# print('Nodata')
def df_clean(df):
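    # Build an 8-column cleaned DataFrame (clean_df) from the raw sheet; rows
    # are only processed when their first column is not null.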
delta1 = 0
delta2 = 0
delta3 = 0
dflens = len(df.iloc[:,0])
print(df.shape)
clean_df = pd.DataFrame(np.zeros((dflens,8)))
for i in range(dflens):
if | pd.notnull(df.iloc[i,0]) | pandas.notnull |
'''Unit tests for functions in cross_correlate.py'''
import numpy as np
import pandas as pd
import pytest
from cross_correlate import get_cross_cor
def test_get_cross_cor():
"""
    Tests that get_cross_cor properly correlates two arrays: identical arrays must give zero, opposite arrays must give one. Also tests that bad data is masked properly.
"""
print("Running test")
opposite_array = np.array([1, -1, 1, -1, 1])
ones_array = np.ones(5)
half_array = np.ones(5)*0.5
bad_array = np.ones(5)*0.8
bad_array[3] = -99
opposite_df = pd.DataFrame(opposite_array, columns=['f_lambda'])
ones_df = pd.DataFrame(ones_array, columns=['f_lambda'])
half_df = pd.DataFrame(half_array, columns=['f_lambda'])
bad_df = | pd.DataFrame(bad_array, columns=['f_lambda']) | pandas.DataFrame |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.testing import assert_index_equal
import matplotlib
import modin.pandas as pd
import sys
from modin.pandas.test.utils import (
NROWS,
RAND_LOW,
RAND_HIGH,
df_equals,
arg_keys,
name_contains,
test_data,
test_data_values,
test_data_keys,
axis_keys,
axis_values,
int_arg_keys,
int_arg_values,
create_test_dfs,
eval_general,
generate_multiindex,
extra_test_parameters,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def eval_setitem(md_df, pd_df, value, col=None, loc=None):
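    # Apply the same __setitem__ to both the Modin and pandas frames (in place)
    # and check that the two results still match.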
if loc is not None:
col = pd_df.columns[loc]
value_getter = value if callable(value) else (lambda *args, **kwargs: value)
eval_general(
md_df, pd_df, lambda df: df.__setitem__(col, value_getter(df)), __inplace__=True
)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_with_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_without_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [70, 600, 30, -200, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"lookup",
[
[60, 70, 90],
[60.5, 70.5, 100],
],
)
@pytest.mark.parametrize("subset", ["col2", "col1", ["col1", "col2"], None])
def test_asof_large(lookup, subset):
data = test_data["float_nan_data"]
index = list(range(NROWS))
modin_where = pd.Index(lookup)
pandas_where = pandas.Index(lookup)
compare_asof(data, index, modin_where, pandas_where, subset)
def compare_asof(
data, index, modin_where: pd.Index, pandas_where: pandas.Index, subset
):
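    """Check that Modin and pandas agree on DataFrame.asof for Index, ndarray,
    list and scalar `where` arguments."""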
modin_df = pd.DataFrame(data, index=index)
pandas_df = pandas.DataFrame(data, index=index)
df_equals(
modin_df.asof(modin_where, subset=subset),
pandas_df.asof(pandas_where, subset=subset),
)
df_equals(
modin_df.asof(modin_where.values, subset=subset),
pandas_df.asof(pandas_where.values, subset=subset),
)
df_equals(
modin_df.asof(list(modin_where.values), subset=subset),
pandas_df.asof(list(pandas_where.values), subset=subset),
)
df_equals(
modin_df.asof(modin_where.values[0], subset=subset),
pandas_df.asof(pandas_where.values[0], subset=subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
        # Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
import torch
import os
import numpy as np
from PIL import Image
import Constants
from data import cxr_process as preprocess
import pandas as pd
from torchvision import transforms
import pickle
from pathlib import Path
from torch.utils.data import Dataset, ConcatDataset
def get_dfs(envs = [], split = None, only_frontal = False):
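    # Load the metadata CSV for each environment/split, apply the
    # environment-specific preprocessing, then concatenate and shuffle.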
dfs = []
for e in envs:
func = preprocess.get_process_func(e)
paths = Constants.df_paths[e]
if split is not None:
splits = [split]
else:
splits = ['train', 'val', 'test']
dfs += [func(pd.read_csv(paths[i]), only_frontal) for i in splits]
return pd.concat(dfs, ignore_index = True, sort = False).sample(frac=1) #shuffle
def prepare_df_for_cb(df, positive_env = 'CXP'):
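    # Rename columns to the (filename, label, conf) layout expected downstream and
    # encode the environment (1 where env == positive_env) and the label as ints.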
df2 = df.copy()
df2 = df2.rename(columns = {'path': 'filename', 'Atelectasis': 'label', 'env': 'conf'})
df2['conf'] = (df2['conf'] == positive_env).astype(int)
df2['label'] = (df2['label']).astype(int)
return df2
def dataset_from_cb_output(orig_df, labels_gen, split, causal_type, data_type, cache = False):
'''
massages output from labels_gen (which is only filename, label, conf) into a more informative
dataframe format to allow for generalized caching in dataloader
'''
envs = orig_df.env.unique()
augmented_dfs, labels_env = {i: {} for i in envs}, []
temp = orig_df.set_index('path').loc[labels_gen[:, 0], :].reset_index()
for i in envs:
# assert(len(np.unique(labels_gen[:, 0])) == len(labels_gen)) # this should give error for deconf
# augmented_dfs[i][split] = orig_df[(orig_df.path.isin(labels_gen[:, 0])) & (orig_df.env == i)]
subset = (temp.env == i)
augmented_dfs[i][split] = temp[subset]
labels_env.append(labels_gen[subset, :])
dataset = get_dataset(envs, split, only_frontal = False, imagenet_norm = True, augment = 1 if split == 'train' else 0,
cache = cache, subset_label = 'Atelectasis', augmented_dfs = augmented_dfs)
return CXRWrapper(dataset, np.concatenate(labels_env), causal_type, data_type)
def get_dataset(envs = [], split = None, only_frontal = False, imagenet_norm = True, augment = 0, cache = False, subset_label = None,
augmented_dfs = None):
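    # Choose image transforms based on `augment` (-1: PIL images only, 0: ToTensor,
    # 1: random flip/rotation/crop), then build one dataset per environment.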
if split in ['val', 'test']:
assert(augment in [0, -1])
if augment == 1: # image augmentations
image_transforms = [transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.RandomResizedCrop(size = 224, scale = (0.75, 1.0)),
transforms.ToTensor()]
elif augment == 0:
image_transforms = [transforms.ToTensor()]
elif augment == -1: # only resize, just return a dataset with PIL images; don't ToTensor()
image_transforms = []
if imagenet_norm and augment != -1:
image_transforms.append(transforms.Normalize(Constants.IMAGENET_MEAN, Constants.IMAGENET_STD))
datasets = []
for e in envs:
func = preprocess.get_process_func(e)
paths = Constants.df_paths[e]
if split is not None and split != 'all':
splits = [split]
else:
splits = ['train', 'val', 'test']
if augmented_dfs is not None: # use provided dataframes instead of loading
dfs = [augmented_dfs[e][i] for i in splits]
else:
dfs = [func( | pd.read_csv(paths[i]) | pandas.read_csv |
import glob
import os
from functools import wraps
from shutil import rmtree
# import matplotlib
# matplotlib.use('Pdf') # noqa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.lines import Line2D
if os.getenv("FLEE_TYPE_CHECK") is not None and os.environ["FLEE_TYPE_CHECK"].lower() == "true":
from beartype import beartype as check_args_type
else:
def check_args_type(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@check_args_type
def mkdir_p(mypath: str) -> None:
"""
Creates a directory. equivalent to using mkdir -p on the command line
Args:
mypath (str): Description
"""
if os.path.exists(mypath):
rmtree(mypath)
os.makedirs(mypath)
@check_args_type
def plot_flee_forecast(input_dir: str, region_names: list = None) -> None:
"""
Summary
Args:
input_dir (str): Description
region_names (list, optional): Description
"""
print("INPUT DIRECTORY={}".format(input_dir))
# data_dir = os.path.join(input_dir, 'RUNS')
# print("data_dir = {}".format(data_dir))
# we add empty string here to calculate results contains from all
# available config folder names in RUNS directory
if region_names is None:
region_names = []
region_names.append("")
# print("region_names = {}".format(region_names))
# clear the result_plots directory
mkdir_p(os.path.join(input_dir, "plots"))
for region_name in region_names:
output_dir = os.path.join(input_dir, "plots")
if len(region_name) > 0:
output_dir = os.path.join(output_dir, region_name)
else:
output_dir = os.path.join(output_dir, "entire_runs")
print("OUTPUT DIRECTORY={}".format(output_dir))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if len(region_name) == 0:
all_files = glob.glob(input_dir + "/RUNS/**/out.csv")
else:
all_files = [
f
for f in glob.glob(input_dir + "/RUNS/**/out.csv")
if region_name in os.path.abspath(f)
and os.path.basename(os.path.dirname(f)).index(region_name) == 0
]
# print("Collected out.csv files for analysis:")
# pprint(all_files)
li = []
for filename in all_files:
df = | pd.read_csv(filename, index_col=None, header=0) | pandas.read_csv |
"""
Monthly Class
Meteorological data provided by Meteostat (https://dev.meteostat.net)
under the terms of the Creative Commons Attribution-NonCommercial
4.0 International Public License.
The code is licensed under the MIT license.
"""
from datetime import datetime
from typing import Union
import numpy as np
import pandas as pd
from meteostat.core.cache import get_file_path, file_in_cache
from meteostat.core.loader import processing_handler, load_handler
from meteostat.utilities.validations import validate_series
from meteostat.utilities.aggregations import degree_mean, weighted_average
from meteostat.interface.timeseries import Timeseries
from meteostat.interface.point import Point
class Monthly(Timeseries):
"""
Retrieve monthly weather data for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = 'monthly'
# Default frequency
_freq: str = '1MS'
# Columns
_columns: list = [
'year',
'month',
'tavg',
'tmin',
'tmax',
'prcp',
'snow',
'wdir',
'wspd',
'wpgt',
'pres',
'tsun'
]
# Index of first meteorological column
_first_met_col = 2
# Data types
_types: dict = {
'tavg': 'float64',
'tmin': 'float64',
'tmax': 'float64',
'prcp': 'float64',
'snow': 'float64',
'wdir': 'float64',
'wspd': 'float64',
'wpgt': 'float64',
'pres': 'float64',
'tsun': 'float64'
}
# Columns for date parsing
_parse_dates: dict = {
'time': [0, 1]
}
# Default aggregation functions
aggregations: dict = {
'tavg': 'mean',
'tmin': 'mean',
'tmax': 'mean',
'prcp': 'sum',
'snow': 'max',
'wdir': degree_mean,
'wspd': 'mean',
'wpgt': 'max',
'pres': 'mean',
'tsun': 'sum'
}
def _load(
self,
station: str
) -> None:
"""
Load file from Meteostat
"""
# File name
file = 'monthly/' + ('full' if self._model else 'obs') + \
'/' + station + '.csv.gz'
# Get local file path
path = get_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint,
file,
self._columns,
self._types,
self._parse_dates)
# Validate Series
df = validate_series(df, station)
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Filter time period and append to DataFrame
if self._start and self._end:
# Get time index
time = df.index.get_level_values('time')
# Filter & return
return df.loc[(time >= self._start) & (time <= self._end)]
# Return
return df
def _get_data(self) -> None:
"""
Get all required data
"""
if len(self._stations) > 0:
# List of datasets
datasets = []
for station in self._stations:
datasets.append((
str(station),
))
# Data Processing
return processing_handler(datasets, self._load, self.processes, self.threads)
# Empty DataFrame
return pd.DataFrame(columns=[*self._types])
def _resolve_point(
self,
method: str,
stations: pd.DataFrame,
alt: int,
adapt_temp: bool
) -> None:
"""
Project weather station data onto a single point
"""
if self._stations.size == 0 or self._data.size == 0:
return None
def adjust_temp(data: pd.DataFrame):
"""
Adjust temperature-like data based on altitude
"""
data.loc[data['tavg'] != np.NaN, 'tavg'] = data['tavg'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
data.loc[data['tmin'] != np.NaN, 'tmin'] = data['tmin'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
data.loc[data['tmax'] != np.NaN, 'tmax'] = data['tmax'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
return data
if method == 'nearest':
if adapt_temp:
# Join elevation of involved weather stations
data = self._data.join(
stations['elevation'], on='station')
# Adapt temperature-like data based on altitude
data = adjust_temp(data)
# Drop elevation & round
data = data.drop('elevation', axis=1).round(1)
else:
data = self._data
self._data = self._data.groupby(
| pd.Grouper(level='time', freq=self._freq) | pandas.Grouper |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Class of list dict."""
import csv
import os
from collections import OrderedDict
import pandas as pd
class ListDict:
"""Class of list dict.
:param data: data
:type data: list
"""
def __init__(self, data=None, **kwargs):
if data is None:
data = []
self.data = data
self.kwargs = kwargs
def __len__(self):
"""Get the length of data."""
return len(self.data)
def __getitem__(self, key: (int, slice, str, tuple, list)):
"""Get item."""
if isinstance(key, str):
return [p[key] for p in self.data]
elif isinstance(key, int):
return self.data[key]
elif isinstance(key, slice):
return self.__class__(data=self.data[key], **self.kwargs)
elif isinstance(key, (tuple, list)):
records = []
for key_ in key:
records.append(self[key_])
if isinstance(records[-1], (dict, OrderedDict)):
return self.__class__(data=records, **self.kwargs)
else:
return list(zip(*records))
else:
raise TypeError('Key must be str or list')
def __str__(self):
"""Str."""
s = []
for i in self.data:
s.append(str(i))
return '\n'.join(s)
@property
def header(self):
"""Get the header of the data."""
if len(self.data) > 0:
return list(self.data[0].keys())
else:
return None
def get(self, key, default=None):
"""Get value for key."""
try:
return self[key]
except BaseException:
return default
def append(self, data):
"""Append data."""
if isinstance(data, ListDict):
            if len(data) != 1:
                raise Exception('data len must be 1')
data = data.data[0]
if isinstance(data, (dict, OrderedDict)):
self.data.append(data)
else:
raise TypeError(
                'Method append does not support type {}'.format(
type(data)))
def extend(self, data):
"""Extend data."""
if isinstance(data, ListDict):
data = data.data
if isinstance(data, list):
self.data.extend(data)
else:
raise TypeError(
                'Method extend does not support type {}'.format(
type(data)))
def insert(self, idx, data):
"""Insert an item."""
if isinstance(data, ListDict):
            if len(data) != 1:
                raise Exception('data len must be 1')
data = data.data[0]
if isinstance(data, (dict, OrderedDict)):
self.data.insert(idx, data)
else:
raise TypeError(
                'Method insert does not support type {}'.format(
type(data)))
def pop(self, idx):
"""Pop an item."""
return self.data.pop(idx)
def to_dataframe(self):
"""Dump to DataFrame."""
return pd.DataFrame(self.data)
def to_csv(self, path, index=False, **kwargs):
"""Dump to csv file."""
df = self.to_dataframe()
df.to_csv(path, columns=self.header, index=index, **kwargs)
@classmethod
def load_csv(cls, path, **kwargs):
"""Load csv file."""
if not os.path.isfile(path):
raise FileExistsError('{} does not exist.'.format(path))
df = | pd.read_csv(path) | pandas.read_csv |
import os
import sys
import time
import asyncio
import matplotlib.pyplot as plt
import bar_chart_race as bcr
import pandas
from datetime import datetime
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from utils.setup import stats, DbStatsManager, DbConnection # noqa: E402
""" Script to generate a bar chart race video for a given canvas """
async def get_stats_df(dt1, dt2, canvas: bool) -> pandas.DataFrame:
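    """Query the per-user stat records between dt1 and dt2 for the configured
    canvas and reshape them into a DataFrame with one column per user (or per
    color when `colors` is set)."""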
# config
dates_skipped = 4
video_duration = 90 # seconds
video_duration = video_duration / dates_skipped
steps_per_period = 10
canvas_code = "55"
colors = False # to get a bar chart of the canvas colors
if colors:
nb_bars = 32
title = f"Canvas {canvas_code} - Colors (non-virgin pixels)"
else:
nb_bars = 20
title = f"Canvas {canvas_code} - Top {nb_bars}"
file_title = f"c{canvas_code}{'colors' if colors else 'top'+str(nb_bars)}.mp4"
db_conn = DbConnection()
db_stats = DbStatsManager(db_conn, stats)
record1 = await db_stats.find_record(dt1, canvas_code)
record2 = await db_stats.find_record(dt2, canvas_code)
sql = """
SELECT datetime, name, alltime_count, canvas_count
FROM pxls_user_stat
JOIN pxls_name ON pxls_name.pxls_name_id = pxls_user_stat.pxls_name_id
JOIN record on record.record_id = pxls_user_stat.record_id
WHERE pxls_user_stat.record_id BETWEEN ? AND ?
AND record.canvas_code = ?
AND pxls_user_stat.pxls_name_id in (
SELECT pxls_name_id
FROM pxls_user_stat
WHERE record_id = ?
ORDER BY canvas_count DESC
LIMIT 100 )"""
sql_colors = """
SELECT datetime, color_name as name, amount_placed as canvas_count, color_hex
FROM color_stat
JOIN record on color_stat.record_id = record.record_id
JOIN palette_color on color_stat.color_id = palette_color.color_id
WHERE record.canvas_code = ?
AND palette_color.canvas_code = ?"""
print("getting data...")
if colors:
rows = await db_conn.sql_select(sql_colors, (canvas_code, canvas_code))
else:
rows = await db_conn.sql_select(
sql,
(
record1["record_id"],
record2["record_id"],
canvas_code,
record2["record_id"],
),
)
print("nb rows:", len(rows))
# step 1 - group by date
users_dict = {}
dates_dict = {}
for row in rows:
name = row["name"]
dt = row["datetime"]
if canvas:
pixels = row["canvas_count"]
else:
pixels = row["alltime_count"]
try:
dates_dict[dt][name] = pixels
except KeyError:
dates_dict[dt] = {}
dates_dict[dt][name] = pixels
users_dict[name] = None
if not colors:
# truncate the data to only keep the top 100 (at the time of dt2)
last_values_sorted = sorted(
dates_dict[record2["datetime"]].items(), key=lambda x: x[1], reverse=True
)
users_list = [u[0] for u in last_values_sorted[0:100]]
else:
users_list = list(list(dates_dict.items())[0][1].keys())
# step 2 - make columns for each user
columns = {}
indexes = []
for i, dt in enumerate(dates_dict.keys()):
if i % dates_skipped != 0 and i != len(dates_dict.keys()) - 1:
continue
indexes.append(dt)
for name in users_list:
try:
pixels = dates_dict[dt][name]
except KeyError:
pixels = None
try:
columns[name].append(pixels)
except KeyError:
columns[name] = [pixels]
nb_dates = len(indexes)
print("nb dates:", nb_dates)
df = | pandas.DataFrame(columns, index=indexes) | pandas.DataFrame |
from matplotlib.pylab import rcParams
import requests
import pandas as pd
import numpy as np
from pandas import DataFrame
from io import StringIO
import time
import json
from datetime import date
from statsmodels.tsa.stattools import adfuller, acf, pacf
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.metrics import mean_squared_error
import matplotlib.pylab as plt
get_ipython().run_line_magic('matplotlib', 'inline')
rcParams['figure.figsize'] = 15, 6
data = | pd.read_csv("SeaPlaneTravel.csv") | pandas.read_csv |
# Generic ultratils utility functions
import os, sys
import errno
from datetime import datetime
from dateutil.tz import tzlocal
import numpy as np
import pandas as pd
try:
import ultratils.acq
except:
pass
import audiolabel
from ultratils.pysonix.bprreader import BprReader
def make_acqdir(datadir):
"""Make a timestamped directory in datadir and return a tuple with its
name and timestamp. Does not complain if directory already exists."""
tstamp = datetime.now(tzlocal()).replace(microsecond=0).isoformat().replace(":","")
acqdir = os.path.normpath(os.path.join(datadir, tstamp))
# This is 'mkdir -p' style behavior.
try:
os.makedirs(acqdir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(acqdir):
pass
else:
print("Could not create {%s}!".format(acqdir))
raise
return (acqdir, tstamp)
def extract_frames(expdir, list_filename=None, frames=None):
"""Extract image frames from specified acquisitions and return as a numpy array and
dataframe with associated metadata.
list_filename = filename containing a list of tuple triples, as in frames
frames = list of tuple triples containing an acquisition timestamp string, a
raw_data_idx frame index, and data type (default is 'bpr')
expdir = the root experiment data directory
Returns an (np.array, pd.DataFrame) tuple in which the array contains the frames of
image data and the DataFrame contains acquisition metadata. The rows of the
DataFrame correspond to the first axis of the array.
"""
fields = ['stimulus', 'timestamp', 'utcoffset', 'versions', 'n_pulse_idx',
'n_raw_data_idx', 'pulse_max', 'pulse_min', 'imaging_params',
'n_frames', 'image_w', 'image_h', 'probe']
if list_filename is not None:
frames = pd.read_csv(list_filename, sep='\s+', header=None)
else:
frames = pd.DataFrame.from_records(frames)
if frames.shape[1] == 2:
frames['dtype'] = 'bpr'
frames.columns = ['tstamp', 'fr_id', 'dtype']
rows = []
data = None
for idx, rec in frames.iterrows():
a = ultratils.acq.Acq(
timestamp=rec['tstamp'],
expdir=expdir,
dtype=rec['dtype']
)
a.gather()
if idx == 0:
for v in a.runtime_vars:
fields.insert(0, v.name)
if rec['dtype'] == 'bpr':
rdr = BprReader(a.abs_image_file)
else:
raise AcqError('Only bpr data is supported.')
# Initialize array with NaN on first pass.
if data is None:
data = np.zeros([len(frames), rdr.header.h, rdr.header.w]) * np.nan
# Assume fr_id is a raw_data_idx if it's an integer; otherwise it's a time.
try:
if 'fr_id' in frames.select_dtypes(include=['integer']).columns:
fr_idx = rec['fr_id']
else:
lm = audiolabel.LabelManager(
from_file=a.abs_sync_tg,
from_type='praat'
)
fr_idx = int(lm.tier('raw_data_idx').label_at(rec['fr_id']).text)
data[idx] = rdr.get_frame(fr_idx)
except Exception as e:
fr_idx = None
row = a.as_dict(fields)
row['raw_data_idx'] = fr_idx
rows.append(row)
return (data, | pd.DataFrame.from_records(rows) | pandas.DataFrame.from_records |
"""
This script is designed to perform table statistics
"""
import pandas as pd
import numpy as np
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
from Utils.lc_read_write_mat import read_mat
#%% ----------------------------------Our center 550----------------------------------
uid_path_550 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\selected_550.txt'
scale_path_550 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\10-24大表.xlsx'
scale_data_550 = pd.read_excel(scale_path_550)
uid_550 = pd.read_csv(uid_path_550, header=None)
scale_selected_550 = pd.merge(uid_550, scale_data_550, left_on=0, right_on='folder', how='inner')
describe_bprs_550 = scale_selected_550.groupby('诊断')['BPRS_Total'].describe()
describe_age_550 = scale_selected_550.groupby('诊断')['年龄'].describe()
describe_duration_550 = scale_selected_550.groupby('诊断')['病程月'].describe()
describe_durgnaive_550 = scale_selected_550.groupby('诊断')['用药'].value_counts()
describe_sex_550 = scale_selected_550.groupby('诊断')['性别'].value_counts()
#%% ----------------------------------BeiJing 206----------------------------------
uid_path_206 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\北大精分人口学及其它资料\SZ_NC_108_100.xlsx'
scale_path_206 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\北大精分人口学及其它资料\SZ_NC_108_100-WF.csv'
uid_to_remove = ['SZ010109','SZ010009']
scale_data_206 = pd.read_csv(scale_path_206)
scale_data_206 = scale_data_206.drop(np.array(scale_data_206.index)[scale_data_206['ID'].isin(uid_to_remove)])
scale_data_206['PANSStotal1'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['PANSStotal1'].values])
Pscore = pd.DataFrame(scale_data_206[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].iloc[:106,:], dtype = np.float64)
Pscore = np.sum(Pscore, axis=1).describe()
Nscore = pd.DataFrame(scale_data_206[['N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7']].iloc[:106,:], dtype=np.float64)
Nscore = np.sum(Nscore, axis=1).describe()
Gscore = pd.DataFrame(scale_data_206[['G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10', 'G11', 'G12', 'G13', 'G14', 'G15', 'G16']].iloc[:106,:])
Gscore = np.array(Gscore)
for i, itemi in enumerate(Gscore):
for j, itemj in enumerate(itemi):
print(itemj)
if itemj.strip() != '':
Gscore[i,j] = np.float64(itemj)
else:
Gscore[i, j] = np.nan
Gscore = pd.DataFrame(Gscore)
Gscore = np.sum(Gscore, axis=1).describe()
describe_panasstotol_206 = scale_data_206.groupby('group')['PANSStotal1'].describe()
describe_age_206 = scale_data_206.groupby('group')['age'].describe()
scale_data_206['duration'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['duration'].values])
describe_duration_206 = scale_data_206.groupby('group')['duration'].describe()
describe_sex_206 = scale_data_206.groupby('group')['sex'].value_counts()
#%% -------------------------COBRE----------------------------------
# Inputs
matroot = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Data\SelectedFC_COBRE' # all mat files directory
scale = r'H:\Data\精神分裂症\COBRE\COBRE_phenotypic_data.csv' # whole scale path
# Transform the .mat files to one .npy file
allmatname = os.listdir(matroot)
# Give labels to each subject, concatenate at the first column
allmatname = pd.DataFrame(allmatname)
allsubjname = allmatname.iloc[:,0].str.findall(r'[1-9]\d*')
allsubjname = pd.DataFrame([name[0] for name in allsubjname])
scale_data = pd.read_csv(scale,sep=',',dtype='str')
print(scale_data)
diagnosis = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')[['ID','Subject Type']]
scale_data = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Control'] = 0
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Patient'] = 1
include_loc = diagnosis['Subject Type'] != 'Disenrolled'
diagnosis = diagnosis[include_loc.values]
allsubjname = allsubjname[include_loc.values]
scale_data_COBRE = | pd.merge(allsubjname, scale_data, left_on=0, right_on=0, how='inner') | pandas.merge |
"""
<NAME> VR437255
"""
import matplotlib.pyplot as plt
import pandas as pd
import os
import warnings
from tqdm import tqdm
from utils.forecast import *
warnings.filterwarnings("ignore")
FREQ = "W"
SEASONAL = False
SEASONAL_PERIOD = {
'W': 52,
'M': 12
}
# EXECUTION SETTINGS
EXECUTE_NAIVE = True
EXECUTE_ARIMA = True
EXECUTE_STLARIMA = True
# save forecast results of best with MAE
SAVE_BEST_MAE = True
# save forecast results of best with RMSE
SAVE_BEST_RMSE = True
SAVE_PLOT = {
'NAIVE' : True,
'ARIMA' : True,
'STLARIMA' : True
}
SAVE_FORECAST_RESULTS = {
'NAIVE' : True,
'ARIMA' : True,
'STLARIMA' : True
}
SAVE_ERRORS = True
SAVE_ERRORS_STATISTICS = True
# INPUT PLOT SETTINGS
SAVE_INPUT_PLOT = False
SAVE_INPUT_DECOMPOSITION_PLOT = False
SAVE_INPUT_DIAGNOSTIC_PLOT = False
# ARIMA SETTINGS
GENERATE_AUTO_ARIMA = False
LOAD_ARIMA_FROM_FILE = not GENERATE_AUTO_ARIMA
SAVE_SUMMARY = False
SAVE_ORDERS = False
METHODS = ['naive', 'stlarima', 'arima']
ACCURACY = ['MSE', 'RMSE', 'MAE']
# ===================================
# Considering a 2-year signal
TRAIN_SIZE = 20 if FREQ == 'M' else 80
PRED_STEPS = 10 if FREQ == 'M' else 30
# FOLDER & FILE
FREQ_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), "monthly_data") if FREQ == 'M' else os.path.join(os.path.dirname(os.path.realpath(__file__)), "weekly_data")
INPUT_FOLDER = os.path.join(FREQ_FOLDER, "input")
OUTPUT_FOLDER = os.path.join(FREQ_FOLDER, "output_seasonal") if SEASONAL else os.path.join(FREQ_FOLDER, "output")
OUTPUT_ERRORS_FOLDER = os.path.join(OUTPUT_FOLDER, "errors")
DATASET_FNAME = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "dataset/input_completo.csv")
ARIMA_MODEL_FNAME = os.path.join(FREQ_FOLDER, "arima_model_seasonal.csv") if SEASONAL else os.path.join(FREQ_FOLDER, "arima_model.csv")
INPUT_PLOT_FOLDERS = {
'INPUT' : os.path.join(INPUT_FOLDER, 'input_plot'),
'DIAGNOSTIC' : os.path.join(INPUT_FOLDER, 'input_diagnostic_plot'),
'DECOMPOSITION' : os.path.join(INPUT_FOLDER, 'input_decomposition_plot')
}
OUTPUT_FORECAST_FOLDERS = {
'OUTPUT' : os.path.join(OUTPUT_FOLDER, 'forecast_results'), # base folder
'ARIMA' : os.path.join(OUTPUT_FOLDER, 'forecast_results/arima'),
'STLARIMA' : os.path.join(OUTPUT_FOLDER, 'forecast_results/stl_arima'),
'NAIVE' : os.path.join(OUTPUT_FOLDER, 'forecast_results/naive')
}
OUTPUT_PLOT_FOLDERS = {
'OUTPUT' : os.path.join(OUTPUT_FOLDER, 'output_plot'), # base folder
'ARIMA' : os.path.join(OUTPUT_FOLDER, 'output_plot/arima'),
'STLARIMA' : os.path.join(OUTPUT_FOLDER, 'output_plot/stl_arima'),
'NAIVE' : os.path.join(OUTPUT_FOLDER, 'output_plot/naive'),
'BEST' : os.path.join(OUTPUT_FOLDER, 'output_plot/best'),
}
OUTPUT_SUMMARY_FOLDERS = {
'OUTPUT': os.path.join(OUTPUT_FOLDER, 'output_summary'), # base folder
'ARIMA' : os.path.join(OUTPUT_FOLDER, 'output_summary/arima')
}
# check if all folders exist
# create directories if they don't exist
if not os.path.isdir(FREQ_FOLDER):
os.mkdir(FREQ_FOLDER)
if not os.path.isdir(INPUT_FOLDER):
os.mkdir(INPUT_FOLDER)
if not os.path.isdir(OUTPUT_FOLDER):
os.mkdir(OUTPUT_FOLDER)
if not os.path.isdir(OUTPUT_ERRORS_FOLDER):
os.mkdir(OUTPUT_ERRORS_FOLDER)
for key, directory in INPUT_PLOT_FOLDERS.items():
if not os.path.isdir(directory):
os.mkdir(directory)
for key, directory in OUTPUT_FORECAST_FOLDERS.items():
if not os.path.isdir(directory):
os.mkdir(directory)
for key, directory in OUTPUT_PLOT_FOLDERS.items():
if not os.path.isdir(directory):
os.mkdir(directory)
for key, directory in OUTPUT_SUMMARY_FOLDERS.items():
if not os.path.isdir(directory):
os.mkdir(directory)
# ===================================
ts = load_csv_timestamp(DATASET_FNAME)
if FREQ == 'M':
ts = ts.resample('M').mean()
train = ts[:TRAIN_SIZE]
test = ts[TRAIN_SIZE:]
if SAVE_INPUT_PLOT:
print("Saving input plots")
save_input_plots(train, test, INPUT_PLOT_FOLDERS['INPUT'])
if SAVE_INPUT_DIAGNOSTIC_PLOT:
print("Saving input diagnostic plots")
save_input_diagnostic_plots(train, INPUT_PLOT_FOLDERS['DIAGNOSTIC'], lags=9)
if SAVE_INPUT_DECOMPOSITION_PLOT:
print("Saving input decomposition plots")
save_input_decomposition_plots(train, INPUT_PLOT_FOLDERS['DECOMPOSITION'])
# INIT ERRORS Dataframe
errors = {
'MSE': pd.DataFrame(index=pd.Index(ts.keys()), columns=METHODS),
'RMSE': pd.DataFrame(index=pd.Index(ts.keys()), columns=METHODS),
'MAE': pd.DataFrame(index=pd.Index(ts.keys()), columns=METHODS)
}
fig = plt.figure()
if EXECUTE_NAIVE:
for key in tqdm(ts.keys()):
tmp_train = pd.DataFrame(train[key])
tmp_test = pd.DataFrame(test[key])
forecast = naive(train=tmp_train, h=PRED_STEPS, freq=FREQ)
if SAVE_PLOT['NAIVE']:
forecast_plot(train=tmp_train, test=tmp_test, fitted_values=None, forecast_values=forecast, plot_title=f'Naive {key}')
plt.savefig(os.path.join(OUTPUT_PLOT_FOLDERS['NAIVE'], key+".png"))
fig.clear()
accuracy = get_accuracy(tmp_test, forecast[:len(tmp_test)])
for a in ACCURACY:
errors[a].loc[key, 'naive'] = accuracy[a][0]
        if SAVE_FORECAST_RESULTS['NAIVE']:
index_label = "month" if FREQ == 'M' else 'week'
forecast.to_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['NAIVE'], key+'.csv'), index_label=index_label, header=['value'])
if EXECUTE_ARIMA:
print("\n")
print("="*50)
print("\nARIMA MODELS\n")
if SAVE_ORDERS:
arima_model_out = open(ARIMA_MODEL_FNAME, "w")
arima_model_out.write("key;order;seasonal_order\n")
if LOAD_ARIMA_FROM_FILE:
arima_model_orders = pd.read_csv(ARIMA_MODEL_FNAME, delimiter=";", index_col="key")
for key in tqdm(ts.keys()):
tmp_train = train[key]
tmp_test = test[key]
if GENERATE_AUTO_ARIMA:
if SEASONAL:
arima_model_orders = generate_auto_arima_model(train=tmp_train, seasonal=True, m=SEASONAL_PERIOD[FREQ])
else:
arima_model_orders = generate_auto_arima_model(train=tmp_train, seasonal=False, m=1)
order = arima_model_orders['order']
seasonal_order = arima_model_orders['seasonal_order']
elif LOAD_ARIMA_FROM_FILE:
order = arima_model_orders['order'][key].strip("(").strip(")").split(", ")
order = (int(order[0]), int(order[1]), int(order[2]))
seasonal_order = arima_model_orders['seasonal_order'][key].strip("(").strip(")").split(", ")
seasonal_order = (int(seasonal_order[0]), int(seasonal_order[1]), int(seasonal_order[2]), int(seasonal_order[3]))
model = arima_model(train=tmp_train, order=order, seasonal_order=seasonal_order)
if SAVE_SUMMARY:
summary_out = open(os.path.join(OUTPUT_SUMMARY_FOLDERS['ARIMA'], key+"_summary.txt"), "w")
summary_out.write(str(model.summary()))
summary_out.close()
if SAVE_ORDERS:
arima_model_out.write(f"{key};{str(order)};{str(seasonal_order)}\n")
arima_model_out.flush()
forecast = model.get_forecast(PRED_STEPS)
prediction = forecast.predicted_mean
conf_int_95 = forecast.conf_int(alpha=0.5)
fitted = model.fittedvalues
if SAVE_PLOT['ARIMA']:
plot_title = f"ARIMA{order}{seasonal_order} {key}" if SEASONAL else f"ARIMA{order} {key}"
forecast_plot(train=tmp_train, test=tmp_test, fitted_values=fitted,
forecast_values=prediction, plot_title=plot_title, new_fig=False)
plt.fill_between(
x=conf_int_95.index, y1=conf_int_95[f'lower {key}'], y2=conf_int_95[f'upper {key}'], alpha=0.3, color=CONF_INT_COLOR)
plt.savefig(os.path.join(OUTPUT_PLOT_FOLDERS['ARIMA'], key+".png"))
fig.clear()
accuracy = get_accuracy(tmp_test, prediction[:len(tmp_test)])
for a in ACCURACY:
errors[a].loc[key, 'arima'] = accuracy[a][0]
        if SAVE_FORECAST_RESULTS['ARIMA']:
index_label = "month" if FREQ == 'M' else 'week'
prediction.to_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['ARIMA'], key+'.csv'), index_label=index_label, header=['value'])
if SAVE_ORDERS:
arima_model_out.close()
if EXECUTE_STLARIMA:
print("\n")
print("="*50)
print("\nSTL FORECAST with ARIMA\n")
# LOAD ARIMA ORDERS FROM FILE
arima_model_orders = pd.read_csv(ARIMA_MODEL_FNAME, delimiter=";", index_col="key")
for key in tqdm(ts.keys()):
tmp_train = train[key]
tmp_test = test[key]
order = arima_model_orders['order'][key].strip("(").strip(")").split(", ")
order = (int(order[0]), int(order[1]), int(order[2]))
# STL ARIMA
model = stl_arima_model(tmp_train, order)
forecast = model.forecast(PRED_STEPS)
if SAVE_PLOT['STLARIMA']:
forecast_plot(train=tmp_train, test=tmp_test, fitted_values=None,
forecast_values=forecast, plot_title=f"STL FORECAST with ARIMA{order}")
plt.savefig(os.path.join(OUTPUT_PLOT_FOLDERS['STLARIMA'], key+".png"))
fig.clear()
accuracy = get_accuracy(tmp_test, forecast[:len(tmp_test)])
for a in ACCURACY:
errors[a].loc[key, 'stlarima'] = accuracy[a][0]
        if SAVE_FORECAST_RESULTS['STLARIMA']:
index_label = "month" if FREQ == 'M' else 'week'
forecast.to_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['STLARIMA'], key+'.csv'), index_label=index_label, header=['value'])
if SAVE_ERRORS:
for key, element in errors.items():
element.sort_values(by=METHODS).to_csv(os.path.join(OUTPUT_ERRORS_FOLDER, key + ".csv"), index_label="key")
# LOAD ERRORS FROM FILE
mae = pd.read_csv(os.path.join(OUTPUT_ERRORS_FOLDER, 'MAE.csv'), index_col='key')
rmse = pd.read_csv(os.path.join(OUTPUT_ERRORS_FOLDER, 'RMSE.csv'), index_col='key')
best_mae = {}
best_rmse = {}
for method in METHODS:
mae = mae.sort_values(by=method)
best_mae[method] = mae.iloc[0:5].index.to_list()
rmse = rmse.sort_values(by=method)
best_rmse[method] = rmse.iloc[0:5].index.to_list()
ts_forecast_index = get_prediction_ts(train, freq=FREQ, h=PRED_STEPS).index[:PRED_STEPS]
if SAVE_BEST_MAE:
print("\n")
print("="*50)
print("\nSAVE BEST RESULTS (MAE)\n")
for m, keys in tqdm(best_mae.items()):
for key in keys:
forecast_naive = pd.read_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['NAIVE'], key+'.csv'), index_col="week")
forecast_arima = pd.read_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['ARIMA'], key+'.csv'), index_col="week")
forecast_stlarima = pd.read_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['STLARIMA'], key+'.csv'), index_col="week")
ts_forecast = pd.DataFrame(index=ts_forecast_index, columns=['naive', 'arima', 'stlarima'])
ts_forecast['naive'] = forecast_naive.values
ts_forecast['arima'] = forecast_arima.values
ts_forecast['stlarima'] = forecast_stlarima.values
tmp_train = train[key]
tmp_test = test[key]
compare_forecast_plot(train=tmp_train, test=tmp_test, ts_forecast=ts_forecast, fig=fig, plot_title=f"{key} - Compare Plot")
plt.savefig(os.path.join(OUTPUT_PLOT_FOLDERS['BEST'], key+".png"))
fig.clear()
if SAVE_BEST_RMSE:
print("\n")
print("="*50)
print("\nSAVE BEST RESULTS (RMSE)\n")
for m, keys in tqdm(best_rmse.items()):
for key in keys:
forecast_naive = pd.read_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['NAIVE'], key+'.csv'), index_col="week")
forecast_arima = pd.read_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['ARIMA'], key+'.csv'), index_col="week")
forecast_stlarima = pd.read_csv(os.path.join(OUTPUT_FORECAST_FOLDERS['STLARIMA'], key+'.csv'), index_col="week")
ts_forecast = | pd.DataFrame(index=ts_forecast_index, columns=['naive', 'arima', 'stlarima']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python [conda env:fine-dev-py36]
# language: python
# name: conda-env-fine-dev-py36-py
# ---
# %%
import warnings
warnings.filterwarnings('ignore') # For better visibility, warnings are turned off in this notebook
# %% [markdown]
# # FINE Webinar Part I: 2-nodal Electricity Supply System
# %% [markdown]
# In this application of the FINE framework, an energy supply system consisting of two regions is modeled and optimized.
#
# The workflow is structured as follows:
# - Required packages are imported
# - An energy system model instance is created
# - Commodity sources are added to the energy supply system model
# - Commodity conversion components are added to the energy supply system model
# - Commodity storages are added to the energy supply system model
# - Commodity transmission components are added to the energy supply system model
# - Commodity sinks are added to the energy supply system model
# - The energy supply system model is optimized
# - Selected optimization results are presented
# %% [markdown]
# # Import required packages
#
# The FINE framework is imported which provides the required classes and functions for modeling the energy system.
# %%
import FINE as fn # Provides objects and functions to model an energy system
import pandas as pd # Used to manage data in tables
import shapely as shp # Used to generate geometric objects
import numpy as np # Used to generate random input data
np.random.seed(42) # Sets a "seed" to produce the same random input data in each model run
# %% tags=["nbval-skip"]
import geopandas as gpd # Used to display geo-referenced plots
# %% [markdown]
# # Model an energy system
# %% [markdown]
# ## Create an energy system model instance
#
# The structure of the energy supply system model is given by the considered locations, commodities, the number of time steps as well as the hours per time step.
#
# The commodities are specified by a unit (i.e. 'GW_electric', 'GW_naturalGas_lowerHeatingValue', 'Mio. t CO2/h') which can be given as an energy or mass unit per hour. Furthermore, the cost unit and length unit are specified.
# %% code_folding=[]
# Input parameters
locations = {'regionN', 'regionS'}
commodityUnitDict = {'electricity': r'GW$_{el}$', 'naturalGas': r'GW$_{CH_{4},LHV}$',
'CO2': r'Mio. t$_{CO_2}$/h'}
commodities = {'electricity', 'naturalGas', 'CO2'}
numberOfTimeSteps, hoursPerTimeStep = 8760, 1
costUnit, lengthUnit = '1e6 Euro', 'km'
# Code
esM = fn.EnergySystemModel(locations=locations, commodities=commodities,
numberOfTimeSteps=numberOfTimeSteps, commodityUnitsDict=commodityUnitDict,
hoursPerTimeStep=hoursPerTimeStep, costUnit=costUnit, lengthUnit=lengthUnit, verboseLogLevel=0)
# %% [markdown]
# ## Add source components
#
# Source components generate commodities across the energy system's virtual boundaries.
# %%
# Input parameters
name, commodity ='Wind turbines', 'electricity'
hasCapacityVariable = True
operationRateMax = pd.DataFrame([[np.random.beta(a=2,b=7.5),np.random.beta(a=2,b=9)]
for t in range(8760)],
index=range(8760), columns=['regionN', 'regionS']).round(6)
capacityMax = | pd.Series([400, 200], index=['regionN', 'regionS']) | pandas.Series |
from __future__ import annotations
from ..watcher import Watcher as W
import pandas as pd
import numpy as np
import scipy
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression, RidgeClassifier, PassiveAggressiveClassifier, LinearRegression
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron, SGDClassifier
from sklearn.neural_network import MLPClassifier
from time import time
from tensorflow.keras import utils
from keras.models import Sequential
from keras.layers import Dense, Flatten, Activation, Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD
# This class will also contain functions for statistical analysis and plotting
# Class for machine learning methods
class WifiLearn:
def __init__(self, x_train: pd.DataFrame, y_train: pd.DataFrame, x_test: pd.DataFrame, y_test: pd.DataFrame):
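        """Store the train/test splits, record their sizes and pre-compute
        one-hot encoded targets."""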
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.lens = { 'train': x_train.shape[0], 'test': x_test.shape[0] }
self.__w = W()
self.results = []
self.__to_categorical()
self.__w.hprint(self.__w.INFO, 'WifiLearn: create with ' + str(self.lens['train']) + ' train and ' + str(self.lens['test']) + ' test packets')
@W.stopwatch
def __to_categorical(self):
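        # Map the string class labels to consecutive integer codes, then build
        # one-hot encoded target matrices for the train and test sets.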
self.types = sorted(self.y_train.unique())
i = 0
y_train = self.y_train.copy()
y_test = self.y_test.copy()
for t in self.types:
y_train[y_train == t] = i
y_test[y_test == t] = i
i += 1
self.y_train_cat = utils.to_categorical(y_train, len(self.types))
self.y_test_cat = utils.to_categorical(y_test, len(self.types))
def augment(self, part=1):
pass
def normalize(self):
pass
def shuffle(self, part: int=1):
pass
def print(self) -> WifiLearn:
print( | pd.DataFrame(self.results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# + {}
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import matplotlib as mpl
import numba
import squarify
import numpy as np
from math import pi
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture as GMM
from umap import UMAP
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from datetime import date
from warnings import filterwarnings
import os
import community
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
from keras.utils import np_utils
from keras.metrics import categorical_accuracy
from keras.layers import Dropout
import keras.backend as K
filterwarnings('ignore')
# +
def get_gene_data(data, gene_name_column, test_gene_list):
"""Extract data from specific genes given a larger dataframe.
Inputs
* data: large dataframe from where to filter
* gene_name_column: column to filter from
* test_gene_list : a list of genes you want to get
Output
* dataframe with the genes you want
"""
gene_profiles = pd.DataFrame()
for gene in data[gene_name_column].values:
if gene in test_gene_list:
df_ = data[(data[gene_name_column] == gene)]
gene_profiles = pd.concat([gene_profiles, df_])
gene_profiles.drop_duplicates(inplace = True)
return gene_profiles
# ---------PANDAS FUNCTIONS FOR DATA EXPLORATION -------------------------
def count_feature_types(data):
"""
Get the dtype counts for a dataframe's columns.
"""
df_feature_type = data.dtypes.sort_values().to_frame('feature_type')\
.groupby(by='feature_type').size().to_frame('count').reset_index()
return df_feature_type
def get_df_missing_columns(data):
'''
Get a dataframe of the missing values in each column with its
corresponding dtype.
'''
# Generate a DataFrame with the % of missing values for each column
df_missing_values = (data.isnull().sum(axis = 0) / len(data) * 100)\
.sort_values(ascending = False)\
.to_frame('% missing_values').reset_index()
# Generate a DataFrame that indicated the data type for each column
df_feature_type = data.dtypes.to_frame('feature_type').reset_index()
# Merge frames
missing_cols_df = pd.merge(df_feature_type, df_missing_values, on = 'index',
how = 'inner')
missing_cols_df.sort_values(['% missing_values', 'feature_type'], inplace = True)
return missing_cols_df
def find_constant_features(data):
"""
Get a list of the constant features in a dataframe.
"""
const_features = []
for column in list(data.columns):
if data[column].unique().size < 2:
const_features.append(column)
return const_features
def duplicate_columns(frame):
'''
Get a list of the duplicate columns in a pandas dataframe.
'''
groups = frame.columns.to_series().groupby(frame.dtypes).groups
dups = []
for t, v in groups.items():
cs = frame[v].columns
vs = frame[v]
lcs = len(cs)
for i in range(lcs):
ia = vs.iloc[:,i].values
for j in range(i+1, lcs):
ja = vs.iloc[:,j].values
if np.array_equal(ia, ja):
dups.append(cs[i])
break
return dups
def get_duplicate_columns(df):
"""
Returns a list of duplicate columns
"""
groups = df.columns.to_series().groupby(df.dtypes).groups
dups = []
for t, v in groups.items():
cs = df[v].columns
vs = df[v]
lcs = len(cs)
for i in range(lcs):
ia = vs.iloc[:,i].values
for j in range(i+1, lcs):
ja = vs.iloc[:,j].values
if np.array_equal(ia, ja):
dups.append(cs[i])
break
return dups
def get_df_stats(df):
"""
Wrapper for dataframe stats.
Output: missing_cols_df, const_feats, dup_cols_list
"""
missing_cols_df = get_df_missing_columns(df)
const_features_list = find_constant_features(df)
dup_cols_list = duplicate_columns(df)
return missing_cols_df, const_features_list, dup_cols_list
def test_missing_data(df, fname):
"""Look for missing entries in a DataFrame."""
assert np.all(df.notnull()), fname + ' contains missing data'
def col_encoding(df, column):
"""
Returns a one hot encoding of a categorical colunmn of a DataFrame.
------------------------------------------------------------------
Params
-------
-df:
-column: name of the column to be one-hot-encoded in string format.
Returns
---------
- hot_encoded: one-hot-encoding in matrix format.
"""
le = LabelEncoder()
label_encoded = le.fit_transform(df[column].values)
hot = OneHotEncoder(sparse = False)
hot_encoded = hot.fit_transform(label_encoded.reshape(len(label_encoded), 1))
return hot_encoded
def one_hot_df(df, cat_col_list):
"""
Make one hot encoding on categoric columns.
Returns a dataframe for the categoric columns provided.
-------------------------
inputs
- df: original input DataFrame
- cat_col_list: list of categorical columns to encode.
outputs
- df_hot: one hot encoded subset of the original DataFrame.
"""
df_hot = pd.DataFrame()
for col in cat_col_list:
encoded_matrix = col_encoding(df, col)
df_ = pd.DataFrame(encoded_matrix,
columns = [col+ ' ' + str(int(i))\
for i in range(encoded_matrix.shape[1])])
df_hot = pd.concat([df_hot, df_], axis = 1)
return df_hot
# OTHER FUNCTIONS
def plot_kmeans(kmeans, X, n_clusters=4, rseed=0, ax=None):
"""
Wrapper from JakeVDP data analysis handbook
"""
labels = kmeans.fit_predict(X)
# plot the input data
ax = ax or plt.gca()
ax.axis('equal')
ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)
# plot the representation of the KMeans model
centers = kmeans.cluster_centers_
radii = [cdist(X[labels == i], [center]).max()
for i, center in enumerate(centers)]
for c, r in zip(centers, radii):
ax.add_patch(plt.Circle(c, r, fc='#CCCCCC', lw=3, alpha=0.5, zorder=1))
@numba.jit(nopython=True)
def draw_bs_sample(data):
"""
Draw a bootstrap sample from a 1D data set.
Wrapper from J. Bois' BeBi103 course.
"""
return np.random.choice(data, size=len(data))
def net_stats(G):
'''Get basic network stats and plots. Specifically degree and clustering coefficient distributions.'''
net_degree_distribution= []
for i in list(G.degree()):
net_degree_distribution.append(i[1])
print("Number of nodes in the network: %d" %G.number_of_nodes())
print("Number of edges in the network: %d" %G.number_of_edges())
print("Avg node degree: %.2f" %np.mean(list(net_degree_distribution)))
print('Avg clustering coefficient: %.2f'%nx.cluster.average_clustering(G))
print('Network density: %.2f'%nx.density(G))
fig, axes = plt.subplots(1,2, figsize = (16,4))
axes[0].hist(list(net_degree_distribution), bins=20, color = 'lightblue')
axes[0].set_xlabel("Degree $k$")
#axes[0].set_ylabel("$P(k)$")
axes[1].hist(list(nx.clustering(G).values()), bins= 20, color = 'lightgrey')
axes[1].set_xlabel("Clustering Coefficient $C$")
#axes[1].set_ylabel("$P(k)$")
axes[1].set_xlim([0,1])
def get_network_hubs(ntw):
"""
    input: a NetworkX graph
    output: list of the top 10 (node, eigenvector centrality score) pairs
"""
eigen_cen = nx.eigenvector_centrality(ntw)
hubs = sorted(eigen_cen.items(), key = lambda cc:cc[1], reverse = True)[:10]
return hubs
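# Minimal usage sketch on a small built-in graph (illustrative only):
# G = nx.karate_club_graph()
# get_network_hubs(G)   # -> up to 10 (node, centrality) tuples, highest centrality first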
def get_network_clusters(network_lcc, n_clusters):
"""
    input: network_lcc, a NetworkX graph whose nodes carry a 'modularity' attribute,
           and n_clusters, the number of clusters
    output: a list with the network's clusters (one list of nodes per cluster)
"""
cluster_list = []
for i in range(n_clusters):
cluster_lcc = [n for n in network_lcc.nodes()\
if network_lcc.node[n]['modularity'] == i]
cluster_list.append(cluster_lcc)
return cluster_list
def download_and_preprocess_data(org, data_dir = None, variance_ratio = 0.8,
output_path = '~/Downloads/'):
"""
    General function to download and preprocess a dataset from Colombos.
    May not work on Windows because it shells out to wget/unzip; on Windows,
    consider downloading the dataset with urllib instead.
    Params
    -------
    org (str): organism to work with. Available datasets are E. coli (ecoli),
        B. subtilis (bsubt), P. aeruginosa (paeru), M. tb (mtube), etc.
        Source: http://colombos.net/cws_data/compendium_data/
    data_dir (str): path to directory + filename. If None, the data is downloaded
        from the internet.
    variance_ratio (float): fraction of the variance explained to keep in the PCA denoising.
    output_path (str): directory where the denoised CSV is written.
Returns
--------
denoised (pd.DataFrame)
"""
#Check if dataset is in directory
if data_dir is None:
download_cmd = 'wget http://colombos.net/cws_data/compendium_data/'\
+ org + '_compendium_data.zip'
unzip_cmd = 'unzip '+org +'_compendium_data.zip'
os.system(download_cmd)
os.system(unzip_cmd)
df = pd.read_csv('colombos_'+ org + '_exprdata_20151029.txt',
sep = '\t', skiprows= np.arange(6))
df.rename(columns = {'Gene name': 'gene name'}, inplace = True)
df['gene name'] = df['gene name'].apply(lambda x: x.lower())
else:
df = pd.read_csv(data_dir, sep = '\t', skiprows= np.arange(6))
try :
df.rename(columns = {'Gene name': 'gene name'}, inplace = True)
except:
pass
annot = df.iloc[:, :3]
data = df.iloc[:, 3:]
preprocess = make_pipeline(SimpleImputer(strategy = 'median'),
StandardScaler(), )
scaled_data = preprocess.fit_transform(data)
# Initialize PCA object
pca = PCA(variance_ratio, random_state = 42).fit(scaled_data)
# Project to PCA space
projected = pca.fit_transform(scaled_data)
# Reconstruct the dataset using 80% of the variance of the data
reconstructed = pca.inverse_transform(projected)
# Save into a dataframe
reconstructed_df = pd.DataFrame(reconstructed, columns = data.columns.to_list())
# Concatenate with annotation data
denoised_df = pd.concat([annot, reconstructed_df], axis = 1)
denoised_df['gene name'] = denoised_df['gene name'].apply(lambda x: x.lower())
# Export dataset
    denoised_df.to_csv(output_path + 'denoised_' + org + '.csv', index = False)
    return denoised_df
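# Example call (hypothetical; downloads the E. coli compendium and writes
# denoised_ecoli.csv to ~/Downloads/):
# denoised = download_and_preprocess_data('ecoli', variance_ratio=0.8)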
def annot_data_trn(
tf_tf_net_path=None,
trn_path=None,
denoised_data_path=None,
org="ecoli",
output_path= "~/Downloads/"):
"""
    Annotate the preprocessed dataset with network clusters as a one-hot matrix.
    Performs the operation on E. coli by default.
    Params
    -------
    tf_tf_net_path (str): path to the RegulonDB TF-TF network file; downloaded if None.
    trn_path (str): path to the RegulonDB TF-gene network (TRN) file; downloaded if None.
    denoised_data_path (str): path to the denoised expression dataset; a local
        denoised CSV is read if None.
    org (str): organism identifier, "ecoli" by default.
    output_path (str): directory where outputs are written.
    Returns
    --------
"""
# Load TF-TF net and TRN
if tf_tf_net_path is None and org is None:
os.system(
"wget http://regulondb.ccg.unam.mx/menu/download/datasets/files/network_tf_tf.txt"
)
tf_trn = pd.read_csv(
"network_tf_tf.txt",
delimiter="\t",
comment="#",
names=["TF", "TG", "regType", "ev", "confidence", "unnamed"],
usecols=np.arange(5),
)
else:
try:
tf_trn = pd.read_csv(tf_tf_net_path)
except:
tf_trn = pd.read_csv(
tf_tf_net_path,
delimiter="\t",
comment="#",
names=["TF", "TG", "regType", "ev", "confidence", "unnamed"],
usecols=np.arange(5),
)
if trn_path is None:
os.system(
"wget http://regulondb.ccg.unam.mx/menu/download/datasets/files/network_tf_gene.txt"
)
trn = pd.read_csv(
"network_tf_gene.txt",
delimiter="\t",
comment="#",
names=["TF", "TG", "regType", "ev", "confidence", "unnamed"],
usecols=np.arange(5),
)
else:
try:
trn = pd.read_csv(trn_path)
except:
trn = pd.read_csv(
trn_path,
delimiter="\t",
comment="#",
names=["TF", "TG", "regType", "ev", "confidence", "unnamed"],
usecols=np.arange(5),
)
# Lowercase gene names for both datasets
tf_trn.TF = tf_trn.TF.apply(lambda x: x.lower())
tf_trn.TG = tf_trn.TG.apply(lambda x: x.lower())
trn.TF = trn.TF.apply(lambda x: x.lower())
trn.TG = trn.TG.apply(lambda x: x.lower())
# Turn the TF TRN dataframe into a graph object
net = nx.from_pandas_edgelist(
df=tf_trn, source="TF", target="TG"
)
# Compute the LCC
net = max(nx.connected_component_subgraphs(net), key=len)
# Cluster TF net
communities = community.best_partition(net)
# Get number of clusters
n_clusters_tf = max(communities.values())
# Embed cluster annotation in net
nx.set_node_attributes(net, values=communities, name="modularity")
# Get np.array of TF clusters
cluster_list = np.array(get_network_clusters(net, n_clusters_tf))
# Get cluster sizes
cluster_sizes = np.array([len(clus) for clus in cluster_list])
# Select only the clusters with more than 5 TFs
clus_list = cluster_list[cluster_sizes > 5]
# Get a DataFrame of the TGs in each cluster
tgs_ = pd.DataFrame()
for ix, clus in enumerate(clus_list):
clus_trn = get_gene_data(trn, "TF", clus)
clus_tgs = list(set(clus_trn["TG"].values))
tgs_df = pd.DataFrame({"TGs": clus_tgs})
tgs_df["cluster"] = ix + 1
tgs_ = pd.concat([tgs_, tgs_df])
# -----Start constructing the annotated dataset ------
if denoised_data_path is None:
try:
denoised = | pd.read_csv("denoised_coli.csv") | pandas.read_csv |
from piper.custom import ratio
import datetime
import numpy as np
import pandas as pd
import pytest
from time import strptime
from piper.custom import add_xl_formula
from piper.factory import sample_data
from piper.factory import generate_periods, make_null_dates
from piper.custom import from_julian
from piper.custom import fiscal_year
from piper.custom import from_excel
from piper.custom import to_julian
from piper.verbs import across
# t_sample_data {{{1
@pytest.fixture
def t_sample_data():
return sample_data()
# test_add_xl_formula {{{1
def test_add_xl_formula(t_sample_data):
df = t_sample_data
formula = '=CONCATENATE(A{row}, B{row}, C{row})'
add_xl_formula(df, column_name='X7', formula=formula)
expected = (367, )
assert expected == df.X7.shape
# test_across_str_date_single_col_pd_to_datetime {{{1
def test_across_str_date_single_col_pd_to_datetime():
''' '''
test = ['30/11/2019', '29/4/2019', '30/2/2019', '28/2/2019', '2019/4/30']
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, 'dates', pd.to_datetime, format='%d/%m/%Y', errors='coerce')
assert exp.equals(got) == True
# test_across_str_date_single_col_lambda {{{1
def test_across_str_date_single_col_lambda():
''' '''
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, 'dates', convert_date)
assert exp.equals(got) == True
# test_across_raise_column_parm_none_ValueError {{{1
def test_across_raise_column_parm_none():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, columns=None, function=convert_date)
assert exp.equals(got) == True
# test_across_raise_function_parm_none_ValueError {{{1
def test_across_raise_function_parm_none_ValueError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(ValueError):
got = across(got, columns='dates', function=None)
# test_across_raise_Series_parm_TypeError {{{1
def test_across_raise_Series_parm_TypeError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(TypeError):
got = across(pd.Series(test), columns='dates', function=convert_date)
# test_across_raise_column_parm_ValueError {{{1
def test_across_raise_column_parm_ValueError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(ValueError):
got = across(got, columns='invalid', function=convert_date)
# test_across_dataframe_single_column_with_lambda {{{1
def test_across_dataframe_single_column_with_lambda():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
got = across(df, columns='effective', function=convert_date)
assert exp.equals(got) == True
# test_across_dataframe_multiple_columns_with_lambda {{{1
def test_across_dataframe_multiple_columns_with_lambda():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
exp.expired = exp.expired.apply(convert_date)
got = across(df, columns=['effective', 'expired'], function=convert_date)
assert exp.equals(got) == True
# test_across_dataframe_multiple_columns_raise_invalid_column {{{1
def test_across_dataframe_multiple_columns_raise_invalid_column():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
exp.expired = exp.expired.apply(convert_date)
with pytest.raises(ValueError):
got = across(df, columns=['effective', 'invalid'], function=convert_date)
# test_dividing_numbers {{{1
def test_dividing_numbers():
''' '''
exp = 1
got = ratio(2, 2)
assert exp == got
# test_dividing_numbers_by_zero {{{1
def test_dividing_numbers_by_zero():
''' '''
exp = np.inf
got = ratio(2, 0)
assert exp == got
# test_dividing_numbers_floats {{{1
def test_dividing_numbers_floats():
''' '''
exp = 1.0
got = ratio(2.0, 2.0)
assert exp == got
# test_dividing_numbers_float_percent {{{1
def test_dividing_numbers_float_percent():
''' '''
exp = '100.0%'
got = ratio(2.0, 2.0, percent=True)
assert exp == got
# test_dividing_numbers_float_percent_with_round {{{1
def test_dividing_numbers_float_percent_with_round():
''' '''
exp = 100.0000
got = ratio(2.0, 2.0, percent=True, format=False, precision=4)
assert exp == got
exp = 50.00
got = ratio(1.0, 2.0, percent=True, format=False, precision=2)
assert exp == got
# test_dividing_numbers_int_percent_with_round {{{1
def test_dividing_numbers_int_percent_with_round():
''' '''
exp = 100.0000
got = ratio(2, 2, percent=True, format=False, precision=4)
assert exp == got
exp = 50.00
got = ratio(1, 2, percent=True, format=False, precision=2)
assert exp == got
# test_dividing_numbers_percent_with_format {{{1
def test_dividing_numbers_percent_with_format():
''' '''
exp = '100.0%'
got = ratio(2.0, 2.0, percent=True, format=True)
assert exp == got
# test_dividing_numbers_percent_with_precision_format {{{1
def test_dividing_numbers_percent_with_precision_format():
''' '''
exp = '66.66%'
got = ratio(1.3333, 2.0, percent=True,
precision=2, format=True)
assert exp == got
# test_dividing_by_two_series {{{1
def test_dividing_by_two_series():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1, 2, 3])
exp = pd.Series([10, 10, 10], dtype=float)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_zero_denominator {{{1
def test_dividing_by_two_series_with_zero_denominator():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1, 0, 3])
exp = pd.Series([10, np.inf, 10], dtype=float)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_decimals {{{1
def test_dividing_by_two_series_with_decimals():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1.3, 5.4, 3])
exp = (s1 / s2).round(2)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_rounding {{{1
def test_dividing_by_two_series_with_rounding():
''' '''
s1 = | pd.Series([10, 20, 30]) | pandas.Series |
import numpy as np
import cv2
import csv
import os
import pandas as pd
import time
def calcuNearestPtsDis2(ptList1):
''' Find the nearest point of each point in ptList1 & return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if len(ptList1)<=1:
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
ptList2 = np.delete(ptList1,i,axis=0)
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
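# Worked example (synthetic points): each point's nearest-neighbour distance below is 5,
# so the function returns 5.0.
# pts = np.array([[0, 0], [3, 4], [0, 8]])
# calcuNearestPtsDis2(pts)   # -> 5.0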
def calcuNearestPtsDis(ptList1, ptList2):
''' Find the nearest point of each point in ptList1 from ptList2
& return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
ptList2: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if (not len(ptList2)) or (not len(ptList1)):
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
def calcuNearestPts(csvName1, csvName2):
ptList1_csv = pd.read_csv(csvName1,usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csvName2,usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
ptList1_csv = pd.concat([ptList1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
ptList1_csv.to_csv(csvName1,index=False)
return minDisInd
def drawDisPic(picInd):
picName = 'patients_dataset/image/'+ picInd +'.png'
img = cv2.imread(picName)
csvName1='patients_dataset/data_csv/'+picInd+'other_tumour_pts.csv'
csvName2='patients_dataset/data_csv/'+picInd+'other_lymph_pts.csv'
ptList1_csv = pd.read_csv(csvName1)
ptList2_csv = pd.read_csv(csvName2)
ptList1 = ptList1_csv.values
ptList2 = ptList2_csv.values
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 3 , (0, 0, 255), -1 )
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), (0,255,0), 1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 3 , (255, 0, 0), -1 )
cv2.imwrite( picInd+'_dis.png',img)
def drawDistancePic(disName1, disName2, picID):
''' Draw & save the distance pics
Parameters
----------
disName1,disName2: str
such as 'positive_lymph', 'all_tumour'
picID: str
the patient's ID
'''
cellName_color = {'other_lymph': (255, 0, 0), 'positive_lymph': (255, 255, 0),
'other_tumour': (0, 0, 255), 'positive_tumour': (0, 255, 0)}
ptline_color = {'positive_lymph': (0,0,255), 'positive_tumour': (0,0,255),
'ptumour_plymph': (51, 97, 235), 'other_tumour': (0, 255, 0)}
if (disName1 == 'all_tumour' and disName2 == 'all_lymph') or (disName1 == 'all_tumour' and disName2 == 'positive_lymph'):
line_color = (0,255,255)
elif disName1 == 'positive_tumour' and disName2 == 'positive_lymph':
line_color = (51, 97, 235)
else:
line_color = ptline_color[disName1]
csv_dir = '/data/Datasets/MediImgExp/data_csv'
img_dir = '/data/Datasets/MediImgExp/image'
if disName1 == 'all_tumour' and disName2 == 'positive_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
# positive tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
# other tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList3[ptList1[i, 2],:2]), line_color, 1)
ptList2 = dis2_csv.values
for i in range(len(ptList2)):
img = cv2.line(img, tuple(ptList2[i,:2]), tuple(ptList3[ptList2[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 4, (0, 255, 0), -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 4, (0, 0, 255), -1)
for i in range(len(ptList3)):
img = cv2.circle(img, tuple(ptList3[i,:2]), 4, (255, 255, 0), -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 == 'all_tumour' and disName2 == 'all_lymph':
dis1_csv = | pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord']) | pandas.read_csv |
# miscellaneous tools
import os
import subprocess
import sys
import pandas as pd
from collections import defaultdict
import gzip
from numpy import unique
import numpy as np
import pickle
#import HTSeq
#import pysam
#PATH = './'
PATH = os.path.dirname(__file__)
HOME = os.path.expanduser('~')
STAR_PATH = os.path.join(HOME, 'split_seq_reqs', 'bin', 'STAR')
if not os.path.exists(STAR_PATH):
STAR_PATH = 'STAR'
SAMTOOLS_PATH = os.path.join(HOME, 'split_seq_reqs', 'bin', 'samtools')
if not os.path.exists(SAMTOOLS_PATH):
SAMTOOLS_PATH = 'samtools'
def download_genome(genome_dir, ref='hg19'):
"""
Downloads the hg19 reference genome...
"""
# TODO: find the hg19 genome???
def make_combined_genome(species, fasta_filenames, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Create a combined fasta file with species names added to the start of each chromosome name
cur_fa = fasta_filenames[0]
cur_species = species[0]
if fasta_filenames[0].split('.')[-1]=='gz':
command = """gunzip -cd {0} | awk 'substr($0,1,1)==">"{{print ">{1}_"substr($1,2,length($1)-1),$2,$3,$4}}substr($0,1,1)!=">"{{print $0}}' > {2}/genome.fa""".format(cur_fa, cur_species, output_dir)
else:
command = """cat {0} | awk 'substr($0,1,1)==">"{{print ">{1}_"substr($1,2,length($1)-1),$2,$3,$4}}substr($0,1,1)!=">"{{print $0}}' > {2}/genome.fa""".format(cur_fa, cur_species, output_dir)
rc = subprocess.call(command, shell=True)
for i in range(1,len(species)):
cur_fa = fasta_filenames[i]
cur_species = species[i]
        if cur_fa.split('.')[-1]=='gz':
command = """gunzip -cd {0} | awk 'substr($0,1,1)==">"{{print ">{1}_"substr($1,2,length($1)-1),$2,$3,$4}}substr($0,1,1)!=">"{{print $0}}' >> {2}/genome.fa""".format(cur_fa, cur_species, output_dir)
else:
command = """cat {0} | awk 'substr($0,1,1)==">"{{print ">{1}_"substr($1,2,length($1)-1),$2,$3,$4}}substr($0,1,1)!=">"{{print $0}}' >> {2}/genome.fa""".format(cur_fa, cur_species, output_dir)
rc = subprocess.call(command, shell=True)
def split_attributes(s):
""" Returns a dictionary from string of attributes in a GTF/GFF file
"""
att_list = s[:-1].split('; ')
att_keys = [a.split(' ')[0] for a in att_list]
att_values = [' '.join(a.split(' ')[1:]) for a in att_list]
return dict(zip(att_keys,att_values))
def get_attribute(s,att):
att_value = ''
try:
att_value = split_attributes(s)[att].strip('"')
except:
att_value = ''
return att_value
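# Worked example on a typical GTF attribute string (illustrative):
# s = 'gene_id "g1"; gene_name "abcB";'
# split_attributes(s)              # -> {'gene_id': '"g1"', 'gene_name': '"abcB"'}
# get_attribute(s, 'gene_name')    # -> 'abcB'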
def make_gtf_annotations(species, gtf_filenames, output_dir, splicing):
splicing = splicing=='True'
# Load the GTFs
names = ['Chromosome',
'Source',
'Feature',
'Start',
'End',
'Score',
'Strand',
'Frame',
'Attributes']
gtfs = {}
for i in range(len(species)):
s = species[i]
filename = gtf_filenames[i]
gtfs[s] = pd.read_csv(filename,sep='\t',names=names,comment='#',engine='python')
# TODO: allow users to specify the gene biotypes that they want to keep
# For now we keep the following
gene_biotypes_to_keep = ['protein_coding',
'lincRNA',
'antisense',
'IG_C_gene',
'IG_C_pseudogene',
'IG_D_gene',
'IG_J_gene',
'IG_J_pseudogene',
'IG_V_gene',
'IG_V_pseudogene',
'TR_C_gene',
'TR_D_gene',
'TR_J_gene',
'TR_J_pseudogene',
'TR_V_gene',
'TR_V_pseudogene']
if splicing:
# Generate a combined GTF with only the gene annotations
gtf_gene_combined = gtfs[species[0]].query('Feature=="gene"')
gtf_gene_combined.loc[:,'Chromosome'] = species[0] + '_' + gtf_gene_combined.Chromosome.apply(lambda s:str(s))
for i in range(1,len(species)):
gtf_gene_combined_temp = gtfs[species[i]].query('Feature=="gene"')
gtf_gene_combined_temp.loc[:,'Chromosome'] = species[i] + '_' + gtf_gene_combined_temp.Chromosome.apply(lambda s:str(s))
gtf_gene_combined = pd.concat([gtf_gene_combined,gtf_gene_combined_temp])
gene_biotypes = gtf_gene_combined.Attributes.apply(lambda s: get_attribute(s,'gene_biotype'))
#gtf_gene_combined = gtf_gene_combined.iloc[np.where(gene_biotypes.isin(gene_biotypes_to_keep).values)]
gtf_gene_combined.index = range(len(gtf_gene_combined))
gtf_gene_combined.to_csv(output_dir + '/genes.gtf',sep='\t',index=False)
# Generate a combined GTF with only the exon annotations
gtf_exon_combined = gtfs[species[0]].query('Feature=="exon"')
gtf_exon_combined.loc[:,'Chromosome'] = species[0] + '_' + gtf_exon_combined.Chromosome.apply(lambda s:str(s))
for i in range(1,len(species)):
gtf_exon_combined_temp = gtfs[species[i]].query('Feature=="exon"')
gtf_exon_combined_temp.loc[:,'Chromosome'] = species[i] + '_' + gtf_exon_combined_temp.Chromosome.apply(lambda s:str(s))
gtf_exon_combined = pd.concat([gtf_exon_combined,gtf_exon_combined_temp])
gene_biotypes = gtf_exon_combined.Attributes.apply(lambda s: get_attribute(s,'gene_biotype'))
#gtf_exon_combined = gtf_exon_combined.iloc[np.where(gene_biotypes.isin(gene_biotypes_to_keep).values)]
gtf_exon_combined.index = range(len(gtf_exon_combined))
gtf_exon_combined.to_csv(output_dir + '/exons.gtf',sep='\t',index=False)
if not splicing:
gtf_gene_combined = gtf_exon_combined.copy(deep=True)
gtf_gene_combined['Feature'] = 'gene'
gtf_gene_combined.to_csv(output_dir + '/genes.gtf',sep='\t',index=False)
# Get locations of genes. We are using the longest possible span of different transcripts here
gtf_gene_combined.loc[:,'gene_id'] = gtf_gene_combined.Attributes.apply(lambda s: get_attribute(s,'gene_id'))
gene_starts = gtf_gene_combined.groupby('gene_id').Start.apply(min)
gene_ends = gtf_gene_combined.groupby('gene_id').End.apply(max)
chroms = gtf_gene_combined.groupby('gene_id').Chromosome.apply(lambda s:list(s)[0])
strands = gtf_gene_combined.groupby('gene_id').Strand.apply(lambda s:list(s)[0])
gtf_dict_stepsize = 10000
# Create a dictionary for each "bin" of the genome, that maps to a list of genes within or overlapping
# that bin. The bin size is determined by gtf_dict_stepsize.
starts_rounded = gene_starts.apply(lambda s:np.floor(s/gtf_dict_stepsize)*gtf_dict_stepsize).values
ends_rounded = gene_ends.apply(lambda s:np.ceil(s/gtf_dict_stepsize)*gtf_dict_stepsize).values
gene_ids = gene_starts.index
start_dict = gene_starts.to_dict()
end_dict = gene_ends.to_dict()
gene_dict = defaultdict(list)
for i in range(len(gene_starts)):
cur_chrom = chroms[i]
cur_strand = strands[i]
cur_start = int(starts_rounded[i])
cur_end = int(ends_rounded[i])
cur_gene_id = gene_ids[i]
for coord in range(cur_start,cur_end+1,gtf_dict_stepsize):
            # use the same chrom:coord:strand key for both the membership check and the append
            if not (cur_gene_id in gene_dict[cur_chrom + ':' + str(coord) + ':' + cur_strand]):
                gene_dict[cur_chrom + ':' + str(coord) + ':' + cur_strand].append(cur_gene_id)
# Create a dictionary from genes to exons
exon_gene_ids = gtf_exon_combined.Attributes.apply(lambda s: get_attribute(s,'gene_id')).values
exon_starts = gtf_exon_combined.Start.values
exon_ends = gtf_exon_combined.End.values
exon_gene_start_end_dict = defaultdict(dict)
for i in range(len(exon_gene_ids)):
cur_gene_id = exon_gene_ids[i]
cur_exon_start = exon_starts[i]
cur_exon_ends = exon_ends[i]
exon_gene_start_end_dict[cur_gene_id][cur_exon_start] = cur_exon_ends
gene_id_to_gene_names = dict(zip(gtf_gene_combined.Attributes.apply(lambda s: get_attribute(s,'gene_id')),
gtf_gene_combined.Attributes.apply(lambda s: get_attribute(s,'gene_name'))))
gene_id_to_genome = dict(zip(gtf_gene_combined.Attributes.apply(lambda s: get_attribute(s,'gene_id')),
gtf_gene_combined.Chromosome.apply(lambda s:s.split('_')[0])))
gene_id_to_strand = dict(zip(gtf_gene_combined.Attributes.apply(lambda s:get_attribute(s,'gene_id')).values,
gtf_gene_combined.Strand.values))
gene_id_to_chrom = dict(zip(gtf_gene_combined.Attributes.apply(lambda s:get_attribute(s,'gene_id')).values,
gtf_gene_combined.Chromosome.values))
gene_id_to_biotype = dict(zip(gtf_gene_combined.Attributes.apply(lambda s:get_attribute(s,'gene_id')).values,
gtf_gene_combined.Attributes.apply(lambda s:get_attribute(s,'gene_biotype')).values))
#Save dictionary with gene info
gene_info = {'gene_bins':gene_dict,
'genes_to_exons':exon_gene_start_end_dict,
'gene_starts': start_dict,
'gene_ends': end_dict,
'gene_id_to_name': gene_id_to_gene_names,
'gene_id_to_genome':gene_id_to_genome,
'gene_id_to_chrom':gene_id_to_chrom,
'gene_id_to_strand':gene_id_to_strand,
'gene_id_to_biotype':gene_id_to_biotype
}
with open(output_dir+ '/gene_info.pkl', 'wb') as f:
pickle.dump(gene_info, f, pickle.HIGHEST_PROTOCOL)
def generate_STAR_index(output_dir, nthreads,genomeSAindexNbases,splicing):
splicing = (splicing=='True')
if splicing:
star_command = """STAR --runMode genomeGenerate --genomeDir {0} --genomeFastaFiles {0}/genome.fa --sjdbGTFfile {0}/exons.gtf --runThreadN {1} --limitGenomeGenerateRAM 24000000000 --genomeSAindexNbases {2}""".format(output_dir, nthreads, genomeSAindexNbases)
else:
star_command = """STAR --runMode genomeGenerate --genomeDir {0} --genomeFastaFiles {0}/genome.fa --runThreadN {1} --limitGenomeGenerateRAM 24000000000 --genomeSAindexNbases {2}""".format(output_dir, nthreads, genomeSAindexNbases)
rc = subprocess.call(star_command, shell=True)
return rc
bases = list('ACGT')
def convert_degen_seq_to_list(seq):
"""Uses recursion to convert a degenerate sequence to a list
For example: AGGN -> [AGGA, AGGC, AGGG, AGGT]"""
seq_list = []
N_pos = seq.find('N')
if N_pos>=0:
for b in bases:
seq_list += convert_degen_seq_to_list(seq[:N_pos] + b + seq[N_pos+1:])
else:
seq_list.append(seq)
return seq_list
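# Example: two degenerate positions expand combinatorially (4 x 4 = 16 sequences):
# convert_degen_seq_to_list('ANGN')   # -> ['AAGA', 'AAGC', ..., 'ATGT'] (16 entries)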
def levenshteinDistance(s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2+1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
distances = distances_
return distances[-1]
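# Quick check: the two sequences below differ by a single substitution.
# levenshteinDistance('AACGT', 'AAGGT')   # -> 1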
def get_min_edit_dists(bc,edit_dict,max_d=3):
"""Returns a list of nearest edit dist seqs
Input 8nt barcode, edit_dist_dictionary
Output <list of nearest edit distance seqs>, <edit dist>"""
bc_matches = edit_dict[0][bc]
edit_dist = 0
if (len(bc_matches)==0) and (max_d>=1):
edit_dist+=1
bc_matches = edit_dict[1][bc]
if (len(bc_matches)==0) and (max_d>=2):
edit_dist+=1
bc_matches = edit_dict[2][bc]
if (len(bc_matches)==0) and (max_d>=3):
edit_dist+=1
bc_matches = edit_dict[3][bc]
return bc_matches,edit_dist
def preprocess_fastq(fastq1, fastq2, output_dir, chemistry='v1', bc_edit_dist=3, **params):
"""
Performs all the steps before running the alignment. Temporary files
saved in output_dir.
"""
with open(PATH + '/barcodes/bc_dict_v1.pkl', 'rb') as f:
edit_dict_v1 = pickle.load(f)
with open(PATH + '/barcodes/bc_dict_v2.pkl', 'rb') as f:
edit_dict_v2 = pickle.load(f)
with open(PATH + '/barcodes/bc_dict_v3.pkl', 'rb') as f:
edit_dict_v3 = pickle.load(f)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
bc_edit_dist = int(bc_edit_dist)
# Read in barcode sequences
bc_8nt_v1 = pd.read_csv(PATH + '/barcodes/bc_8nt_v1.csv',names=['barcode'],index_col=0).barcode.values
bc_8nt_v2 = pd.read_csv(PATH + '/barcodes/bc_8nt_v2.csv',names=['barcode'],index_col=0).barcode.values
if chemistry=='v1':
bc1_edit_dict = edit_dict_v1
bc2_edit_dict = edit_dict_v1
bc3_edit_dict = edit_dict_v1
# Amplicon sequence
amp_seq = 'NNNNNNNNNNIIIIIIIIGTGGCCGATGTTTCGCATCGGCGTACGACTIIIIIIIIATCCACGTGCTTGAGAGGCCAGAGCATTCGIIIIIIII'
elif chemistry=='v2':
bc1_edit_dict = edit_dict_v1
bc2_edit_dict = edit_dict_v1
bc3_edit_dict = edit_dict_v2
# Amplicon sequence
amp_seq = 'NNNNNNNNNNIIIIIIIIGTGGCCGATGTTTCGCATCGGCGTACGACTIIIIIIIIATCCACGTGCTTGAGACTGTGGIIIIIIII'
elif chemistry=='v4':
bc1_edit_dict = edit_dict_v1
bc2_edit_dict = edit_dict_v1
bc3_edit_dict = edit_dict_v1 #edited 7/15/19 from v2 -> v1
# Amplicon sequence #edited 7/11/19 for the duplex adapter test
amp_seq = 'NNNNNNNNNNIIIIIIIIGTGGCCGATGTTTCGCATCGGCGTACGACTIIIIIIIIATCCACGTGCTTGAGAGGCCAGAGCATTCGIIIIIIII'
# Get location of cell barcodes in amplicon:
bc_len = 8
bc_starts = []
c = 0
while True:
bc_loc = amp_seq[c:].find('IIIIIIII')
if bc_loc==-1:
break
bc_starts.append(bc_loc + c)
c = bc_starts[-1] + bc_len
print(bc_starts)
def get_perfect_bc_counts(fastq2,n_reads=2000000,reads_in_cells_thresh=0.92):
quality_scores = []
seqs = []
with gzip.open(fastq2) as f:
for i in range(n_reads):
f.readline()
seq = f.readline().decode()[:-1]
f.readline()
qual = f.readline()
seqs.append(seq)
quality_scores.append(qual)
if i %100000==0:
print(i,end=' ')
seqs = pd.Series(seqs)
bc_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 16:14:12 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import graphviz
import os
import seaborn as sns
from scipy.stats import chi2_contingency
os.chdir("E:\PYTHON NOTES\projects\cab fare prediction")
dataset_train=pd.read_csv("train_cab.csv")
dataset_test=pd.read_csv("test.csv")
dataset_train.describe()
# dimension of data
# dimension of data
dataset_train.shape
# Number of rows
dataset_train.shape[0]
# number of columns
dataset_train.shape[1]
# name of columns
list(dataset_train)
# data details
dataset_train.info()
dataset_train.isnull().sum()
dataset_test.isnull().sum()
sns.heatmap(dataset_train.isnull(),yticklabels=False,cbar=False, cmap='coolwarm')
# convert pickup_datetime into the required datetime format
data=[dataset_train,dataset_test]
for i in data:
i["pickup_datetime"]=pd.to_datetime(i["pickup_datetime"],errors="coerce")
dataset_train.info()
dataset_test.info()
dataset_train.isnull().sum()
dataset_test.isna().sum()
dataset_train=dataset_train.dropna(subset=["pickup_datetime"],how="all")
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
np.where(dataset_train["fare_amount"]=="430-")
dataset_train["fare_amount"].loc[1123]=430
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
# convert passenger count into a categorical variable, because passenger count is not a continuous variable
dataset_obj=["passenger_count"]
dataset_int=["fare_amount","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
# data visualization
import seaborn as sns
import matplotlib.pyplot as plt
#$stting up the sns for plots
sns.set(style="darkgrid",palette="Set1")
#some histogram plot from seaborn lib
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.distplot(dataset_train["fare_amount"],bins=50)
plt.subplot(322)
_=sns.distplot(dataset_train["pickup_longitude"],bins=50)
plt.subplot(323)
_=sns.distplot(dataset_train["pickup_latitude"],bins=50)
plt.subplot(324)
_ = sns.distplot(dataset_train['dropoff_longitude'],bins=50)
plt.subplot(325)
_ = sns.distplot(dataset_train['dropoff_latitude'],bins=50)
plt.show()
plt.savefig('hist.png')
import scipy.stats as stats
#Some Bee Swarmplots
# plt.title('Cab Fare w.r.t passenger_count')
plt.figure(figsize=(25,25))
#_=sns.swarmplot(x="passenger_count",y="fare_amount",data=dataset_train)
#Jointplots for Bivariate Analysis.
#Here Scatter plot has regression line between 2 variables along with separate Bar plots of both variables.
#Also its annotated with pearson correlation coefficient and p value.
_=sns.jointplot(x="fare_amount",y="pickup_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
#plt.savefig("jointfplo.png")
plt.show()
_=sns.jointplot(x="fare_amount",y="pickup_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
# some violin plots to see the spread of each variable
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.violinplot(y="fare_amount",data=dataset_train)
plt.subplot(322)
_=sns.violinplot(y="pickup_longitude",data=dataset_train)
plt.subplot(323)
_ = sns.violinplot(y='pickup_latitude',data=dataset_train)
plt.subplot(324)
_ = sns.violinplot(y='dropoff_longitude',data=dataset_train)
plt.subplot(325)
_ = sns.violinplot(y='dropoff_latitude',data=dataset_train)
plt.savefig("violine.png")
plt.show()
#pairplot for all numeric variables
_=sns.pairplot(dataset_train.loc[:,dataset_int],kind="scatter",dropna=True)
_.fig.suptitle("pairwise plot all numeric varibale")
#plt.savefig("pairwise.png")
plt.show()
#removing values which are not within the desired range (outliers), based on a basic understanding of the dataset
#1.Fare amount has a negative value, which doesn't make sense. A price amount cannot be -ve and also cannot be 0. So we will remove these fields.
sum(dataset_train["fare_amount"]<1)
dataset_train[dataset_train["fare_amount"]<1]
dataset_train=dataset_train.drop(dataset_train[dataset_train["fare_amount"]<1].index,axis=0)
#dataset_train.loc[dataset_train["fare_amount"]<1,"fare_amount"]=np.nan
#2. passenger count variable: passenger count cannot be more than 6
sum(dataset_train["passenger_count"]>6)
for i in range (4,11):
print("passanger_count_above"+ str(i)+ "={}".format(sum(dataset_train["passenger_count"]>i)))
# so 20 observations of passenger_count are consistently above 6 (i.e. 7, 8, 9, 10 passengers); let's check them.
dataset_train[dataset_train["passenger_count"]>6]
#Also we need to see if there are any passenger_count<1
dataset_train[dataset_train["passenger_count"]<1]
len(dataset_train[dataset_train["passenger_count"]<1])
dataset_test["passenger_count"].unique()
# We will remove the 20 observations above 6 because a cab cannot hold that many passengers.
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]<1].index,axis=0)
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]>6].index,axis=0)
#dataset_train.loc[dataset_train["passenger_count"]<1,"passenger_count"]=np.nan
#dataset_train.loc[dataset_train["passenger_count"]>6,"passenger_count"]=np.nan
sum(dataset_train["passenger_count"]<1)
#3. Latitudes range from -90 to 90. Longitudes range from -180 to 180. Removing values which do not satisfy these ranges
print("pickup_longitude above 180 ={}".format(sum(dataset_train["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_train["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_train["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_train["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_train['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_train['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_train['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_train['dropoff_latitude']>90)))
#for test data
print("pickup_longitude above 180 ={}".format(sum(dataset_test["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_test["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_test["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_test["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_test['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_test['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_test['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_test['dropoff_latitude']>90)))
#There's only one outlier which is in variable pickup_latitude.So we will remove it with nan.
#Also we will see if there are any values equal to 0.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_train[i]==0)))
#for test data
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_test[i]==0)))
#there are values which are equal to 0. we will remove them.
# There's only one outlier which is in variable pickup_latitude.So we will remove it with nan
dataset_train=dataset_train.drop(dataset_train[dataset_train["pickup_latitude"]>90].index,axis=0)
#there are values which are equal to 0. we will remove them.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
dataset_train=dataset_train.drop(dataset_train[dataset_train[i]==0].index,axis=0)
# for i in ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']:
# train.loc[train[i]==0,i] = np.nan
# train.loc[train['pickup_latitude']>90,'pickup_latitude'] = np.nan
dataset_train.shape
#Missing Value Analysis
missing_value=dataset_train.isnull().sum()
missing_value = missing_value.reset_index()
missing_value = missing_value.rename(columns = {'index': 'Variables', 0: 'Missing_percentage'})
missing_value
#find out percentage of null values
missing_value['Missing_percentage'] = (missing_value['Missing_percentage']/len(dataset_train))*100
missing_value = missing_value.sort_values('Missing_percentage', ascending = False).reset_index(drop = True)
dataset_train.info()
dataset_train["fare_amount"]=dataset_train["fare_amount"].fillna(dataset_train["fare_amount"].median())
dataset_train["passenger_count"]=dataset_train["passenger_count"].fillna(dataset_train["passenger_count"].mode()[0])
dataset_train.isnull().sum()
dataset_train["passenger_count"]=dataset_train["passenger_count"].round().astype(object)
dataset_train["passenger_count"].unique()
#outlier analysis by box plot
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train["fare_amount"],data=dataset_train,orient="h")
# sum(dataset_train['fare_amount']<22.5)/len(dataset_train['fare_amount'])*100
#Bivariate Boxplots: Boxplot for Numerical Variable Vs Categorical Variable.
plt.figure(figsize=(20,10))
plt.xlim(0,100)
_=sns.boxplot(x=dataset_train["fare_amount"],y=dataset_train["passenger_count"],data=dataset_train,orient="h")
def outlier_detect(df):
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
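# Note: outlier_detect caps values at Tukey's fences rather than dropping rows:
# lower = Q1 - 1.5*IQR, upper = Q3 + 1.5*IQR, and anything outside is clipped to the fence.
# Illustrative example (hypothetical column): for [1, 2, 3, 4, 100], Q1=2, Q3=4, IQR=2,
# so 100 is replaced by the upper fence 4 + 1.5*2 = 7.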
dataset_int1=outlier_detect(dataset_train.loc[:,dataset_int])
dataset_test_obj=["passenger_count"]
dataset_test_int=["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
dataset_test1=outlier_detect(dataset_test.loc[:,dataset_test_int])
dataset_test1=pd.concat([dataset_test1,dataset_test["passenger_count"]],axis=1)
dataset_test= | pd.concat([dataset_test1,dataset_test["pickup_datetime"]],axis=1) | pandas.concat |
import wandb
import pandas as pd
import logging
logger = logging.getLogger('export')
logging.basicConfig()
api = wandb.Api()
"""
These can be replaced, but make sure you also correct the names in `probing.py` and `training.py` scripts.
"""
WANDB_USERNAME = '<ANONYMIZED>'
MODEL_TRAINING_PROJECT_NAME = 'bias-probing'
ONLINE_CODE_PROJECT_NAME = 'online-code'
def _dump_runs_from(path: str, output_file: str):
logger.info(f'Export from wandb.ai/{path} to {output_file}')
runs = api.runs(path)
summary_list = []
config_list = []
name_list = []
for run in runs:
# run.summary are the output key/values like accuracy. We call ._json_dict to omit large files
summary_list.append(run.summary._json_dict)
config_list.append({k: v for k, v in run.config.items() if not k.startswith('_')})
name_list.append(run.name)
summary_df = pd.DataFrame.from_records(summary_list)
config_df = pd.DataFrame.from_records(config_list)
name_df = | pd.DataFrame({'name': name_list}) | pandas.DataFrame |
import os
import copy
import pickle
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import torch
from tqdm import tqdm
from behavenet import get_user_dir
from behavenet import make_dir_if_not_exists
from behavenet.data.utils import build_data_generator
from behavenet.data.utils import load_labels_like_latents
from behavenet.fitting.eval import get_reconstruction
from behavenet.fitting.utils import experiment_exists
from behavenet.fitting.utils import get_best_model_and_data
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_lab_example
from behavenet.fitting.utils import get_session_dir
from behavenet.plotting import concat
from behavenet.plotting import get_crop
from behavenet.plotting import load_latents
from behavenet.plotting import load_metrics_csv_as_df
from behavenet.plotting import save_movie
# to ignore imports for sphix-autoapidoc
__all__ = [
'get_input_range', 'compute_range', 'get_labels_2d_for_trial', 'get_model_input',
'interpolate_2d', 'interpolate_1d', 'interpolate_point_path', 'plot_2d_frame_array',
'plot_1d_frame_array', 'make_interpolated', 'make_interpolated_multipanel',
'plot_psvae_training_curves', 'plot_hyperparameter_search_results',
'plot_label_reconstructions', 'plot_latent_traversals', 'make_latent_traversal_movie']
# ----------------------------------------
# low-level util functions
# ----------------------------------------
def get_input_range(
input_type, hparams, sess_ids=None, sess_idx=0, model=None, data_gen=None, version=0,
min_p=5, max_p=95, apply_label_masks=False):
"""Helper function to compute input range for a variety of data types.
Parameters
----------
input_type : :obj:`str`
'latents' | 'labels' | 'labels_sc'
hparams : :obj:`dict`
needs to contain enough information to specify an autoencoder
sess_ids : :obj:`list`, optional
each entry is a session dict with keys 'lab', 'expt', 'animal', 'session'; for loading
labels and labels_sc
sess_idx : :obj:`int`, optional
session index into data generator
model : :obj:`AE` object, optional
for generating latents if latent file does not exist
data_gen : :obj:`ConcatSessionGenerator` object, optional
for generating latents if latent file does not exist
version : :obj:`int`, optional
specify AE version for loading latents
min_p : :obj:`int`, optional
defines lower end of range; percentile in [0, 100]
max_p : :obj:`int`, optional
defines upper end of range; percentile in [0, 100]
apply_label_masks : :obj:`bool`, optional
`True` to set masked values to NaN in labels
Returns
-------
:obj:`dict`
keys are 'min' and 'max'
"""
if input_type == 'latents':
# load latents
latent_file = str('%s_%s_%s_%s_latents.pkl' % (
hparams['lab'], hparams['expt'], hparams['animal'], hparams['session']))
filename = os.path.join(
hparams['expt_dir'], 'version_%i' % version, latent_file)
if not os.path.exists(filename):
from behavenet.fitting.eval import export_latents
print('latents file not found at %s' % filename)
print('exporting latents...', end='')
filenames = export_latents(data_gen, model)
filename = filenames[0]
print('done')
latents = pickle.load(open(filename, 'rb'))
inputs = latents['latents']
elif input_type == 'labels':
labels = load_labels_like_latents(hparams, sess_ids, sess_idx=sess_idx)
inputs = labels['latents']
elif input_type == 'labels_sc':
hparams2 = copy.deepcopy(hparams)
hparams2['conditional_encoder'] = True # to actually return labels
labels_sc = load_labels_like_latents(
hparams2, sess_ids, sess_idx=sess_idx, data_key='labels_sc')
inputs = labels_sc['latents']
else:
raise NotImplementedError
if apply_label_masks:
masks = load_labels_like_latents(
hparams, sess_ids, sess_idx=sess_idx, data_key='labels_masks')
for i, m in zip(inputs, masks):
i[m == 0] = np.nan
input_range = compute_range(inputs, min_p=min_p, max_p=max_p)
return input_range
def compute_range(values_list, min_p=5, max_p=95):
"""Compute min and max of a list of numbers using percentiles.
Parameters
----------
values_list : :obj:`list`
list of np.ndarrays; min/max calculated over axis 0 once all lists are vertically stacked
min_p : :obj:`int`
defines lower end of range; percentile in [0, 100]
max_p : :obj:`int`
defines upper end of range; percentile in [0, 100]
Returns
-------
:obj:`dict`
lower ['min'] and upper ['max'] range of input
"""
if np.any([len(arr) == 0 for arr in values_list]):
values_ = []
for arr in values_list:
if len(arr) != 0:
values_.append(arr)
values = np.vstack(values_)
else:
values = np.vstack(values_list)
ranges = {
'min': np.nanpercentile(values, min_p, axis=0),
'max': np.nanpercentile(values, max_p, axis=0)}
return ranges
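# Worked example (synthetic latents): with min_p=0 and max_p=100 the returned range is just
# the column-wise min and max of the vertically stacked arrays.
# vals = [np.array([[1.0], [2.0]]), np.array([[3.0], [100.0]])]
# compute_range(vals, min_p=0, max_p=100)   # -> {'min': array([1.]), 'max': array([100.])}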
def get_labels_2d_for_trial(
hparams, sess_ids, trial=None, trial_idx=None, sess_idx=0, dtype='test', data_gen=None):
"""Return scaled labels (in pixel space) for a given trial.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to build a data generator
sess_ids : :obj:`list` of :obj:`dict`
each entry is a session dict with keys 'lab', 'expt', 'animal', 'session'
trial : :obj:`int`, optional
trial index into all possible trials (train, val, test); one of `trial` or `trial_idx`
must be specified; `trial` takes precedence over `trial_idx`
trial_idx : :obj:`int`, optional
trial index into trial type defined by `dtype`; one of `trial` or `trial_idx` must be
specified; `trial` takes precedence over `trial_idx`
sess_idx : :obj:`int`, optional
session index into data generator
dtype : :obj:`str`, optional
data type that is indexed by `trial_idx`; 'train' | 'val' | 'test'
data_gen : :obj:`ConcatSessionGenerator` object, optional
for generating labels
Returns
-------
:obj:`tuple`
- labels_2d_pt (:obj:`torch.Tensor`) of shape (batch, n_labels, y_pix, x_pix)
- labels_2d_np (:obj:`np.ndarray`) of shape (batch, n_labels, y_pix, x_pix)
"""
if (trial_idx is not None) and (trial is not None):
raise ValueError('only one of "trial" or "trial_idx" can be specified')
if data_gen is None:
hparams_new = copy.deepcopy(hparams)
hparams_new['conditional_encoder'] = True # ensure scaled labels are returned
hparams_new['device'] = 'cpu'
hparams_new['as_numpy'] = False
hparams_new['batch_load'] = True
data_gen = build_data_generator(hparams_new, sess_ids, export_csv=False)
# get trial
if trial is None:
trial = data_gen.datasets[sess_idx].batch_idxs[dtype][trial_idx]
batch = data_gen.datasets[sess_idx][trial]
labels_2d_pt = batch['labels_sc']
labels_2d_np = labels_2d_pt.cpu().detach().numpy()
return labels_2d_pt, labels_2d_np
def get_model_input(
data_generator, hparams, model, trial=None, trial_idx=None, sess_idx=0, max_frames=200,
compute_latents=False, compute_2d_labels=True, compute_scaled_labels=False, dtype='test'):
"""Return images, latents, and labels for a given trial.
Parameters
----------
data_generator: :obj:`ConcatSessionGenerator`
for generating model input
hparams : :obj:`dict`
needs to contain enough information to specify both a model and the associated data
model : :obj:`behavenet.models` object
model type
trial : :obj:`int`, optional
trial index into all possible trials (train, val, test); one of `trial` or `trial_idx`
must be specified; `trial` takes precedence over `trial_idx`
trial_idx : :obj:`int`, optional
trial index into trial type defined by `dtype`; one of `trial` or `trial_idx` must be
specified; `trial` takes precedence over `trial_idx`
sess_idx : :obj:`int`, optional
session index into data generator
max_frames : :obj:`int`, optional
maximum size of batch to return
compute_latents : :obj:`bool`, optional
`True` to return latents
compute_2d_labels : :obj:`bool`, optional
`True` to return 2d label tensors of shape (batch, n_labels, y_pix, x_pix)
compute_scaled_labels : :obj:`bool`, optional
ignored if `compute_2d_labels` is `True`; if `compute_scaled_labels=True`, return scaled
labels as shape (batch, n_labels) rather than 2d labels as shape
(batch, n_labels, y_pix, x_pix).
dtype : :obj:`str`, optional
data type that is indexed by `trial_idx`; 'train' | 'val' | 'test'
Returns
-------
:obj:`tuple`
- ims_pt (:obj:`torch.Tensor`) of shape (max_frames, n_channels, y_pix, x_pix)
- ims_np (:obj:`np.ndarray`) of shape (max_frames, n_channels, y_pix, x_pix)
- latents_np (:obj:`np.ndarray`) of shape (max_frames, n_latents)
- labels_pt (:obj:`torch.Tensor`) of shape (max_frames, n_labels)
- labels_2d_pt (:obj:`torch.Tensor`) of shape (max_frames, n_labels, y_pix, x_pix)
- labels_2d_np (:obj:`np.ndarray`) of shape (max_frames, n_labels, y_pix, x_pix)
"""
if (trial_idx is not None) and (trial is not None):
raise ValueError('only one of "trial" or "trial_idx" can be specified')
if (trial_idx is None) and (trial is None):
raise ValueError('one of "trial" or "trial_idx" must be specified')
# get trial
if trial is None:
trial = data_generator.datasets[sess_idx].batch_idxs[dtype][trial_idx]
batch = data_generator.datasets[sess_idx][trial]
ims_pt = batch['images'][:max_frames]
ims_np = ims_pt.cpu().detach().numpy()
# continuous labels
if hparams['model_class'] == 'ae' \
or hparams['model_class'] == 'vae' \
or hparams['model_class'] == 'beta-tcvae':
labels_pt = None
labels_np = None
elif hparams['model_class'] == 'cond-ae' \
or hparams['model_class'] == 'cond-vae' \
or hparams['model_class'] == 'cond-ae-msp' \
or hparams['model_class'] == 'ps-vae' \
or hparams['model_class'] == 'labels-images':
labels_pt = batch['labels'][:max_frames]
labels_np = labels_pt.cpu().detach().numpy()
else:
raise NotImplementedError
# one hot labels
if hparams['conditional_encoder']:
labels_2d_pt = batch['labels_sc'][:max_frames]
labels_2d_np = labels_2d_pt.cpu().detach().numpy()
else:
if compute_2d_labels:
hparams['session_dir'], sess_ids = get_session_dir(hparams)
labels_2d_pt, labels_2d_np = get_labels_2d_for_trial(hparams, sess_ids, trial=trial)
elif compute_scaled_labels:
labels_2d_pt = None
import h5py
hdf5_file = data_generator.datasets[sess_idx].paths['labels']
with h5py.File(hdf5_file, 'r', libver='latest', swmr=True) as f:
labels_2d_np = f['labels_sc'][str('trial_%04i' % trial)][()].astype('float32')
else:
labels_2d_pt, labels_2d_np = None, None
# latents
if compute_latents:
if hparams['model_class'] == 'cond-ae-msp' or hparams['model_class'] == 'ps-vae':
latents_np = model.get_transformed_latents(ims_pt, dataset=sess_idx, as_numpy=True)
else:
_, latents_np = get_reconstruction(
model, ims_pt, labels=labels_pt, labels_2d=labels_2d_pt, return_latents=True)
else:
latents_np = None
return ims_pt, ims_np, latents_np, labels_pt, labels_np, labels_2d_pt, labels_2d_np
def interpolate_2d(
interp_type, model, ims_0, latents_0, labels_0, labels_sc_0, mins, maxes, input_idxs,
n_frames, crop_type=None, mins_sc=None, maxes_sc=None, crop_kwargs=None,
marker_idxs=None, ch=0):
"""Return reconstructed images created by interpolating through latent/label space.
Parameters
----------
interp_type : :obj:`str`
'latents' | 'labels'
model : :obj:`behavenet.models` object
autoencoder model
ims_0 : :obj:`torch.Tensor`
base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
latents_0 : :obj:`np.ndarray`
base latents of shape (1, n_latents); only two of these dimensions will be changed if
`interp_type='latents'`
labels_0 : :obj:`np.ndarray`
base labels of shape (1, n_labels)
labels_sc_0 : :obj:`np.ndarray`
base scaled labels in pixel space of shape (1, n_labels, y_pix, x_pix)
mins : :obj:`array-like`
minimum values of labels/latents, one for each dim
maxes : :obj:`list`
maximum values of labels/latents, one for each dim
input_idxs : :obj:`list`
indices of labels/latents that will be interpolated; for labels, must be y first, then x
for proper marker recording
n_frames : :obj:`int`
number of interpolation points between mins and maxes (inclusive)
crop_type : :obj:`str` or :obj:`NoneType`, optional
currently only implements 'fixed'; if not None, cropped images are returned, and returned
labels are also cropped so that they can be plotted on top of the cropped images; if None,
returned cropped images are empty and labels are relative to original image size
mins_sc : :obj:`list`, optional
min values of scaled labels that correspond to min values of labels when using conditional
encoders
maxes_sc : :obj:`list`, optional
max values of scaled labels that correspond to max values of labels when using conditional
encoders
crop_kwargs : :obj:`dict`, optional
define center and extent of crop if `crop_type='fixed'`; keys are 'x_0', 'x_ext', 'y_0',
'y_ext'
marker_idxs : :obj:`list`, optional
indices of `labels_sc_0` that will be interpolated; note that this is analogous but
different from `input_idxs`, since the 2d tensor `labels_sc_0` has half as many label
dimensions as `latents_0` and `labels_0`
ch : :obj:`int`, optional
specify which channel of input images to return (can only be a single value)
Returns
-------
:obj:`tuple`
- ims_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated images
- labels_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated labels
- ims_crop_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated , cropped
images
"""
if interp_type == 'labels':
from behavenet.data.transforms import MakeOneHot2D
_, _, y_pix, x_pix = ims_0.shape
one_hot_2d = MakeOneHot2D(y_pix, x_pix)
# compute grid for relevant inputs
n_interp_dims = len(input_idxs)
assert n_interp_dims == 2
# compute ranges for relevant inputs
inputs = []
inputs_sc = []
for d in input_idxs:
inputs.append(np.linspace(mins[d], maxes[d], n_frames))
if mins_sc is not None and maxes_sc is not None:
inputs_sc.append(np.linspace(mins_sc[d], maxes_sc[d], n_frames))
else:
if interp_type == 'labels':
raise NotImplementedError
ims_list = []
ims_crop_list = []
labels_list = []
# latent_vals = []
for i0 in range(n_frames):
ims_tmp = []
ims_crop_tmp = []
labels_tmp = []
# latents_tmp = []
for i1 in range(n_frames):
if interp_type == 'latents':
# get (new) latents
latents = np.copy(latents_0)
latents[0, input_idxs[0]] = inputs[0][i0]
latents[0, input_idxs[1]] = inputs[1][i1]
# get scaled labels (for markers)
labels_sc = _get_updated_scaled_labels(labels_sc_0)
if model.hparams['model_class'] == 'cond-ae-msp':
# get reconstruction
im_tmp = get_reconstruction(
model,
torch.from_numpy(latents).float(),
apply_inverse_transform=True)
else:
# get labels
if model.hparams['model_class'] == 'ae' \
or model.hparams['model_class'] == 'vae' \
or model.hparams['model_class'] == 'beta-tcvae' \
or model.hparams['model_class'] == 'ps-vae':
labels = None
elif model.hparams['model_class'] == 'cond-ae' \
or model.hparams['model_class'] == 'cond-vae':
labels = torch.from_numpy(labels_0).float()
else:
raise NotImplementedError
# get reconstruction
im_tmp = get_reconstruction(
model,
torch.from_numpy(latents).float(),
labels=labels)
elif interp_type == 'labels':
# get (new) scaled labels
labels_sc = _get_updated_scaled_labels(
labels_sc_0, input_idxs, [inputs_sc[0][i0], inputs_sc[1][i1]])
if len(labels_sc_0.shape) == 4:
# 2d scaled labels
labels_2d = torch.from_numpy(one_hot_2d(labels_sc)).float()
else:
# 1d scaled labels
labels_2d = None
if model.hparams['model_class'] == 'cond-ae-msp' \
or model.hparams['model_class'] == 'ps-vae':
# change latents that correspond to desired labels
latents = np.copy(latents_0)
latents[0, input_idxs[0]] = inputs[0][i0]
latents[0, input_idxs[1]] = inputs[1][i1]
# get reconstruction
im_tmp = get_reconstruction(model, latents, apply_inverse_transform=True)
else:
# get (new) labels
labels = np.copy(labels_0)
labels[0, input_idxs[0]] = inputs[0][i0]
labels[0, input_idxs[1]] = inputs[1][i1]
# get reconstruction
im_tmp = get_reconstruction(
model,
ims_0,
labels=torch.from_numpy(labels).float(),
labels_2d=labels_2d)
else:
raise NotImplementedError
ims_tmp.append(np.copy(im_tmp[0, ch]))
if crop_type:
x_min_tmp = crop_kwargs['x_0'] - crop_kwargs['x_ext']
y_min_tmp = crop_kwargs['y_0'] - crop_kwargs['y_ext']
else:
x_min_tmp = 0
y_min_tmp = 0
if interp_type == 'labels':
labels_tmp.append([
np.copy(labels_sc[0, input_idxs[0]]) - y_min_tmp,
np.copy(labels_sc[0, input_idxs[1]]) - x_min_tmp])
elif interp_type == 'latents' and labels_sc_0 is not None:
labels_tmp.append([
np.copy(labels_sc[0, marker_idxs[0]]) - y_min_tmp,
np.copy(labels_sc[0, marker_idxs[1]]) - x_min_tmp])
else:
labels_tmp.append([np.nan, np.nan])
if crop_type:
ims_crop_tmp.append(get_crop(
im_tmp[0, 0], crop_kwargs['y_0'], crop_kwargs['y_ext'], crop_kwargs['x_0'],
crop_kwargs['x_ext']))
else:
ims_crop_tmp.append([])
ims_list.append(ims_tmp)
ims_crop_list.append(ims_crop_tmp)
labels_list.append(labels_tmp)
return ims_list, labels_list, ims_crop_list
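# Illustrative sketch, not part of the original module: the heart of
# `interpolate_2d()` is a regular grid built with `np.linspace` over two
# latent/label dimensions (one per plot axis). The helper below reproduces just
# that grid construction on made-up ranges so the traversal layout is easy to
# inspect; the name and default values are hypothetical.
def _example_2d_interpolation_grid(mins=(-1.0, -2.0), maxes=(1.0, 2.0), n_frames=5):
    """Return an (n_frames, n_frames, 2) array of grid points over two dims."""
    import numpy as np
    vals0 = np.linspace(mins[0], maxes[0], n_frames)
    vals1 = np.linspace(mins[1], maxes[1], n_frames)
    # row index varies the first interpolated dim, column index the second
    return np.stack(np.meshgrid(vals0, vals1, indexing='ij'), axis=-1)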
def interpolate_1d(
interp_type, model, ims_0, latents_0, labels_0, labels_sc_0, mins, maxes, input_idxs,
n_frames, crop_type=None, mins_sc=None, maxes_sc=None, crop_kwargs=None,
marker_idxs=None, ch=0):
"""Return reconstructed images created by interpolating through latent/label space.
Parameters
----------
interp_type : :obj:`str`
'latents' | 'labels'
model : :obj:`behavenet.models` object
autoencoder model
ims_0 : :obj:`torch.Tensor`
base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
latents_0 : :obj:`np.ndarray`
base latents of shape (1, n_latents); only two of these dimensions will be changed if
`interp_type='latents'`
labels_0 : :obj:`np.ndarray`
base labels of shape (1, n_labels)
labels_sc_0 : :obj:`np.ndarray`
base scaled labels in pixel space of shape (1, n_labels, y_pix, x_pix)
mins : :obj:`array-like`
minimum values of all labels/latents
maxes : :obj:`array-like`
maximum values of all labels/latents
input_idxs : :obj:`array-like`
indices of labels/latents that will be interpolated
n_frames : :obj:`int`
number of interpolation points between mins and maxes (inclusive)
crop_type : :obj:`str` or :obj:`NoneType`, optional
currently only implements 'fixed'; if not None, cropped images are returned, and returned
labels are also cropped so that they can be plotted on top of the cropped images; if None,
returned cropped images are empty and labels are relative to original image size
mins_sc : :obj:`list`, optional
min values of scaled labels that correspond to min values of labels when using conditional
encoders
maxes_sc : :obj:`list`, optional
max values of scaled labels that correspond to max values of labels when using conditional
encoders
crop_kwargs : :obj:`dict`, optional
define center and extent of crop if `crop_type='fixed'`; keys are 'x_0', 'x_ext', 'y_0',
'y_ext'
marker_idxs : :obj:`list`, optional
indices of `labels_sc_0` that will be interpolated; note that this is analogous but
different from `input_idxs`, since the 2d tensor `labels_sc_0` has half as many label
dimensions as `latents_0` and `labels_0`
ch : :obj:`int`, optional
specify which channel of input images to return (can only be a single value)
Returns
-------
:obj:`tuple`
- ims_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated images
- labels_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated labels
        - ims_crop_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated, cropped
          images
"""
if interp_type == 'labels':
from behavenet.data.transforms import MakeOneHot2D
_, _, y_pix, x_pix = ims_0.shape
one_hot_2d = MakeOneHot2D(y_pix, x_pix)
n_interp_dims = len(input_idxs)
# compute ranges for relevant inputs
inputs = []
inputs_sc = []
for d in input_idxs:
inputs.append(np.linspace(mins[d], maxes[d], n_frames))
if mins_sc is not None and maxes_sc is not None:
inputs_sc.append(np.linspace(mins_sc[d], maxes_sc[d], n_frames))
else:
if interp_type == 'labels':
raise NotImplementedError
ims_list = []
ims_crop_list = []
labels_list = []
# latent_vals = []
for i0 in range(n_interp_dims):
ims_tmp = []
ims_crop_tmp = []
labels_tmp = []
for i1 in range(n_frames):
if interp_type == 'latents':
# get (new) latents
latents = np.copy(latents_0)
latents[0, input_idxs[i0]] = inputs[i0][i1]
# get scaled labels (for markers)
labels_sc = _get_updated_scaled_labels(labels_sc_0)
if model.hparams['model_class'] == 'cond-ae-msp':
# get reconstruction
im_tmp = get_reconstruction(
model,
torch.from_numpy(latents).float(),
apply_inverse_transform=True)
else:
# get labels
if model.hparams['model_class'] == 'ae' \
or model.hparams['model_class'] == 'vae' \
or model.hparams['model_class'] == 'beta-tcvae' \
or model.hparams['model_class'] == 'ps-vae':
labels = None
elif model.hparams['model_class'] == 'cond-ae' \
or model.hparams['model_class'] == 'cond-vae':
labels = torch.from_numpy(labels_0).float()
else:
raise NotImplementedError
# get reconstruction
im_tmp = get_reconstruction(
model,
torch.from_numpy(latents).float(),
labels=labels)
elif interp_type == 'labels':
# get (new) scaled labels
labels_sc = _get_updated_scaled_labels(
labels_sc_0, input_idxs[i0], inputs_sc[i0][i1])
if len(labels_sc_0.shape) == 4:
# 2d scaled labels
labels_2d = torch.from_numpy(one_hot_2d(labels_sc)).float()
else:
# 1d scaled labels
labels_2d = None
if model.hparams['model_class'] == 'cond-ae-msp' \
or model.hparams['model_class'] == 'ps-vae':
# change latents that correspond to desired labels
latents = np.copy(latents_0)
latents[0, input_idxs[i0]] = inputs[i0][i1]
# get reconstruction
im_tmp = get_reconstruction(model, latents, apply_inverse_transform=True)
else:
# get (new) labels
labels = np.copy(labels_0)
labels[0, input_idxs[i0]] = inputs[i0][i1]
# get reconstruction
im_tmp = get_reconstruction(
model,
ims_0,
labels=torch.from_numpy(labels).float(),
labels_2d=labels_2d)
else:
raise NotImplementedError
ims_tmp.append(np.copy(im_tmp[0, ch]))
if crop_type:
x_min_tmp = crop_kwargs['x_0'] - crop_kwargs['x_ext']
y_min_tmp = crop_kwargs['y_0'] - crop_kwargs['y_ext']
else:
x_min_tmp = 0
y_min_tmp = 0
if interp_type == 'labels':
labels_tmp.append([
np.copy(labels_sc[0, input_idxs[0]]) - y_min_tmp,
np.copy(labels_sc[0, input_idxs[1]]) - x_min_tmp])
elif interp_type == 'latents' and labels_sc_0 is not None:
labels_tmp.append([
np.copy(labels_sc[0, marker_idxs[0]]) - y_min_tmp,
np.copy(labels_sc[0, marker_idxs[1]]) - x_min_tmp])
else:
labels_tmp.append([np.nan, np.nan])
if crop_type:
ims_crop_tmp.append(get_crop(
im_tmp[0, 0], crop_kwargs['y_0'], crop_kwargs['y_ext'], crop_kwargs['x_0'],
crop_kwargs['x_ext']))
else:
ims_crop_tmp.append([])
ims_list.append(ims_tmp)
ims_crop_list.append(ims_crop_tmp)
labels_list.append(labels_tmp)
return ims_list, labels_list, ims_crop_list
def interpolate_point_path(
interp_type, model, ims_0, labels_0, points, n_frames=10, ch=0, crop_kwargs=None,
apply_inverse_transform=True):
"""Return reconstructed images created by interpolating through multiple points.
    This function is a simplified version of :func:`interpolate_1d()`: it computes a traversal
    along a single dimension rather than all dimensions, it does not support conditional
    encoders, and it does not attempt to compute the interpolated, scaled label values as
    :func:`interpolate_1d()` does. It should supersede :func:`interpolate_1d()` in a future
    refactor. Also note that this function is used by the code that makes traversal movies,
    whereas :func:`interpolate_1d()` is used by the code that makes traversal plots.
Parameters
----------
interp_type : :obj:`str`
'latents' | 'labels'
model : :obj:`behavenet.models` object
autoencoder model
ims_0 : :obj:`np.ndarray`
base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
labels_0 : :obj:`np.ndarray`
base labels of shape (1, n_labels); these values will be used if
        `interp_type='latents'`, and they will be ignored if `interp_type='labels'`
(since `points` will be used)
points : :obj:`list`
one entry for each point in path; each entry is an np.ndarray of shape (n_latents,)
n_frames : :obj:`int` or :obj:`array-like`
number of interpolation points between each point; can be an integer that is used
        for all paths, or an array/list of length one less than the number of points
ch : :obj:`int`, optional
specify which channel of input images to return; if not an int, all channels are
concatenated in the horizontal dimension
crop_kwargs : :obj:`dict`, optional
if crop_type is not None, provides information about the crop (for a fixed crop window)
keys : 'y_0', 'x_0', 'y_ext', 'x_ext'; window is
(y_0 - y_ext, y_0 + y_ext) in vertical direction and
(x_0 - x_ext, x_0 + x_ext) in horizontal direction
apply_inverse_transform : :obj:`bool`
if inputs are latents (and model class is 'cond-ae-msp' or 'ps-vae'), apply inverse
transform to put in original latent space
Returns
-------
:obj:`tuple`
- ims_list (:obj:`list` of :obj:`np.ndarray`) interpolated images
- inputs_list (:obj:`list` of :obj:`np.ndarray`) interpolated values
"""
if model.hparams.get('conditional_encoder', False):
raise NotImplementedError
n_points = len(points)
if isinstance(n_frames, int):
n_frames = [n_frames] * (n_points - 1)
assert len(n_frames) == (n_points - 1)
ims_list = []
inputs_list = []
for p in range(n_points - 1):
p0 = points[None, p]
p1 = points[None, p + 1]
p_vec = (p1 - p0) / n_frames[p]
for pn in range(n_frames[p]):
vec = p0 + pn * p_vec
if interp_type == 'latents':
if model.hparams['model_class'] == 'cond-ae' \
or model.hparams['model_class'] == 'cond-vae':
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=apply_inverse_transform,
labels=torch.from_numpy(labels_0).float().to(model.hparams['device']))
else:
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=apply_inverse_transform)
elif interp_type == 'labels':
if model.hparams['model_class'] == 'cond-ae-msp' \
or model.hparams['model_class'] == 'ps-vae':
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=True)
else: # cond-ae
im_tmp = get_reconstruction(
model, ims_0,
labels=torch.from_numpy(vec).float().to(model.hparams['device']))
else:
raise NotImplementedError
if crop_kwargs is not None:
if not isinstance(ch, int):
raise ValueError('"ch" must be an integer to use crop_kwargs')
ims_list.append(get_crop(
im_tmp[0, ch],
crop_kwargs['y_0'], crop_kwargs['y_ext'],
crop_kwargs['x_0'], crop_kwargs['x_ext']))
else:
if isinstance(ch, int):
ims_list.append(np.copy(im_tmp[0, ch]))
else:
ims_list.append(np.copy(concat(im_tmp[0])))
inputs_list.append(vec)
return ims_list, inputs_list
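# Illustrative sketch, not part of the original module: the path construction in
# `interpolate_point_path()` is piecewise linear -- between consecutive points
# p0 and p1 it emits n_frames[p] vectors p0 + k * (p1 - p0) / n_frames[p], so the
# endpoint of each segment is the start of the next. The helper below isolates
# that arithmetic with dummy 2-d points; the name and defaults are hypothetical.
def _example_point_path(points=None, n_frames=4):
    import numpy as np
    if points is None:
        points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
    vecs = []
    for p in range(len(points) - 1):
        p0, p1 = points[None, p], points[None, p + 1]
        p_vec = (p1 - p0) / n_frames
        for pn in range(n_frames):
            vecs.append(p0 + pn * p_vec)
    return np.concatenate(vecs, axis=0)  # shape (n_frames * (n_points - 1), 2)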
def _get_updated_scaled_labels(labels_og, idxs=None, vals=None):
"""Helper function for interpolate_xd functions."""
if labels_og is not None:
if len(labels_og.shape) == 4:
# 2d scaled labels
tmp = np.copy(labels_og)
t, y, x = np.where(tmp[0] == 1)
labels_sc = np.hstack([x, y])[None, :]
else:
# 1d scaled labels
labels_sc = np.copy(labels_og)
if idxs is not None:
if isinstance(idxs, int):
assert isinstance(vals, float)
idxs = [idxs]
vals = [vals]
else:
assert len(idxs) == len(vals)
for idx, val in zip(idxs, vals):
labels_sc[0, idx] = val
else:
labels_sc = None
return labels_sc
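# Illustrative sketch, not part of the original module: when scaled labels are
# stored as a 2d one-hot tensor of shape (1, n_markers, y_pix, x_pix),
# `_get_updated_scaled_labels()` recovers pixel coordinates with `np.where` and
# stacks them as [x..., y...]. The toy example below shows that conversion for a
# single fake marker; the shapes are arbitrary.
def _example_one_hot_to_coords():
    import numpy as np
    labels_og = np.zeros((1, 1, 8, 10))  # (batch, marker, y_pix, x_pix)
    labels_og[0, 0, 3, 7] = 1            # marker at y=3, x=7
    t, y, x = np.where(labels_og[0] == 1)
    return np.hstack([x, y])[None, :]    # array([[7, 3]])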
# ----------------------------------------
# mid-level plotting functions
# ----------------------------------------
def plot_2d_frame_array(
ims_list, markers=None, im_kwargs=None, marker_kwargs=None, figsize=None, save_file=None,
format='pdf'):
"""Plot list of list of interpolated images output by :func:`interpolate_2d()` in a 2d grid.
Parameters
----------
ims_list : :obj:`list` of :obj:`list`
each inner list element holds an np.ndarray of shape (y_pix, x_pix)
markers : :obj:`list` of :obj:`list` or NoneType, optional
each inner list element holds an array-like object with values (y_pix, x_pix); if None,
markers are not plotted on top of frames
im_kwargs : :obj:`dict` or NoneType, optional
kwargs for `matplotlib.pyplot.imshow()` function (vmin, vmax, cmap, etc)
marker_kwargs : :obj:`dict` or NoneType, optional
kwargs for `matplotlib.pyplot.plot()` function (markersize, markeredgewidth, etc)
figsize : :obj:`tuple`, optional
(width, height) in inches
save_file : :obj:`str` or NoneType, optional
figure saved if not None
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
"""
n_y = len(ims_list)
n_x = len(ims_list[0])
if figsize is None:
y_pix, x_pix = ims_list[0][0].shape
# how many inches per pixel?
in_per_pix = 15 / (x_pix * n_x)
figsize = (15, in_per_pix * y_pix * n_y)
fig, axes = plt.subplots(n_y, n_x, figsize=figsize)
if im_kwargs is None:
im_kwargs = {'vmin': 0, 'vmax': 1, 'cmap': 'gray'}
if marker_kwargs is None:
marker_kwargs = {'markersize': 20, 'markeredgewidth': 3}
for r, ims_list_y in enumerate(ims_list):
for c, im in enumerate(ims_list_y):
axes[r, c].imshow(im, **im_kwargs)
axes[r, c].set_xticks([])
axes[r, c].set_yticks([])
if markers is not None:
axes[r, c].plot(
markers[r][c][1], markers[r][c][0], 'o', **marker_kwargs)
plt.subplots_adjust(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1)
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, bbox_inches='tight')
plt.show()
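# Illustrative sketch, not part of the original module: a minimal call to
# `plot_2d_frame_array()` with random frames and no markers. Note that the
# function indexes `axes[r, c]`, so it assumes at least a 2x2 grid of frames;
# the helper name and frame sizes are hypothetical.
def _example_plot_2d_frame_array():
    import numpy as np
    ims_list = [[np.random.rand(32, 32) for _ in range(3)] for _ in range(3)]
    plot_2d_frame_array(ims_list, markers=None, save_file=None)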
def plot_1d_frame_array(
ims_list, markers=None, im_kwargs=None, marker_kwargs=None, plot_ims=True, plot_diffs=True,
figsize=None, save_file=None, format='pdf'):
"""Plot list of list of interpolated images output by :func:`interpolate_1d()` in a 2d grid.
Parameters
----------
ims_list : :obj:`list` of :obj:`list`
each inner list element holds an np.ndarray of shape (y_pix, x_pix)
markers : :obj:`list` of :obj:`list` or NoneType, optional
each inner list element holds an array-like object with values (y_pix, x_pix); if None,
markers are not plotted on top of frames
im_kwargs : :obj:`dict` or NoneType, optional
kwargs for `matplotlib.pyplot.imshow()` function (vmin, vmax, cmap, etc)
marker_kwargs : :obj:`dict` or NoneType, optional
kwargs for `matplotlib.pyplot.plot()` function (markersize, markeredgewidth, etc)
plot_ims : :obj:`bool`, optional
plot images
plot_diffs : :obj:`bool`, optional
plot differences
figsize : :obj:`tuple`, optional
(width, height) in inches
save_file : :obj:`str` or NoneType, optional
figure saved if not None
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
"""
if not (plot_ims or plot_diffs):
raise ValueError('Must plot at least one of ims or diffs')
if plot_ims and plot_diffs:
n_y = len(ims_list) * 2
offset = 2
else:
n_y = len(ims_list)
offset = 1
n_x = len(ims_list[0])
if figsize is None:
y_pix, x_pix = ims_list[0][0].shape
# how many inches per pixel?
in_per_pix = 15 / (x_pix * n_x)
figsize = (15, in_per_pix * y_pix * n_y)
fig, axes = plt.subplots(n_y, n_x, figsize=figsize)
if im_kwargs is None:
im_kwargs = {'vmin': 0, 'vmax': 1, 'cmap': 'gray'}
if marker_kwargs is None:
marker_kwargs = {'markersize': 20, 'markeredgewidth': 3}
for r, ims_list_y in enumerate(ims_list):
base_im = ims_list_y[0]
for c, im in enumerate(ims_list_y):
# plot original images
if plot_ims:
axes[offset * r, c].imshow(im, **im_kwargs)
axes[offset * r, c].set_xticks([])
axes[offset * r, c].set_yticks([])
if markers is not None:
axes[offset * r, c].plot(
markers[r][c][1], markers[r][c][0], 'o', **marker_kwargs)
# plot differences
if plot_diffs and plot_ims:
axes[offset * r + 1, c].imshow(0.5 + (im - base_im), **im_kwargs)
axes[offset * r + 1, c].set_xticks([])
axes[offset * r + 1, c].set_yticks([])
elif plot_diffs:
axes[offset * r, c].imshow(0.5 + (im - base_im), **im_kwargs)
axes[offset * r, c].set_xticks([])
axes[offset * r, c].set_yticks([])
plt.subplots_adjust(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1)
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, bbox_inches='tight')
plt.show()
def make_interpolated(
ims, save_file, markers=None, text=None, text_title=None, text_color=[1, 1, 1],
frame_rate=20, scale=3, markersize=10, markeredgecolor='w', markeredgewidth=1, ax=None):
"""Make a latent space interpolation movie.
Parameters
----------
ims : :obj:`list` of :obj:`np.ndarray`
each list element is an array of shape (y_pix, x_pix)
save_file : :obj:`str`
absolute path of save file; does not need file extension, will automatically be saved as
mp4. To save as a gif, include the '.gif' file extension in `save_file`. The movie will
only be saved if `ax` is `NoneType`; else the list of animated frames is returned
markers : :obj:`array-like`, optional
array of size (n_frames, 2) which specifies the (x, y) coordinates of a marker on each
frame
text : :obj:`array-like`, optional
array of size (n_frames) which specifies text printed in the lower left corner of each
frame
text_title : :obj:`array-like`, optional
array of size (n_frames) which specifies text printed in the upper left corner of each
frame
text_color : :obj:`array-like`, optional
rgb array specifying color of `text` and `text_title`, if applicable
frame_rate : :obj:`float`, optional
frame rate of saved movie
scale : :obj:`float`, optional
width of panel is (scale / 2) inches
markersize : :obj:`float`, optional
size of marker if `markers` is not `NoneType`
    markeredgecolor : :obj:`str` or :obj:`array-like`, optional
color of marker edge if `markers` is not `NoneType`
markeredgewidth : :obj:`float`, optional
width of marker edge if `markers` is not `NoneType`
ax : :obj:`matplotlib.axes.Axes` object
optional axis in which to plot the frames; if this argument is not `NoneType` the list of
animated frames is returned and the movie is not saved
Returns
-------
:obj:`list`
        list of lists of animated frames if `ax` is not `NoneType`; else the movie is saved
"""
y_pix, x_pix = ims[0].shape
if ax is None:
fig_width = scale / 2
fig_height = y_pix / x_pix * scale / 2
fig = plt.figure(figsize=(fig_width, fig_height), dpi=300)
ax = plt.gca()
return_ims = False
else:
return_ims = True
ax.set_xticks([])
ax.set_yticks([])
default_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
txt_kwargs = {
'fontsize': 4, 'color': text_color, 'fontname': 'monospace',
'horizontalalignment': 'left', 'verticalalignment': 'center',
'transform': ax.transAxes}
# ims is a list of lists, each row is a list of artists to draw in the current frame; here we
# are just animating one artist, the image, in each frame
ims_ani = []
for i, im in enumerate(ims):
im_tmp = []
im_tmp.append(ax.imshow(im, **default_kwargs))
# [s.set_visible(False) for s in ax.spines.values()]
if markers is not None:
im_tmp.append(ax.plot(
markers[i, 0], markers[i, 1], '.r', markersize=markersize,
markeredgecolor=markeredgecolor, markeredgewidth=markeredgewidth)[0])
if text is not None:
im_tmp.append(ax.text(0.02, 0.06, text[i], **txt_kwargs))
if text_title is not None:
im_tmp.append(ax.text(0.02, 0.92, text_title[i], **txt_kwargs))
ims_ani.append(im_tmp)
if return_ims:
return ims_ani
else:
plt.tight_layout(pad=0)
ani = animation.ArtistAnimation(fig, ims_ani, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
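# Illustrative sketch, not part of the original module: `make_interpolated()`
# only needs a list of 2d frames; markers/text are optional. The snippet below
# fades a random frame to black and would write a movie via the module's
# `save_movie()` helper (assumed to be imported elsewhere in this file, along
# with an ffmpeg backend); the output path is made up for the example.
def _example_make_interpolated(save_file='/tmp/example_interp'):
    import numpy as np
    base = np.random.rand(64, 64)
    ims = [np.clip(base * (1 - a), 0, 1) for a in np.linspace(0, 1, 30)]
    make_interpolated(ims, save_file=save_file, frame_rate=15)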
def make_interpolated_multipanel(
ims, save_file, markers=None, text=None, text_title=None, frame_rate=20, n_cols=3, scale=1,
**kwargs):
"""Make a multi-panel latent space interpolation movie.
Parameters
----------
ims : :obj:`list` of :obj:`list` of :obj:`np.ndarray`
        each list element is used for a single panel, and is another list that contains arrays
of shape (y_pix, x_pix)
save_file : :obj:`str`
absolute path of save file; does not need file extension, will automatically be saved as
mp4. To save as a gif, include the '.gif' file extension in `save_file`.
markers : :obj:`list` of :obj:`array-like`, optional
each list element is used for a single panel, and is an array of size (n_frames, 2)
which specifies the (x, y) coordinates of a marker on each frame for that panel
text : :obj:`list` of :obj:`array-like`, optional
each list element is used for a single panel, and is an array of size (n_frames) which
specifies text printed in the lower left corner of each frame for that panel
text_title : :obj:`list` of :obj:`array-like`, optional
each list element is used for a single panel, and is an array of size (n_frames) which
specifies text printed in the upper left corner of each frame for that panel
frame_rate : :obj:`float`, optional
frame rate of saved movie
n_cols : :obj:`int`, optional
movie is `n_cols` panels wide
scale : :obj:`float`, optional
width of panel is (scale / 2) inches
kwargs
arguments are additional arguments to :func:`make_interpolated`, like 'markersize',
'markeredgewidth', 'markeredgecolor', etc.
"""
n_panels = len(ims)
markers = [None] * n_panels if markers is None else markers
text = [None] * n_panels if text is None else text
y_pix, x_pix = ims[0][0].shape
n_rows = int(np.ceil(n_panels / n_cols))
fig_width = scale / 2 * n_cols
fig_height = y_pix / x_pix * scale / 2 * n_rows
fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_width, fig_height), dpi=300)
plt.subplots_adjust(wspace=0, hspace=0, left=0, bottom=0, right=1, top=1)
# fill out empty panels with black frames
while len(ims) < n_rows * n_cols:
        ims.append([np.zeros_like(im) for im in ims[0]])  # black frames matching panel 0
markers.append(None)
text.append(None)
# ims is a list of lists, each row is a list of artists to draw in the current frame; here we
# are just animating one artist, the image, in each frame
ims_ani = []
for i, (ims_curr, markers_curr, text_curr) in enumerate(zip(ims, markers, text)):
col = i % n_cols
row = int(np.floor(i / n_cols))
if i == 0:
text_title_str = text_title
else:
text_title_str = None
if n_rows == 1:
ax = axes[col]
elif n_cols == 1:
ax = axes[row]
else:
ax = axes[row, col]
ims_ani_curr = make_interpolated(
ims=ims_curr, markers=markers_curr, text=text_curr, text_title=text_title_str, ax=ax,
save_file=None, **kwargs)
ims_ani.append(ims_ani_curr)
# turn off other axes
i += 1
while i < n_rows * n_cols:
col = i % n_cols
row = int(np.floor(i / n_cols))
axes[row, col].set_axis_off()
i += 1
# rearrange ims:
# currently a list of length n_panels, each element of which is a list of length n_t
# we need a list of length n_t, each element of which is a list of length n_panels
n_frames = len(ims_ani[0])
ims_final = [[] for _ in range(n_frames)]
for i in range(n_frames):
for j in range(n_panels):
ims_final[i] += ims_ani[j][i]
ani = animation.ArtistAnimation(fig, ims_final, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
# ----------------------------------------
# high-level plotting functions
# ----------------------------------------
def _get_psvae_hparams(**kwargs):
hparams = {
'data_dir': get_user_dir('data'),
'save_dir': get_user_dir('save'),
'model_class': 'ps-vae',
'model_type': 'conv',
'rng_seed_data': 0,
'trial_splits': '8;1;1;0',
'train_frac': 1.0,
'rng_seed_model': 0,
'fit_sess_io_layers': False,
'learning_rate': 1e-4,
'l2_reg': 0,
'conditional_encoder': False,
'vae.beta': 1}
# update hparams
for key, val in kwargs.items():
if key == 'alpha' or key == 'beta' or key == 'gamma':
hparams['ps_vae.%s' % key] = val
else:
hparams[key] = val
return hparams
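# Illustrative sketch, not part of the original module: `_get_psvae_hparams()`
# maps the bare keywords 'alpha'/'beta'/'gamma' onto the namespaced keys
# 'ps_vae.alpha' etc., while any other keyword is stored verbatim. The call
# below is hypothetical and relies on `get_user_dir` (used inside the function)
# being importable.
def _example_psvae_hparams():
    hparams = _get_psvae_hparams(alpha=100, gamma=500, experiment_name='grid_search')
    # hparams['ps_vae.alpha'] == 100, hparams['ps_vae.gamma'] == 500,
    # hparams['experiment_name'] == 'grid_search'
    return hparams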
def plot_psvae_training_curves(
lab, expt, animal, session, alphas, betas, gammas, n_ae_latents, rng_seeds_model,
experiment_name, n_labels, dtype='val', save_file=None, format='pdf', **kwargs):
"""Create training plots for each term in the ps-vae objective function.
The `dtype` argument controls which type of trials are plotted ('train' or 'val').
Additionally, multiple models can be plotted simultaneously by varying one (and only one) of
the following parameters:
- alpha
- beta
- gamma
- number of unsupervised latents
- random seed used to initialize model weights
Each of these entries must be an array of length 1 except for one option, which can be an array
of arbitrary length (corresponding to already trained models). This function generates a single
plot with panels for each of the following terms:
- total loss
- pixel mse
- label R^2 (note the objective function contains the label MSE, but R^2 is easier to parse)
- KL divergence of supervised latents
- index-code mutual information of unsupervised latents
- total correlation of unsupervised latents
- dimension-wise KL of unsupervised latents
- subspace overlap
Parameters
----------
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
animal : :obj:`str`
animal id
session : :obj:`str`
session id
alphas : :obj:`array-like`
alpha values to plot
betas : :obj:`array-like`
beta values to plot
gammas : :obj:`array-like`
gamma values to plot
n_ae_latents : :obj:`array-like`
unsupervised dimensionalities to plot
rng_seeds_model : :obj:`array-like`
model seeds to plot
experiment_name : :obj:`str`
test-tube experiment name
n_labels : :obj:`int`
dimensionality of supervised latent space
dtype : :obj:`str`
'train' | 'val'
save_file : :obj:`str`, optional
absolute path of save file; does not need file extension
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
kwargs
arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.
"""
# check for arrays, turn ints into lists
n_arrays = 0
hue = None
if len(alphas) > 1:
n_arrays += 1
hue = 'alpha'
if len(betas) > 1:
n_arrays += 1
hue = 'beta'
if len(gammas) > 1:
n_arrays += 1
hue = 'gamma'
if len(n_ae_latents) > 1:
n_arrays += 1
hue = 'n latents'
if len(rng_seeds_model) > 1:
n_arrays += 1
hue = 'rng seed'
if n_arrays > 1:
raise ValueError(
'Can only set one of "alphas", "betas", "gammas", "n_ae_latents", or ' +
'"rng_seeds_model" as an array')
# set model info
hparams = _get_psvae_hparams(experiment_name=experiment_name, **kwargs)
metrics_list = [
'loss', 'loss_data_mse', 'label_r2',
'loss_zs_kl', 'loss_zu_mi', 'loss_zu_tc', 'loss_zu_dwkl', 'loss_AB_orth']
metrics_dfs = []
i = 0
for alpha in alphas:
for beta in betas:
for gamma in gammas:
for n_latents in n_ae_latents:
for rng in rng_seeds_model:
# update hparams
hparams['ps_vae.alpha'] = alpha
hparams['ps_vae.beta'] = beta
hparams['ps_vae.gamma'] = gamma
hparams['n_ae_latents'] = n_latents + n_labels
hparams['rng_seed_model'] = rng
try:
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
_, version = experiment_exists(hparams, which_version=True)
print(
'loading results with alpha=%i, beta=%i, gamma=%i (version %i)' %
(alpha, beta, gamma, version))
metrics_dfs.append(load_metrics_csv_as_df(
hparams, lab, expt, metrics_list, version=None))
metrics_dfs[i]['alpha'] = alpha
metrics_dfs[i]['beta'] = beta
metrics_dfs[i]['gamma'] = gamma
metrics_dfs[i]['n latents'] = hparams['n_ae_latents']
metrics_dfs[i]['rng seed'] = rng
i += 1
except TypeError:
print(
'could not find model for alpha=%i, beta=%i, gamma=%i' %
(alpha, beta, gamma))
continue
metrics_df = pd.concat(metrics_dfs, sort=False)
sns.set_style('white')
sns.set_context('talk')
data_queried = metrics_df[
(metrics_df.epoch > 10) & ~pd.isna(metrics_df.val) & (metrics_df.dtype == dtype)]
g = sns.FacetGrid(
data_queried, col='loss', col_wrap=3, hue=hue, sharey=False, height=4)
g = g.map(plt.plot, 'epoch', 'val').add_legend() # , color=".3", fit_reg=False, x_jitter=.1);
if save_file is not None:
make_dir_if_not_exists(save_file)
g.savefig(save_file + '.' + format, dpi=300, format=format)
def plot_hyperparameter_search_results(
lab, expt, animal, session, n_labels, label_names, alpha_weights, alpha_n_ae_latents,
alpha_expt_name, beta_weights, gamma_weights, beta_gamma_n_ae_latents,
beta_gamma_expt_name, alpha, beta, gamma, save_file, batch_size=None, format='pdf',
**kwargs):
"""Create a variety of diagnostic plots to assess the ps-vae hyperparameters.
These diagnostic plots are based on the recommended way to perform a hyperparameter search in
the ps-vae models; first, fix beta=1 and gamma=0, and do a sweep over alpha values and number
of latents (for example alpha=[50, 100, 500, 1000] and n_ae_latents=[2, 4, 8, 16]). The best
alpha value is subjective because it involves a tradeoff between pixel mse and label mse. After
choosing a suitable value, fix alpha and the number of latents and vary beta and gamma. This
function will then plot the following panels:
- pixel mse as a function of alpha/num latents (for fixed beta/gamma)
- label mse as a function of alpha/num_latents (for fixed beta/gamma)
- pixel mse as a function of beta/gamma (for fixed alpha/n_ae_latents)
- label mse as a function of beta/gamma (for fixed alpha/n_ae_latents)
- index-code mutual information (part of the KL decomposition) as a function of beta/gamma (for
fixed alpha/n_ae_latents)
    - total correlation (part of the KL decomposition) as a function of beta/gamma (for fixed
alpha/n_ae_latents)
- dimension-wise KL (part of the KL decomposition) as a function of beta/gamma (for fixed
alpha/n_ae_latents)
- average correlation coefficient across all pairs of unsupervised latent dims as a function of
beta/gamma (for fixed alpha/n_ae_latents)
- subspace overlap computed as ||[A; B] - I||_2^2 for A, B the projections to the supervised
and unsupervised subspaces, respectively, and I the identity - as a function of beta/gamma
(for fixed alpha/n_ae_latents)
- example subspace overlap matrix for gamma=0 and beta=1, with fixed alpha/n_ae_latents
- example subspace overlap matrix for gamma=1000 and beta=1, with fixed alpha/n_ae_latents
Parameters
----------
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
animal : :obj:`str`
animal id
session : :obj:`str`
session id
n_labels : :obj:`str`
number of label dims
label_names : :obj:`array-like`
names of label dims
alpha_weights : :obj:`array-like`
array of alpha weights for fixed values of beta, gamma
alpha_n_ae_latents : :obj:`array-like`
array of latent dimensionalities for fixed values of beta, gamma using alpha_weights
alpha_expt_name : :obj:`str`
test-tube experiment name of alpha-based hyperparam search
beta_weights : :obj:`array-like`
array of beta weights for a fixed value of alpha
gamma_weights : :obj:`array-like`
array of beta weights for a fixed value of alpha
beta_gamma_n_ae_latents : :obj:`int`
latent dimensionality used for beta-gamma hyperparam search
beta_gamma_expt_name : :obj:`str`
test-tube experiment name of beta-gamma hyperparam search
alpha : :obj:`float`
fixed value of alpha for beta-gamma search
beta : :obj:`float`
fixed value of beta for alpha search
gamma : :obj:`float`
fixed value of gamma for alpha search
save_file : :obj:`str`
absolute path of save file; does not need file extension
batch_size : :obj:`int`, optional
size of batches, used to compute correlation coefficient per batch; if NoneType, the
correlation coefficient is computed across all time points
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
kwargs
arguments are keys of `hparams`, preceded by either `alpha_` or `beta_gamma_`. For example,
to set the train frac of the alpha models, use `alpha_train_frac`; to set the rng_data_seed
of the beta-gamma models, use `beta_gamma_rng_data_seed`.
"""
def apply_masks(data, masks):
return data[masks == 1]
def get_label_r2(hparams, model, data_generator, version, dtype='val', overwrite=False):
from sklearn.metrics import r2_score
save_file = os.path.join(
hparams['expt_dir'], 'version_%i' % version, 'r2_supervised.csv')
if not os.path.exists(save_file) or overwrite:
if not os.path.exists(save_file):
print('R^2 metrics do not exist; computing from scratch')
else:
print('overwriting metrics at %s' % save_file)
metrics_df = []
data_generator.reset_iterators(dtype)
for i_test in tqdm(range(data_generator.n_tot_batches[dtype])):
# get next minibatch and put it on the device
data, sess = data_generator.next_batch(dtype)
x = data['images'][0]
y = data['labels'][0].cpu().detach().numpy()
if 'labels_masks' in data:
n = data['labels_masks'][0].cpu().detach().numpy()
else:
n = np.ones_like(y)
z = model.get_transformed_latents(x, dataset=sess)
for i in range(n_labels):
y_true = apply_masks(y[:, i], n[:, i])
y_pred = apply_masks(z[:, i], n[:, i])
if len(y_true) > 10:
r2 = r2_score(y_true, y_pred, multioutput='variance_weighted')
mse = np.mean(np.square(y_true - y_pred))
else:
r2 = np.nan
mse = np.nan
metrics_df.append(pd.DataFrame({
'Trial': data['batch_idx'].item(),
'Label': label_names[i],
'R2': r2,
'MSE': mse,
'Model': 'PS-VAE'}, index=[0]))
metrics_df = pd.concat(metrics_df)
print('saving results to %s' % save_file)
metrics_df.to_csv(save_file, index=False, header=True)
else:
print('loading results from %s' % save_file)
metrics_df = pd.read_csv(save_file)
return metrics_df
# -----------------------------------------------------
# load pixel/label MSE as a function of n_latents/alpha
# -----------------------------------------------------
# set model info
hparams = _get_psvae_hparams(experiment_name=alpha_expt_name)
# update hparams
for key, val in kwargs.items():
# hparam vals should be named 'alpha_[property]', for example 'alpha_train_frac'
if key.split('_')[0] == 'alpha':
prop = key[6:]
hparams[prop] = val
else:
hparams[key] = val
metrics_list = ['loss_data_mse']
metrics_dfs_frame = []
metrics_dfs_marker = []
for n_latent in alpha_n_ae_latents:
hparams['n_ae_latents'] = n_latent + n_labels
for alpha_ in alpha_weights:
hparams['ps_vae.alpha'] = alpha_
hparams['ps_vae.beta'] = beta
hparams['ps_vae.gamma'] = gamma
try:
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
_, version = experiment_exists(hparams, which_version=True)
print('loading results with alpha=%i, beta=%i, gamma=%i (version %i)' % (
hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma'],
version))
# get frame mse
metrics_dfs_frame.append(load_metrics_csv_as_df(
hparams, lab, expt, metrics_list, version=None, test=True))
metrics_dfs_frame[-1]['alpha'] = alpha_
metrics_dfs_frame[-1]['n_latents'] = hparams['n_ae_latents']
# get marker mse
model, data_gen = get_best_model_and_data(
hparams, Model=None, load_data=True, version=version)
metrics_df_ = get_label_r2(hparams, model, data_gen, version, dtype='val')
metrics_df_['alpha'] = alpha_
metrics_df_['n_latents'] = hparams['n_ae_latents']
metrics_dfs_marker.append(metrics_df_[metrics_df_.Model == 'PS-VAE'])
except TypeError:
print('could not find model for alpha=%i, beta=%i, gamma=%i' % (
hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma']))
continue
metrics_df_frame = | pd.concat(metrics_dfs_frame, sort=False) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 14:08:43 2019
to produce X and y use combine_pos_neg_from_nc_file or
prepare_X_y_for_holdout_test
@author: ziskin
"""
from PW_paths import savefig_path
from PW_paths import work_yuval
from pathlib import Path
cwd = Path().cwd()
hydro_path = work_yuval / 'hydro'
axis_path = work_yuval/'axis'
gis_path = work_yuval / 'gis'
ims_path = work_yuval / 'IMS_T'
hydro_ml_path = hydro_path / 'hydro_ML'
gnss_path = work_yuval / 'GNSS_stations'
# 'tela': 17135
hydro_pw_dict = {'nizn': 25191, 'klhv': 21105, 'yrcm': 55165,
'ramo': 56140, 'drag': 48125, 'dsea': 48192,
'spir': 56150, 'nrif': 60105, 'elat': 60190
}
hydro_st_name_dict = {25191: 'Lavan - new nizana road',
21105: 'Shikma - Tel milcha',
55165: 'Mamsheet',
56140: 'Ramon',
48125: 'Draga',
48192: 'Chiemar - down the cliff',
46150: 'Nekrot - Top',
60105: 'Yaelon - Kibutz Yahel',
60190: 'Solomon - Eilat'}
best_hp_models_dict = {'SVC': {'kernel': 'rbf', 'C': 1.0, 'gamma': 0.02,
'coef0': 0.0, 'degree': 1},
'RF': {'max_depth': 5, 'max_features': 'auto',
'min_samples_leaf': 1, 'min_samples_split': 2,
'n_estimators': 400},
'MLP': {'alpha': 0.1, 'activation': 'relu',
'hidden_layer_sizes': (10,10,10), 'learning_rate': 'constant',
'solver': 'lbfgs'}}
scorer_order = ['precision', 'recall', 'f1', 'accuracy', 'tss', 'hss']
tsafit_dict = {'lat': 30.985556, 'lon': 35.263056,
'alt': -35.75, 'dt_utc': '2018-04-26T10:15:00'}
axis_southern_stations = ['Dimo', 'Ohad', 'Ddse', 'Yotv', 'Elat', 'Raha', 'Yaha']
soi_axis_dict = {'yrcm': 'Dimo',
'slom': 'Ohad',
'dsea': 'Ddse',
'nrif': 'Yotv',
'elat': 'Elat',
'klhv': 'Raha',
'spir': 'Yaha'}
def plot_mean_abs_shap_values_features(SV, fix_xticklabels=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
features = ['pwv', 'pressure', 'DOY']
# sns.set_palette('Dark2', 6)
sns.set_theme(style='ticks', font_scale=1.5)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sv = np.abs(SV).mean('sample').sel(clas=0).reset_coords(drop=True)
gr_spec = [20, 20, 1]
fig, axes = plt.subplots(1, 3, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(features):
fe = [x for x in sv['feature'].values if f in x]
dsf = sv.sel(feature=fe).reset_coords(drop=True).to_dataframe()
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8, color='k', alpha=0.8)
axes[i].set_title(title)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
# axes[i].legend(handles=handles, labels=labels, prop={'size': fontsize-3}, loc='upper center')
axes[i].set_ylabel('mean(|SHAP value|)\n(average impact\non model output magnitude)')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
# n = sum(['pwv' in x for x in sv.feature.values])
axes[2].xaxis.set_ticklabels('')
axes[2].set_xlabel('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].tick_params()
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
filename = 'RF_shap_values_{}.png'.format('+'.join(features))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def read_binary_classification_shap_values_to_pandas(shap_values, X):
import xarray as xr
SV0 = X.copy(data=shap_values[0])
SV1 = X.copy(data=shap_values[1])
SV = xr.concat([SV0, SV1], dim='clas')
SV['clas'] = [0, 1]
return SV
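# Illustrative sketch, not part of the original module: for a binary classifier,
# `shap.TreeExplainer.shap_values()` returns a list of two arrays (one per
# class) with the same shape as X. The helper above stacks them into a single
# DataArray along a new 'clas' dimension; the dummy shapes below are arbitrary.
def _example_stack_shap_values():
    import numpy as np
    import xarray as xr
    X = xr.DataArray(np.random.rand(5, 3), dims=['sample', 'feature'],
                     coords={'feature': ['pwv_1', 'pwv_2', 'DOY']})
    shap_values = [np.zeros_like(X.values), np.ones_like(X.values)]
    return read_binary_classification_shap_values_to_pandas(shap_values, X)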
def get_shap_values_RF_classifier(plot=True):
import shap
X, y = combine_pos_neg_from_nc_file()
ml = ML_Classifier_Switcher()
rf = ml.pick_model('RF')
rf.set_params(**best_hp_models_dict['RF'])
X = select_doy_from_feature_list(X, features=['pwv', 'pressure', 'doy'])
rf.fit(X, y)
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X.values)
if plot:
shap.summary_plot(shap_values, X, feature_names=[
x for x in X.feature.values], max_display=49, sort=False)
return shap_values
def interpolate_pwv_to_tsafit_event(path=work_yuval, savepath=work_yuval):
import pandas as pd
import xarray as xr
from PW_stations import produce_geo_gnss_solved_stations
from interpolation_routines import interpolate_var_ds_at_multiple_dts
from aux_gps import save_ncfile
# get gnss soi-apn pwv data and geo-meta data:
geo_df = produce_geo_gnss_solved_stations(plot=False)
pw = xr.load_dataset(work_yuval/'GNSS_PW_thresh_50.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw = pw.sel(time=slice('2018-04-25', '2018-04-26'))
pw = pw.drop_vars(['elat', 'elro', 'csar', 'slom'])
# get tsafit data:
predict_df = pd.DataFrame(tsafit_dict, index=['tsafit'])
df_inter = interpolate_var_ds_at_multiple_dts(pw, geo_df, predict_df)
da=df_inter['interpolated_lr_fixed'].to_xarray()
da.name = 'pwv'
da.attrs['operation'] = 'interploated from SOI-APN PWV data'
da.attrs['WV scale height'] = 'variable from SOI-APN data'
da.attrs.update(**tsafit_dict)
if savepath is not None:
filename = 'Tsafit_PWV_event.nc'
save_ncfile(da, savepath, filename)
return da
def plot_tsafit_event(path=work_yuval):
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
da = xr.load_dataarray(path / 'Tsafit_PWV_event.nc')
fig, ax = plt.subplots(figsize=(11, 8))
da_sliced = da.sel(time=slice('2018-04-26T00:00:00', '2018-04-26T12:00:00'))
# da_sliced.name = 'PWV [mm]'
da_sliced = da_sliced.rename({'time': 'Time [UTC]'})
da_sliced.to_dataframe().plot(ax=ax, ylabel='PWV [mm]', linewidth=2, marker='o', legend=False)
dt = pd.to_datetime(da.attrs['dt_utc'])
ax.axvline(dt, color='r', linestyle='--', linewidth=2, label='T')
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles=handles, labels=['PWV', 'Tsafit Flood Event'])
ax.grid(True)
# ax.set_xlabel('Time [UTC]')
fig.tight_layout()
fig.suptitle('PWV from SOI-APN over Tsafit area on 2018-04-26')
fig.subplots_adjust(top=0.941)
return fig
# TODO: treat all pwv from events as follows:
# For each station:
# 0) rolling mean to all pwv 1 hour
# 1) take 288 points before events, if < 144 gone then drop
# 2) interpolate them 12H using spline/other
# 3) then, check if dts coincide 1 day before, if not concat all dts+pwv for each station
# 4) prepare features, such as pressure, doy, try to get pressure near the stations and remove the longterm hour dayofyear
# pressure in BD anoms is highly correlated with SEDOM (0.9) and ELAT (0.88) so no need for local pressure features
# fixed filling with jerusalem centre since 2 drag events dropped due to lack of data 2018-11 2019-02 in pressure
# 5) feature addition: should be like pwv steps 1-3,
# 6) negative events should be sampled separately, for
# 7) now prepare pwv and pressure to single ds with 1 hourly sample rate
# 8) produce positives and save them to file!
# 9) produce a way to get negatives considering the positives
# maybe implement permutation importance to pwv ? see what is more important to
# the model in 24 hours ? only on SVC and MLP ?
# implement TSS and HSS scores and test them (make_scorer from confusion matrix)
# redo results but with inner and outer splits of 4, 4
# plot and see best_score per refit-scorrer - this is the best score of GridSearchCV on the entire
# train/validation subset per each outerfold - basically see if the test_metric increased after the gridsearchcv as it should
# use holdout set
# implement repeatedstratifiedkfold and run it...
# check for stability of the gridsearch CV...also run with 4-folds ?
# finalize the permutation_importances and permutation_test_scores
def prepare_tide_events_GNSS_dataset(hydro_path=hydro_path):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import xr_reindex_with_date_range
feats = xr.load_dataset(
hydro_path/'hydro_tides_hourly_features_with_positives.nc')
ds = feats['Tides'].to_dataset('GNSS').rename({'tide_event': 'time'})
da_list = []
for da in ds:
time = ds[da].dropna('time')
daa = time.copy(data=np.ones(time.shape))
daa['time'] = pd.to_datetime(time.values)
daa.name = time.name + '_tide'
da_list.append(daa)
ds = xr.merge(da_list)
li = [xr_reindex_with_date_range(ds[x], freq='H') for x in ds]
ds = xr.merge(li)
return ds
def select_features_from_X(X, features='pwv'):
if isinstance(features, str):
f = [x for x in X.feature.values if features in x]
X = X.sel(feature=f)
elif isinstance(features, list):
fs = []
for f in features:
fs += [x for x in X.feature.values if f in x]
X = X.sel(feature=fs)
return X
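# Illustrative sketch, not part of the original module: `select_features_from_X()`
# does substring matching on the 'feature' coordinate, so 'pwv' also matches
# 'pwv_1' ... 'pwv_24'. The toy X below shows both the string and the list form
# of `features`; the feature names are made up.
def _example_select_features():
    import numpy as np
    import xarray as xr
    feats = ['pwv_1', 'pwv_2', 'pressure_1', 'DOY']
    X = xr.DataArray(np.random.rand(4, len(feats)), dims=['sample', 'feature'],
                     coords={'feature': feats})
    only_pwv = select_features_from_X(X, features='pwv')          # pwv_1, pwv_2
    pwv_and_doy = select_features_from_X(X, features=['pwv', 'DOY'])
    return only_pwv, pwv_and_doy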
def combine_pos_neg_from_nc_file(hydro_path=hydro_path,
negative_sample_num=1,
seed=1, std=True):
from aux_gps import path_glob
from sklearn.utils import resample
import xarray as xr
import numpy as np
# import pandas as pd
if std:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_std*.nc')[-1]
else:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_*.nc')[-1]
ds = xr.open_dataset(file)
# get the positive features and produce target:
X_pos = ds['X_pos'].rename({'positive_sample': 'sample'})
y_pos = xr.DataArray(np.ones(X_pos['sample'].shape), dims=['sample'])
y_pos['sample'] = X_pos['sample']
# choose at random y_pos size of negative class:
X_neg = ds['X_neg'].rename({'negative_sample': 'sample'})
pos_size = y_pos['sample'].size
np.random.seed(seed)
# negatives = []
for n_samples in [x for x in range(negative_sample_num)]:
# dts = np.random.choice(X_neg['sample'], size=y_pos['sample'].size,
# replace=False)
# print(np.unique(dts).shape)
# negatives.append(X_neg.sel(sample=dts))
negative = resample(X_neg, replace=False,
n_samples=pos_size * negative_sample_num,
random_state=seed)
negatives = np.split(negative, negative_sample_num, axis=0)
Xs = []
ys = []
for X_negative in negatives:
y_neg = xr.DataArray(np.zeros(X_negative['sample'].shape), dims=['sample'])
y_neg['sample'] = X_negative['sample']
# now concat all X's and y's:
X = xr.concat([X_pos, X_negative], 'sample')
y = xr.concat([y_pos, y_neg], 'sample')
X.name = 'X'
Xs.append(X)
ys.append(y)
if len(negatives) == 1:
return Xs[0], ys[0]
else:
return Xs, ys
def drop_hours_in_pwv_pressure_features(X, last_hours=7, verbose=True):
import numpy as np
Xcopy = X.copy()
pwvs_to_drop = ['pwv_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pwvs_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pwvs_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pwvs_to_drop)
pressures_to_drop = ['pressure_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pressures_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pressures_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pressures_to_drop)
return Xcopy
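# Illustrative sketch, not part of the original module: with the default
# `last_hours=7`, the features pwv_18 ... pwv_24 and pressure_18 ... pressure_24
# are dropped, i.e. (presumably) the hours closest to the flood event are hidden
# from the model. The dummy X below only mimics the expected feature naming.
def _example_drop_last_hours():
    import numpy as np
    import xarray as xr
    feats = ['pwv_{}'.format(i) for i in range(1, 25)] + \
            ['pressure_{}'.format(i) for i in range(1, 25)]
    X = xr.DataArray(np.random.rand(3, len(feats)), dims=['sample', 'feature'],
                     coords={'feature': feats})
    return drop_hours_in_pwv_pressure_features(X, last_hours=7, verbose=False)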
def check_if_negatives_are_within_positives(neg_da, hydro_path=hydro_path):
import xarray as xr
import pandas as pd
pos_da = xr.open_dataset(
hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['X']
dt_pos = pos_da.sample.to_dataframe()
dt_neg = neg_da.sample.to_dataframe()
dt_all = dt_pos.index.union(dt_neg.index)
dff = pd.DataFrame(dt_all, index=dt_all)
dff = dff.sort_index()
samples_within = dff[(dff.diff()['sample'] <= pd.Timedelta(1, unit='D'))]
num = samples_within.size
print('samples that are within a day of each other: {}'.format(num))
print('samples are: {}'.format(samples_within))
return dff
def produce_negatives_events_from_feature_file(hydro_path=hydro_path, seed=42,
batches=1, verbose=1, std=True):
# do the same thing for pressure (as for pwv), but not for
import xarray as xr
import numpy as np
import pandas as pd
from aux_gps import save_ncfile
feats = xr.load_dataset(hydro_path / 'hydro_tides_hourly_features.nc')
feats = feats.rename({'doy': 'DOY'})
if std:
pos_filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
pos_filename = 'hydro_tides_hourly_features_with_positives.nc'
all_tides = xr.open_dataset(
hydro_path / pos_filename)['X_pos']
# pos_tides = xr.open_dataset(hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['tide_datetimes']
tides = xr.open_dataset(
hydro_path / pos_filename)['Tides']
# get the positives (tide events) for each station:
df_stns = tides.to_dataset('GNSS').to_dataframe()
# get all positives (tide events) for all stations:
df = all_tides.positive_sample.to_dataframe()['positive_sample']
df.columns = ['sample']
stns = [x for x in hydro_pw_dict.keys()]
other_feats = ['DOY', 'doy_sin', 'doy_cos']
# main stns df features (pwv)
pwv_df = feats[stns].to_dataframe()
pressure = feats['bet-dagan'].to_dataframe()['bet-dagan']
# define the initial no_choice_dt_range from the positive dt_range:
no_choice_dt_range = [pd.date_range(
start=dt, periods=48, freq='H') for dt in df]
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack(no_choice_dt_range)))
dts_to_choose_from = pwv_df.index.difference(no_choice_dt_range)
# dts_to_choose_from_pressure = pwv_df.index.difference(no_choice_dt_range)
# loop over all stns and produce negative events:
np.random.seed(seed)
neg_batches = []
for i in np.arange(1, batches + 1):
if verbose >= 0:
print('preparing batch {}:'.format(i))
neg_stns = []
for stn in stns:
dts_df = df_stns[stn].dropna()
pwv = pwv_df[stn].dropna()
# loop over all events in on stn:
negatives = []
negatives_pressure = []
# neg_samples = []
if verbose >= 1:
print('finding negatives for station {}, events={}'.format(
stn, len(dts_df)))
# print('finding negatives for station {}, dt={}'.format(stn, dt.strftime('%Y-%m-%d %H:%M')))
cnt = 0
while cnt < len(dts_df):
# get random number from each stn pwv:
# r = np.random.randint(low=0, high=len(pwv.index))
# random_dt = pwv.index[r]
random_dt = np.random.choice(dts_to_choose_from)
negative_dt_range = pd.date_range(
start=random_dt, periods=24, freq='H')
if not (no_choice_dt_range.intersection(negative_dt_range)).empty:
# print('#')
if verbose >= 2:
print('Overlap!')
continue
# get the actual pwv and check it is full (24hours):
negative = pwv.loc[pwv.index.intersection(negative_dt_range)]
neg_pressure = pressure.loc[pwv.index.intersection(
negative_dt_range)]
if len(negative.dropna()) != 24 or len(neg_pressure.dropna()) != 24:
# print('!')
if verbose >= 2:
print('NaNs!')
continue
if verbose >= 2:
print('number of dts that are already chosen: {}'.format(
len(no_choice_dt_range)))
negatives.append(negative)
negatives_pressure.append(neg_pressure)
                # now add to the no_choice_dt_range the negative dt_range we just acquired:
negative_dt_range_with_padding = pd.date_range(
start=random_dt-pd.Timedelta(24, unit='H'), end=random_dt+pd.Timedelta(23, unit='H'), freq='H')
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack([no_choice_dt_range, negative_dt_range_with_padding])))
dts_to_choose_from = dts_to_choose_from.difference(
no_choice_dt_range)
if verbose >= 2:
print('number of dts to choose from: {}'.format(
len(dts_to_choose_from)))
cnt += 1
neg_da = xr.DataArray(negatives, dims=['sample', 'feature'])
neg_da['feature'] = ['{}_{}'.format(
'pwv', x) for x in np.arange(1, 25)]
neg_samples = [x.index[0] for x in negatives]
neg_da['sample'] = neg_samples
neg_pre_da = xr.DataArray(
negatives_pressure, dims=['sample', 'feature'])
neg_pre_da['feature'] = ['{}_{}'.format(
'pressure', x) for x in np.arange(1, 25)]
neg_pre_samples = [x.index[0] for x in negatives_pressure]
neg_pre_da['sample'] = neg_pre_samples
neg_da = xr.concat([neg_da, neg_pre_da], 'feature')
neg_da = neg_da.sortby('sample')
neg_stns.append(neg_da)
da_stns = xr.concat(neg_stns, 'sample')
da_stns = da_stns.sortby('sample')
# now loop over the remaining features (which are stns agnostic)
        # and add them with the same negative datetimes of the pwv already acquired:
dts = [pd.date_range(x.item(), periods=24, freq='H')
for x in da_stns['sample']]
dts_samples = [x[0] for x in dts]
other_feat_list = []
for feat in feats[other_feats]:
# other_feat_sample_list = []
da_other = xr.DataArray(feats[feat].sel(time=dts_samples).values, dims=['sample'])
# for dt in dts_samples:
# da_other = xr.DataArray(feats[feat].sel(
# time=dt).values, dims=['feature'])
da_other['sample'] = dts_samples
other_feat_list.append(da_other)
# other_feat_da = xr.concat(other_feat_sample_list, 'feature')
da_other_feats = xr.concat(other_feat_list, 'feature')
da_other_feats['feature'] = other_feats
da_stns = xr.concat([da_stns, da_other_feats], 'feature')
neg_batches.append(da_stns)
neg_batch_da = xr.concat(neg_batches, 'sample')
# neg_batch_da['batch'] = np.arange(1, batches + 1)
neg_batch_da.name = 'X_neg'
feats['X_neg'] = neg_batch_da
feats['X_pos'] = all_tides
feats['X_pwv_stns'] = tides
# feats['tide_datetimes'] = pos_tides
feats = feats.rename({'sample': 'negative_sample'})
if std:
filename = 'hydro_tides_hourly_features_with_positives_negatives_std_{}.nc'.format(
batches)
else:
filename = 'hydro_tides_hourly_features_with_positives_negatives_{}.nc'.format(
batches)
save_ncfile(feats, hydro_path, filename)
return neg_batch_da
def produce_positives_from_feature_file(hydro_path=hydro_path, std=True):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import save_ncfile
# load features:
if std:
file = hydro_path / 'hydro_tides_hourly_features_std.nc'
else:
file = hydro_path / 'hydro_tides_hourly_features.nc'
feats = xr.load_dataset(file)
feats = feats.rename({'doy': 'DOY'})
# load positive event for each station:
dfs = [read_station_from_tide_database(hydro_pw_dict.get(
x), rounding='1H') for x in hydro_pw_dict.keys()]
dfs = check_if_tide_events_from_stations_are_within_time_window(
dfs, days=1, rounding=None, return_hs_list=True)
da_list = []
positives_per_station = []
for i, feat in enumerate(feats):
try:
_, _, pr = produce_pwv_days_before_tide_events(feats[feat], dfs[i],
plot=False, rolling=None,
days_prior=1,
drop_thresh=0.75,
max_gap='6H',
verbose=0)
print('getting positives from station {}'.format(feat))
positives = [pd.to_datetime(
(x[-1].time + pd.Timedelta(1, unit='H')).item()) for x in pr]
da = xr.DataArray(pr, dims=['sample', 'feature'])
da['sample'] = positives
positives_per_station.append(positives)
da['feature'] = ['pwv_{}'.format(x) for x in np.arange(1, 25)]
da_list.append(da)
except IndexError:
continue
da_pwv = xr.concat(da_list, 'sample')
da_pwv = da_pwv.sortby('sample')
# now add more features:
da_list = []
for feat in ['bet-dagan']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt_end in da_pwv.sample:
dt_st = pd.to_datetime(dt_end.item()) - pd.Timedelta(24, unit='H')
dt_end_end = pd.to_datetime(
dt_end.item()) - pd.Timedelta(1, unit='H')
positive = feats[feat].sel(time=slice(dt_st, dt_end_end))
positives.append(positive)
da = xr.DataArray(positives, dims=['sample', 'feature'])
da['sample'] = da_pwv.sample
if feat == 'bet-dagan':
feat_name = 'pressure'
else:
feat_name = feat
da['feature'] = ['{}_{}'.format(feat_name, x)
for x in np.arange(1, 25)]
da_list.append(da)
da_f = xr.concat(da_list, 'feature')
da_list = []
for feat in ['DOY', 'doy_sin', 'doy_cos']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt in da_pwv.sample:
positive = feats[feat].sel(time=dt)
positives.append(positive)
da = xr.DataArray(positives, dims=['sample'])
da['sample'] = da_pwv.sample
# da['feature'] = feat
da_list.append(da)
da_ff = xr.concat(da_list, 'feature')
da_ff['feature'] = ['DOY', 'doy_sin', 'doy_cos']
da = xr.concat([da_pwv, da_f, da_ff], 'feature')
if std:
filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
filename = 'hydro_tides_hourly_features_with_positives.nc'
feats['X_pos'] = da
# now add positives per stations:
pdf = pd.DataFrame(positives_per_station).T
pdf.index.name = 'tide_event'
pos_da = pdf.to_xarray().to_array('GNSS')
pos_da['GNSS'] = [x for x in hydro_pw_dict.keys()]
pos_da.attrs['info'] = 'contains the datetimes of the tide events per GNSS station.'
feats['Tides'] = pos_da
# rename sample to positive sample:
feats = feats.rename({'sample': 'positive_sample'})
save_ncfile(feats, hydro_path, filename)
return feats
def prepare_features_and_save_hourly(work_path=work_yuval, ims_path=ims_path,
savepath=hydro_path, std=True):
import xarray as xr
from aux_gps import save_ncfile
import numpy as np
# pwv = xr.load_dataset(
if std:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms_sd.nc'
pre_filename = 'IMS_BD_hourly_anoms_std_ps_1964-2020.nc'
else:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc'
pre_filename = 'IMS_BD_hourly_anoms_ps_1964-2020.nc'
# work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc')
pwv = xr.load_dataset(work_path / pwv_filename)
pwv_stations = [x for x in hydro_pw_dict.keys()]
pwv = pwv[pwv_stations]
# pwv = pwv.rolling(time=12, keep_attrs=True).mean(keep_attrs=True)
pwv = pwv.resample(time='1H', keep_attrs=True).mean(keep_attrs=True)
# bd = xr.load_dataset(ims_path / 'IMS_BD_anoms_5min_ps_1964-2020.nc')
bd = xr.load_dataset(ims_path / pre_filename)
# min_time = pwv.dropna('time')['time'].min()
# bd = bd.sel(time=slice('1996', None)).resample(time='1H').mean()
bd = bd.sel(time=slice('1996', None))
pressure = bd['bet-dagan']
doy = pwv['time'].copy(data=pwv['time'].dt.dayofyear)
doy.name = 'doy'
    # cyclic encoding of the day of year: 2*pi/366 ~= pi/183, so the features
    # wrap smoothly across the new year:
    doy_sin = np.sin(doy * np.pi / 183)
    doy_sin.name = 'doy_sin'
    doy_cos = np.cos(doy * np.pi / 183)
    doy_cos.name = 'doy_cos'
ds = xr.merge([pwv, pressure, doy, doy_sin, doy_cos])
if std:
filename = 'hydro_tides_hourly_features_std.nc'
else:
filename = 'hydro_tides_hourly_features.nc'
save_ncfile(ds, savepath, filename)
return ds
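# A minimal, self-contained sketch of the cyclic day-of-year encoding used in
# prepare_features_and_save_hourly above. The function name and the plain
# pandas DatetimeIndex input are illustrative, not part of the original pipeline:
def _example_cyclic_doy_encoding(times):
    """Illustrative sketch: encode day-of-year as a sin/cos pair with a
    366-day period (2*pi/366 ~= pi/183), so that Dec 31 and Jan 1 map to
    neighbouring points instead of opposite ends of a linear scale."""
    import numpy as np
    import pandas as pd
    doy = pd.DatetimeIndex(times).dayofyear.values
    angle = 2 * np.pi * doy / 366.0
    return np.sin(angle), np.cos(angle)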
def plot_all_decompositions(X, y, n=2):
import xarray as xr
models = [
'PCA',
'LDA',
'ISO_MAP',
'LLE',
'LLE-modified',
'LLE-hessian',
'LLE-ltsa',
'MDA',
'RTE',
'SE',
'TSNE',
'NCA']
names = [
'Principal Components',
'Linear Discriminant',
'Isomap',
'Locally Linear Embedding',
'Modified LLE',
'Hessian LLE',
'Local Tangent Space Alignment',
'MDS embedding',
'Random forest',
'Spectral embedding',
't-SNE',
'NCA embedding']
name_dict = dict(zip(models, names))
da = xr.DataArray(models, dims=['model'])
da['model'] = models
fg = xr.plot.FacetGrid(da, col='model', col_wrap=4,
sharex=False, sharey=False)
for model_str, ax in zip(da['model'].values, fg.axes.flatten()):
model = model_str.split('-')[0]
method = model_str.split('-')[-1]
if model == method:
method = None
try:
ax = scikit_decompose(X, y, model=model, n=n, method=method, ax=ax)
except ValueError:
pass
ax.set_title(name_dict[model_str])
ax.set_xlabel('')
ax.set_ylabel('')
fg.fig.suptitle('various decomposition projections (n={})'.format(n))
return
def scikit_decompose(X, y, model='PCA', n=2, method=None, ax=None):
from sklearn import (manifold, decomposition, ensemble,
discriminant_analysis, neighbors)
import matplotlib.pyplot as plt
import pandas as pd
# from mpl_toolkits.mplot3d import Axes3D
n_neighbors = 30
if model == 'PCA':
X_decomp = decomposition.TruncatedSVD(n_components=n).fit_transform(X)
elif model == 'LDA':
X2 = X.copy()
X2.values.flat[::X.shape[1] + 1] += 0.01
X_decomp = discriminant_analysis.LinearDiscriminantAnalysis(n_components=n
).fit_transform(X2, y)
    elif model == 'ISO_MAP':
        X_decomp = manifold.Isomap(
            n_neighbors=n_neighbors, n_components=n).fit_transform(X)
    elif model == 'LLE':
        # method = 'standard', 'modified', 'hessian' or 'ltsa'
        if method is None:
            method = 'standard'
        clf = manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors,
                                              n_components=n,
                                              method=method)
        X_decomp = clf.fit_transform(X)
elif model == 'MDA':
clf = manifold.MDS(n_components=n, n_init=1, max_iter=100)
X_decomp = clf.fit_transform(X)
elif model == 'RTE':
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=n)
X_decomp = pca.fit_transform(X_transformed)
elif model == 'SE':
embedder = manifold.SpectralEmbedding(n_components=n, random_state=0,
eigen_solver="arpack")
X_decomp = embedder.fit_transform(X)
elif model == 'TSNE':
tsne = manifold.TSNE(n_components=n, init='pca', random_state=0)
X_decomp = tsne.fit_transform(X)
elif model == 'NCA':
nca = neighbors.NeighborhoodComponentsAnalysis(init='random',
n_components=n, random_state=0)
X_decomp = nca.fit_transform(X, y)
    df = pd.DataFrame(X_decomp)
    df.columns = ['{}_{}'.format(model, x + 1)
                  for x in range(X_decomp.shape[1])]
df['flood'] = y
df['flood'] = df['flood'].astype(int)
df_1 = df[df['flood'] == 1]
df_0 = df[df['flood'] == 0]
if X_decomp.shape[1] == 1:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='r', marker='x',
label='0',
s=50)
elif X_decomp.shape[1] == 2:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='r',
label='0',
s=50)
elif X_decomp.shape[1] == 3:
        # gca(projection='3d') was removed in newer matplotlib; add_subplot is equivalent here:
        ax = plt.figure().add_subplot(projection='3d')
# df_1.plot.scatter(x='{}_1'.format(model), y='{}_2'.format(model), z='{}_3'.format(model), color='b', label='1', s=50, ax=threedee)
ax.scatter(df_1['{}_1'.format(model)],
df_1['{}_2'.format(model)],
df_1['{}_3'.format(model)],
color='b',
label='1',
s=50)
ax.scatter(df_0['{}_1'.format(model)],
df_0['{}_2'.format(model)],
df_0['{}_3'.format(model)],
color='r',
label='0',
s=50)
ax.set_xlabel('{}_1'.format(model))
ax.set_ylabel('{}_2'.format(model))
ax.set_zlabel('{}_3'.format(model))
return ax
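# Hypothetical usage sketch for scikit_decompose on synthetic data; the sample
# sizes, labels and the choice of the 'PCA' branch are made up for illustration
# (a plain numpy array is enough for this branch, unlike e.g. the 'LDA' branch
# which expects an object exposing .values):
def _example_scikit_decompose_usage():
    """Illustrative sketch: project a synthetic two-class dataset to 2-D with
    the 'PCA' branch of scikit_decompose and return the resulting axes."""
    import numpy as np
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 24))      # 200 samples, 24 features
    y = rng.integers(0, 2, size=200)    # binary flood / no-flood labels
    return scikit_decompose(X, y, model='PCA', n=2)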
def permutation_scikit(X, y, cv=False, plot=True):
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
    if not cv:
        # the tuned polynomial-kernel SVC is kept for reference but was
        # immediately overridden; only the linear-kernel SVC is actually used:
        # clf = SVC(C=0.01, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
        #           decision_function_shape='ovr', degree=3, gamma=0.032374575428176434,
        #           kernel='poly', max_iter=-1, probability=False, random_state=None,
        #           shrinking=True, tol=0.001, verbose=False)
        clf = SVC(kernel='linear')
# clf = LinearDiscriminantAnalysis()
cv = StratifiedKFold(4, shuffle=True)
# cv = KFold(4, shuffle=True)
n_classes = 2
score, permutation_scores, pvalue = permutation_test_score(
clf, X, y, scoring="f1", cv=cv, n_permutations=1000, n_jobs=-1, verbose=2)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
plt.hist(permutation_scores, 20, label='Permutation scores',
edgecolor='black')
ylim = plt.ylim()
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
else:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=42)
param_grid = {
'C': np.logspace(-2, 3, 50), 'gamma': np.logspace(-2, 3, 50),
'kernel': ['rbf', 'poly', 'sigmoid']}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2)
grid.fit(X_train, y_train)
print(grid.best_estimator_)
grid_predictions = grid.predict(X_test)
print(confusion_matrix(y_test, grid_predictions))
print(classification_report(y_test, grid_predictions))
return
def grab_y_true_and_predict_from_sklearn_model(model, X, y, cv,
kfold_name='inner_kfold'):
from sklearn.model_selection import GridSearchCV
import xarray as xr
import numpy as np
if isinstance(model, GridSearchCV):
model = model.best_estimator_
ds_list = []
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_true = y[val]
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
y_true_da = xr.DataArray(y_true, dims=['sample'])
y_pred_da = xr.DataArray(y_pred, dims=['sample'])
y_prob_da = xr.DataArray(lr_probs, dims=['sample'])
ds = xr.Dataset()
ds['y_true'] = y_true_da
ds['y_pred'] = y_pred_da
ds['y_prob'] = y_prob_da
ds['sample'] = np.arange(0, len(X[val]))
ds_list.append(ds)
ds = xr.concat(ds_list, kfold_name)
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
return ds
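# A minimal usage sketch for the helper above; the synthetic data and the
# logistic-regression stand-in for the tuned models are assumptions made for
# illustration only:
def _example_grab_y_true_and_predict():
    """Illustrative sketch: collect y_true/y_pred/y_prob per fold with a
    simple classifier and a 3-fold stratified CV on synthetic data."""
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import StratifiedKFold
    rng = np.random.default_rng(1)
    X = rng.normal(size=(120, 5))
    y = (X[:, 0] + 0.5 * rng.normal(size=120) > 0).astype(int)
    cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=1)
    return grab_y_true_and_predict_from_sklearn_model(
        LogisticRegression(), X, y, cv, kfold_name='inner_kfold')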
def produce_ROC_curves_from_model(model, X, y, cv, kfold_name='inner_kfold'):
import numpy as np
import xarray as xr
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
    # TODO: collect all predictions and y_tests from this, also predict_proba
    # and save, then calculate everything elsewhere.
if isinstance(model, GridSearchCV):
model = model.best_estimator_
tprs = []
aucs = []
pr = []
pr_aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
        # use the continuous scores (lr_probs) rather than the hard predictions,
        # otherwise the ROC and PR curves collapse to a single threshold:
        fpr, tpr, _ = roc_curve(y[val], lr_probs)
        interp_tpr = np.interp(mean_fpr, fpr, tpr)
        interp_tpr[0] = 0.0
        tprs.append(interp_tpr)
        aucs.append(roc_auc_score(y[val], lr_probs))
        precision, recall, _ = precision_recall_curve(y[val], lr_probs)
        pr.append(recall)
        average_precision = average_precision_score(y[val], lr_probs)
        pr_aucs.append(average_precision)
# mean_tpr = np.mean(tprs, axis=0)
# mean_tpr[-1] = 1.0
# mean_auc = auc(mean_fpr, mean_tpr)
# std_auc = np.std(aucs)
# std_tpr = np.std(tprs, axis=0)
tpr_da = xr.DataArray(tprs, dims=[kfold_name, 'fpr'])
auc_da = xr.DataArray(aucs, dims=[kfold_name])
ds = xr.Dataset()
ds['TPR'] = tpr_da
ds['AUC'] = auc_da
ds['fpr'] = mean_fpr
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
# variability for each tpr is ds['TPR'].std('kfold')
return ds
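# A short sketch of how the per-fold curves returned above can be summarised on
# the common FPR grid; ds is assumed to be the Dataset returned by
# produce_ROC_curves_from_model:
def _example_mean_roc_from_folds(ds, kfold_name='inner_kfold'):
    """Illustrative sketch: mean ROC curve, its fold-to-fold spread and the
    mean AUC across the CV folds."""
    mean_tpr = ds['TPR'].mean(kfold_name)
    std_tpr = ds['TPR'].std(kfold_name)
    mean_auc = ds['AUC'].mean(kfold_name)
    return mean_tpr, std_tpr, mean_auc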
def cross_validation_with_holdout(X, y, model_name='SVC', features='pwv',
n_splits=3, test_ratio=0.25,
scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X, model_name, features)
if param_grid == 'light':
print(np.unique(X.feature.values))
# first take out the hold-out set:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,
random_state=seed,
stratify=y)
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
        print('CV StratifiedKFold with {} splits.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
    # fit the grid search on the training portion only, so the hold-out set
    # taken out above is not leaked into the cross-validation:
    gr_search.fit(X_train, y_train)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_holdout_{}_{}_{}_{}_{}_{}_{}.pkl'.format(
model_name, '+'.join(features), '+'.join(scorers), n_splits,
int(test_ratio*100), param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
# gr, _ = process_gridsearch_results(
# gr_search, model_name, split_dim='kfold', features=X.feature.values)
# remove_digits = str.maketrans('', '', digits)
# features = list(set([x.translate(remove_digits).split('_')[0]
# for x in X.feature.values]))
# # add more attrs, features etc:
# gr.attrs['features'] = features
return gr_search
def select_doy_from_feature_list(X, model_name='RF', features='pwv'):
# first if RF chosen, replace the cyclic coords of DOY (sin and cos) with
# the DOY itself.
if isinstance(features, list):
feats = features.copy()
else:
feats = features
if model_name == 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(features, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
elif isinstance(features, str):
feats = ['doy_sin']
feats.append('doy_cos')
X = select_features_from_X(X, feats)
return X
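# Illustrative sketch of the model-dependent day-of-year handling performed by
# select_doy_from_feature_list (feature-name logic only; select_features_from_X
# itself is defined elsewhere in this module):
def _example_doy_feature_choice(model_name, features=('pwv', 'doy')):
    """Illustrative sketch: tree models (RF) get the raw DOY, every other
    model gets the cyclic doy_sin/doy_cos pair."""
    feats = [f for f in features if f != 'doy']
    if 'doy' in features:
        if model_name == 'RF':
            feats.append('DOY')
        else:
            feats.extend(['doy_sin', 'doy_cos'])
    return feats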
def single_cross_validation(X_val, y_val, model_name='SVC', features='pwv',
n_splits=4, scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None, outer_split='1-1'):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
# from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X_val, model_name, features)
y = y_val
if param_grid == 'light':
print(np.unique(X.feature.values))
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
        print('CV StratifiedKFold with {} splits.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(
n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
if outer_split == '1-1':
cv_type = 'holdout'
print('holdout cv is selected.')
else:
cv_type = 'nested'
print('nested cv {} out of {}.'.format(
outer_split.split('-')[0], outer_split.split('-')[1]))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
gr_search.fit(X, y)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_{}_{}_{}_{}_{}_{}_{}_{}.pkl'.format(cv_type,
model_name, '+'.join(features), '+'.join(
scorers), n_splits,
outer_split, param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
return gr_search
def save_cv_params_to_file(cv_obj, path, name):
import pandas as pd
di = vars(cv_obj)
splitter_type = cv_obj.__repr__().split('(')[0]
di['splitter_type'] = splitter_type
(pd.DataFrame.from_dict(data=di, orient='index')
.to_csv(path / '{}.csv'.format(name), header=False))
print('{}.csv saved to {}.'.format(name, path))
return
def read_cv_params_and_instantiate(filepath):
import pandas as pd
from sklearn.model_selection import StratifiedKFold
df = pd.read_csv(filepath, header=None, index_col=0)
d = {}
    for row in df.iterrows():
        dd = pd.to_numeric(row[1], errors='ignore')
        val = dd.item()
        # convert stringified booleans explicitly; astype(bool) would map the
        # non-empty string 'False' to True:
        if val == 'True':
            val = True
        elif val == 'False':
            val = False
        d[dd.to_frame().columns.item()] = val
s_type = d.pop('splitter_type')
if s_type == 'StratifiedKFold':
cv = StratifiedKFold(**d)
return cv
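# A minimal round-trip sketch for the two helpers above; the directory argument
# and file name are illustrative (any writable pathlib.Path will do):
def _example_cv_params_roundtrip(tmp_path):
    """Illustrative sketch: persist a StratifiedKFold's parameters to CSV and
    re-instantiate an equivalent splitter from that file."""
    from sklearn.model_selection import StratifiedKFold
    cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
    save_cv_params_to_file(cv, tmp_path, 'outer_cv_params')
    cv2 = read_cv_params_and_instantiate(tmp_path / 'outer_cv_params.csv')
    assert cv2.get_n_splits() == cv.get_n_splits()
    return cv2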
def nested_cross_validation_procedure(X, y, model_name='SVC', features='pwv',
outer_splits=4, inner_splits=2,
refit_scorer='roc_auc',
scorers=['f1', 'recall', 'tss', 'hss',
'roc_auc', 'precision',
'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1):
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.inspection import permutation_importance
from string import digits
import numpy as np
import xarray as xr
assert refit_scorer in scorers
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X, model_name, features)
# if model_name == 'RF':
# doy = X['sample'].dt.dayofyear
# sel_doy = [x for x in X.feature.values if 'doy_sin' in x]
# doy_X = doy.broadcast_like(X.sel(feature=sel_doy))
# doy_X['feature'] = [
# 'doy_{}'.format(x) for x in range(
# doy_X.feature.size)]
# no_doy = [x for x in X.feature.values if 'doy' not in x]
# X = X.sel(feature=no_doy)
# X = xr.concat([X, doy_X], 'feature')
# else:
# # first slice X for features:
# if isinstance(features, str):
# f = [x for x in X.feature.values if features in x]
# X = X.sel(feature=f)
# elif isinstance(features, list):
# fs = []
# for f in features:
# fs += [x for x in X.feature.values if f in x]
# X = X.sel(feature=fs)
if param_grid == 'light':
print(np.unique(X.feature.values))
# configure the cross-validation procedure
cv_inner = StratifiedKFold(n_splits=inner_splits, shuffle=True,
random_state=seed)
    print('Inner CV StratifiedKFold with {} splits.'.format(inner_splits))
# define the model and search space:
ml = ML_Classifier_Switcher()
if param_grid == 'light':
        print('diagnostic mode: light.')
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv_inner, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=refit_scorer, return_train_score=True)
# gr.fit(X, y)
# configure the cross-validation procedure
cv_outer = StratifiedKFold(
n_splits=outer_splits, shuffle=True, random_state=seed)
# execute the nested cross-validation
scores_est_dict = cross_validate(gr_search, X, y,
scoring=scores_dict,
cv=cv_outer, n_jobs=n_jobs,
return_estimator=True, verbose=verbose)
# perm = []
# for i, (train, val) in enumerate(cv_outer.split(X, y)):
# gr_model = scores_est_dict['estimator'][i]
# gr_model.fit(X[train], y[train])
# r = permutation_importance(gr_model, X[val], y[val],scoring='f1',
# n_repeats=30, n_jobs=-1,
# random_state=0)
# perm.append(r)
# get the test scores:
test_keys = [x for x in scores_est_dict.keys() if 'test' in x]
ds = xr.Dataset()
for key in test_keys:
ds[key] = xr.DataArray(scores_est_dict[key], dims=['outer_kfold'])
preds_ds = []
gr_ds = []
for est in scores_est_dict['estimator']:
gr, _ = process_gridsearch_results(
est, model_name, split_dim='inner_kfold', features=X.feature.values)
        # keep the per-outer-fold gridsearch results for concatenation below:
gr_ds.append(gr)
preds_ds.append(
grab_y_true_and_predict_from_sklearn_model(est, X, y, cv_inner))
# tpr_ds.append(produce_ROC_curves_from_model(est, X, y, cv_inner))
dss = xr.concat(preds_ds, 'outer_kfold')
gr_dss = xr.concat(gr_ds, 'outer_kfold')
dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1)
gr_dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1)
    # aggregate results:
dss = xr.merge([ds, dss])
dss = xr.merge([dss, gr_dss])
dss.attrs = gr_dss.attrs
dss.attrs['outer_kfold_splits'] = outer_splits
remove_digits = str.maketrans('', '', digits)
features = list(set([x.translate(remove_digits).split('_')[0]
for x in X.feature.values]))
# add more attrs, features etc:
dss.attrs['features'] = features
# rename major data_vars with model name:
# ys = [x for x in dss.data_vars if 'y_' in x]
# new_ys = [y + '_{}'.format(model_name) for y in ys]
# dss = dss.rename(dict(zip(ys, new_ys)))
# new_test_keys = [y + '_{}'.format(model_name) for y in test_keys]
# dss = dss.rename(dict(zip(test_keys, new_test_keys)))
# if isinstance(X.attrs['pwv_id'], list):
# dss.attrs['pwv_id'] = '-'.join(X.attrs['pwv_id'])
# else:
# dss.attrs['pwv_id'] = X.attrs['pwv_id']
# if isinstance(y.attrs['hydro_station_id'], list):
# dss.attrs['hs_id'] = '-'.join([str(x) for x in y.attrs['hydro_station_id']])
# else:
# dss.attrs['hs_id'] = y.attrs['hydro_station_id']
# dss.attrs['hydro_max_flow'] = y.attrs['max_flow']
# dss.attrs['neg_pos_ratio'] = y.attrs['neg_pos_ratio']
# save results to file:
if savepath is not None:
save_cv_results(dss, savepath=savepath)
return dss
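# A compact, self-contained sketch of the nested-CV pattern used above
# (GridSearchCV as the inner loop, cross_validate as the outer loop); the SVC
# estimator, the tiny grid and the synthetic data are illustrative stand-ins
# for ML_Classifier_Switcher and the PWV features:
def _example_minimal_nested_cv():
    """Illustrative sketch of nested cross-validation on synthetic data."""
    import numpy as np
    from sklearn.svm import SVC
    from sklearn.model_selection import (GridSearchCV, StratifiedKFold,
                                         cross_validate)
    rng = np.random.default_rng(2)
    X = rng.normal(size=(150, 6))
    y = (X[:, 0] - X[:, 1] > 0).astype(int)
    inner_cv = StratifiedKFold(n_splits=2, shuffle=True, random_state=42)
    outer_cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
    gr = GridSearchCV(SVC(), {'C': [0.1, 1.0, 10.0]},
                      cv=inner_cv, scoring='f1', refit=True)
    scores = cross_validate(gr, X, y, cv=outer_cv, scoring='f1',
                            return_estimator=True)
    return scores['test_score']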
# def ML_main_procedure(X, y, estimator=None, model_name='SVC', features='pwv',
# val_size=0.18, n_splits=None, test_size=0.2, seed=42, best_score='f1',
# savepath=None, plot=True):
# """split the X,y for train and test, either do HP tuning using HP_tuning
# with val_size or use already tuned (or not) estimator.
# models to play with = MLP, RF and SVC.
# n_splits = 2, 3, 4.
# features = pwv, pressure.
# best_score = f1, roc_auc, accuracy.
# can do loop on them. RF takes the most time to tune."""
# X = select_features_from_X(X, features)
# X_train, X_test, y_train, y_test = train_test_split(X, y,
# test_size=test_size,
# shuffle=True,
# random_state=seed)
# # do HP_tuning:
# if estimator is None:
# cvr, model = HP_tuning(X_train, y_train, model_name=model_name, val_size=val_size, test_size=test_size,
# best_score=best_score, seed=seed, savepath=savepath, n_splits=n_splits)
# else:
# model = estimator
# if plot:
# ax = plot_many_ROC_curves(model, X_test, y_test, name=model_name,
# ax=None)
# return ax
# else:
# return model
def plot_hyper_parameters_heatmaps_from_nested_CV_model(dss, path=hydro_path, model_name='MLP',
features='pwv+pressure+doy', save=True):
import matplotlib.pyplot as plt
ds = dss.sel(features=features).reset_coords(drop=True)
non_hp_vars = ['mean_score', 'std_score',
'test_score', 'roc_auc_score', 'TPR']
if model_name == 'RF':
non_hp_vars.append('feature_importances')
ds = ds[[x for x in ds if x not in non_hp_vars]]
seq = 'Blues'
cat = 'Dark2'
cmap_hp_dict = {
'alpha': seq, 'activation': cat,
'hidden_layer_sizes': cat, 'learning_rate': cat,
'solver': cat, 'kernel': cat, 'C': seq,
'gamma': seq, 'degree': seq, 'coef0': seq,
'max_depth': seq, 'max_features': cat,
'min_samples_leaf': seq, 'min_samples_split': seq,
'n_estimators': seq
}
# fix stuff for SVC:
if model_name == 'SVC':
ds['degree'] = ds['degree'].where(ds['kernel']=='poly')
ds['coef0'] = ds['coef0'].where(ds['kernel']=='poly')
# da = ds.to_arrray('hyper_parameters')
# fg = xr.plot.FacetGrid(
# da,
# col='hyper_parameters',
# sharex=False,
# sharey=False, figsize=(16, 10))
fig, axes = plt.subplots(5, 1, sharex=True, figsize=(4, 10))
for i, da in enumerate(ds):
df = ds[da].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df.index.name = 'Outer Split'
try:
df = df.astype(float).round(2)
except ValueError:
pass
cmap = cmap_hp_dict.get(da, 'Set1')
plot_heatmap_for_hyper_parameters_df(df, ax=axes[i], title=da, cmap=cmap)
fig.tight_layout()
if save:
filename = 'Hyper-parameters_nested_{}.png'.format(
model_name)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_heatmaps_for_hyper_parameters_data_splits(df1, df2, axes=None,
cmap='colorblind',
title=None, fig=None,
cbar_params=[.92, .12, .03, .75],
fontsize=12,
val_type='float'):
import pandas as pd
import seaborn as sns
import numpy as np
# from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
sns.set_style('ticks')
sns.set_style('whitegrid')
sns.set(font_scale=1.2)
df1 = df1.astype(eval(val_type))
df2 = df2.astype(eval(val_type))
arr = pd.concat([df1, df2], axis=0).values.ravel()
    value_to_int = {j: i for i, j in enumerate(
        np.unique(arr))}  # map each unique value to an integer code
# try:
# sorted_v_to_i = dict(sorted(value_to_int.items()))
# except TypeError:
# sorted_v_to_i = value_to_int
# print(value_to_int)
n = len(value_to_int)
# discrete colormap (n samples from a given cmap)
cmap_list = sns.color_palette(cmap, n)
if val_type == 'float':
# print([value_to_int.keys()])
cbar_ticklabels = ['{:.2g}'.format(x) for x in value_to_int.keys()]
elif val_type == 'int':
cbar_ticklabels = [int(x) for x in value_to_int.keys()]
elif val_type == 'str':
cbar_ticklabels = [x for x in value_to_int.keys()]
if 'nan' in value_to_int.keys():
cmap_list[-1] = (0.5, 0.5, 0.5)
new_value_to_int = {}
for key, val in value_to_int.items():
try:
new_value_to_int[str(int(float(key)))] = val
except ValueError:
new_value_to_int['NR'] = val
cbar_ticklabels = [x for x in new_value_to_int.keys()]
# u1 = np.unique(df1.replace(value_to_int)).astype(int)
# cmap1 = [cmap_list[x] for x in u1]
# u2 = np.unique(df2.replace(value_to_int)).astype(int)
# cmap2 = [cmap_list[x] for x in u2]
# prepare normalizer
## Prepare bins for the normalizer
norm_bins = np.sort([*value_to_int.values()]) + 0.5
norm_bins = np.insert(norm_bins, 0, np.min(norm_bins) - 1.0)
# print(norm_bins)
## Make normalizer and formatter
norm = matplotlib.colors.BoundaryNorm(norm_bins, n, clip=True)
# normalizer = Normalize(np.array([x for x in value_to_int.values()])[0],np.array([x for x in value_to_int.values()])[-1])
# im=cm.ScalarMappable(norm=normalizer)
if axes is None:
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False)
# divider = make_axes_locatable([axes[0], axes[1]])
# cbar_ax = divider.append_axes('right', size='5%', pad=0.05)
cbar_ax = fig.add_axes(cbar_params)
sns.heatmap(df1.replace(value_to_int), cmap=cmap_list, cbar=False,
ax=axes[0], linewidth=0.7, linecolor='k', square=True,
cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm)
sns.heatmap(df2.replace(value_to_int), cmap=cmap_list, cbar=False,
ax=axes[1], linewidth=0.7, linecolor='k', square=True,
cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm)
# else:
# ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
# ax=ax, linewidth=1, linecolor='k',
# square=False, cbar_kws={"shrink": .9})
if title is not None:
axes[0].set_title(title, fontsize=fontsize)
for ax in axes:
ax.set_xticklabels(ax.get_xticklabels(), ha='right', va='top', rotation=45)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.tick_params(labelsize=fontsize, direction='out', bottom=True,
left=True, length=2)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
# colorbar = axes[0].collections[0].colorbar
# diff = norm_bins[1:] - norm_bins[:-1]
# tickz = norm_bins[:-1] + diff / 2
colorbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=matplotlib.colors.ListedColormap(cmap_list)), ax=[axes[0], axes[1]],
shrink=1, pad=0.05, cax=cbar_ax)
# colorbar = plt.gca().images[-1].colorbar
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.ax.set_yticklabels(cbar_ticklabels, fontsize=fontsize-2)
return axes
def plot_hyper_parameters_heatmap_data_splits_per_model(dss4, dss5, fontsize=14,
save=True, model_name='SVC',
features='pwv+pressure+doy'):
import matplotlib.pyplot as plt
# import seaborn as sns
fig, axes = plt.subplots(2, 5, sharex=True, sharey=False ,figsize=(16, 5))
ds4 = dss4.sel(features=features).reset_coords(drop=True)
ds5 = dss5.sel(features=features).reset_coords(drop=True)
ds4 = ds4.reindex(scorer=scorer_order)
ds5 = ds5.reindex(scorer=scorer_order)
non_hp_vars = ['mean_score', 'std_score',
'test_score', 'roc_auc_score', 'TPR']
if model_name == 'RF':
non_hp_vars.append('feature_importances')
if model_name == 'MLP':
adj_dict=dict(
top=0.946,
bottom=0.145,
left=0.046,
right=0.937,
hspace=0.121,
wspace=0.652)
cb_st = 0.167
cb_mul = 0.193
else:
adj_dict=dict(
wspace = 0.477,
top=0.921,
bottom=0.17,
left=0.046,
right=0.937,
hspace=0.121)
cb_st = 0.18
cb_mul = 0.19
ds4 = ds4[[x for x in ds4 if x not in non_hp_vars]]
ds5 = ds5[[x for x in ds5 if x not in non_hp_vars]]
seq = 'Blues'
cat = 'Dark2'
hp_dict = {
'alpha': ['Reds', 'float'], 'activation': ['Set1_r', 'str'],
'hidden_layer_sizes': ['Paired', 'str'], 'learning_rate': ['Spectral_r', 'str'],
'solver': ['Dark2', 'str'], 'kernel': ['Dark2', 'str'], 'C': ['Blues', 'float'],
'gamma': ['Oranges', 'float'], 'degree': ['Greens', 'str'], 'coef0': ['Spectral', 'str'],
'max_depth': ['Blues', 'int'], 'max_features': ['Dark2', 'str'],
'min_samples_leaf': ['Greens', 'int'], 'min_samples_split': ['Reds', 'int'],
'n_estimators': ['Oranges', 'int']
}
# fix stuff for SVC:
if model_name == 'SVC':
ds4['degree'] = ds4['degree'].where(ds4['kernel']=='poly')
ds4['coef0'] = ds4['coef0'].where(ds4['kernel']=='poly')
ds5['degree'] = ds5['degree'].where(ds5['kernel']=='poly')
ds5['coef0'] = ds5['coef0'].where(ds5['kernel']=='poly')
for i, (da4, da5) in enumerate(zip(ds4, ds5)):
df4 = ds4[da4].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df5 = ds5[da5].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df4.index.name = 'Outer Split'
df5.index.name = 'Outer Split'
# try:
# df4 = df4.astype(float).round(2)
# df5 = df5.astype(float).round(2)
# except ValueError:
# pass
cmap = hp_dict.get(da4, 'Set1')[0]
val_type = hp_dict.get(da4, 'int')[1]
cbar_params = [cb_st + cb_mul*float(i), .175, .01, .71]
plot_heatmaps_for_hyper_parameters_data_splits(df4,
df5,
axes=[axes[0, i], axes[1, i]],
fig=fig,
title=da4,
cmap=cmap,
cbar_params=cbar_params,
fontsize=fontsize,
val_type=val_type)
if i > 0 :
axes[0, i].set_ylabel('')
axes[0, i].yaxis.set_tick_params(labelleft=False)
axes[1, i].set_ylabel('')
axes[1, i].yaxis.set_tick_params(labelleft=False)
fig.tight_layout()
fig.subplots_adjust(**adj_dict)
if save:
filename = 'Hyper-parameters_nested_{}.png'.format(
model_name)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_heatmap_for_hyper_parameters_df(df, ax=None, cmap='colorblind',
title=None, fontsize=12):
import pandas as pd
import seaborn as sns
import numpy as np
sns.set_style('ticks')
sns.set_style('whitegrid')
sns.set(font_scale=1.2)
    value_to_int = {j: i for i, j in enumerate(
        sorted(pd.unique(df.values.ravel())))}  # map each unique value to an integer code
# for key in value_to_int.copy().keys():
# try:
# if np.isnan(key):
# value_to_int['NA'] = value_to_int.pop(key)
# df = df.fillna('NA')
# except TypeError:
# pass
try:
sorted_v_to_i = dict(sorted(value_to_int.items()))
except TypeError:
sorted_v_to_i = value_to_int
n = len(value_to_int)
# discrete colormap (n samples from a given cmap)
cmap = sns.color_palette(cmap, n)
if ax is None:
ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
linewidth=1, linecolor='k', square=False,
cbar_kws={"shrink": .9})
else:
ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
ax=ax, linewidth=1, linecolor='k',
square=False, cbar_kws={"shrink": .9})
if title is not None:
ax.set_title(title, fontsize=fontsize)
ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
colorbar = ax.collections[0].colorbar
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.set_ticklabels(list(value_to_int.keys()))
return ax
# def plot_ROC_curves_for_all_models_and_scorers(dss, save=False,
# fontsize=24, fig_split=1,
# feat=['pwv', 'pwv+pressure', 'pwv+pressure+doy']):
# import xarray as xr
# import seaborn as sns
# import matplotlib.pyplot as plt
# import pandas as pd
# cmap = sns.color_palette('tab10', len(feat))
# sns.set_style('whitegrid')
# sns.set_style('ticks')
# if fig_split == 1:
# dss = dss.sel(scorer=['precision', 'recall', 'f1'])
# elif fig_split == 2:
# dss = dss.sel(scorer=['accuracy', 'tss', 'hss'])
# fg = xr.plot.FacetGrid(
# dss,
# col='model',
# row='scorer',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# modelname = dss['model'].isel(model=j).item()
# scorer = dss['scorer'].isel(scorer=i).item()
# chance_plot = [False for x in feat]
# chance_plot[-1] = True
# for k, f in enumerate(feat):
# # name = '{}-{}-{}'.format(modelname, scoring, feat)
# # model = dss.isel({'model': j, 'scoring': i}).sel(
# # {'features': feat})
# model = dss.isel({'model': j, 'scorer': i}
# ).sel({'features': f})
# # return model
# title = 'ROC of {} model ({})'.format(modelname.replace('SVC', 'SVM'), scorer)
# try:
# ax = plot_ROC_curve_from_dss_nested_CV(model, outer_dim='outer_split',
# plot_chance=[k],
# main_label=f,
# ax=ax,
# color=cmap[k], title=title,
# fontsize=fontsize)
# except ValueError:
# ax.grid('on')
# continue
# handles, labels = ax.get_legend_handles_labels()
# lh_ser = pd.Series(labels, index=handles).drop_duplicates()
# lh_ser = lh_ser.sort_values(ascending=False)
# hand = lh_ser.index.values
# labe = lh_ser.values
# ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right",
# fontsize=fontsize-7)
# ax.grid('on')
# if j >= 1:
# ax.set_ylabel('')
# if fig_split == 1:
# ax.set_xlabel('')
# ax.tick_params(labelbottom=False)
# else:
# if i <= 1:
# ax.set_xlabel('')
# # title = '{} station: {} total events'.format(
# # station.upper(), events)
# # if max_flow > 0:
# # title = '{} station: {} total events (max flow = {} m^3/sec)'.format(
# # station.upper(), events, max_flow)
# # fg.fig.suptitle(title, fontsize=fontsize)
# fg.fig.tight_layout()
# fg.fig.subplots_adjust(top=0.937,
# bottom=0.054,
# left=0.039,
# right=0.993,
# hspace=0.173,
# wspace=0.051)
# if save:
# filename = 'ROC_curves_nested_{}_figsplit_{}.png'.format(
# dss['outer_split'].size, fig_split)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
# return fg
def plot_hydro_ML_models_results_from_dss(dss, std_on='outer',
save=False, fontsize=16,
plot_type='ROC', split=1,
feat=['pwv', 'pressure+pwv', 'doy+pressure+pwv']):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
cmap = sns.color_palette("colorblind", len(feat))
if split == 1:
dss = dss.sel(scoring=['f1', 'precision', 'recall'])
elif split == 2:
dss = dss.sel(scoring=['tss', 'hss', 'roc-auc', 'accuracy'])
fg = xr.plot.FacetGrid(
dss,
col='model',
row='scoring',
sharex=True,
sharey=True, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
modelname = dss['model'].isel(model=j).item()
scoring = dss['scoring'].isel(scoring=i).item()
chance_plot = [False for x in feat]
chance_plot[-1] = True
for k, f in enumerate(feat):
# name = '{}-{}-{}'.format(modelname, scoring, feat)
# model = dss.isel({'model': j, 'scoring': i}).sel(
# {'features': feat})
model = dss.isel({'model': j, 'scoring': i}
).sel({'features': f})
title = '{} of {} model ({})'.format(
plot_type, modelname, scoring)
try:
plot_ROC_PR_curve_from_dss(model, outer_dim='outer_kfold',
inner_dim='inner_kfold',
plot_chance=[k],
main_label=f, plot_type=plot_type,
plot_std_legend=False, ax=ax,
color=cmap[k], title=title,
std_on=std_on, fontsize=fontsize)
except ValueError:
ax.grid('on')
continue
handles, labels = ax.get_legend_handles_labels()
hand = pd.Series(
labels, index=handles).drop_duplicates().index.values
labe = pd.Series(labels, index=handles).drop_duplicates().values
ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right",
fontsize=14)
ax.grid('on')
# title = '{} station: {} total events'.format(
# station.upper(), events)
# if max_flow > 0:
# title = '{} station: {} total events (max flow = {} m^3/sec)'.format(
# station.upper(), events, max_flow)
# fg.fig.suptitle(title, fontsize=fontsize)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.937,
bottom=0.054,
left=0.039,
right=0.993,
hspace=0.173,
wspace=0.051)
if save:
filename = 'hydro_models_on_{}_{}_std_on_{}_{}.png'.format(
dss['inner_kfold'].size, dss['outer_kfold'].size,
std_on, plot_type)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
# def plot_hydro_ML_models_result(model_da, nsplits=2, station='drag',
# test_size=20, n_splits_plot=None, save=False):
# import xarray as xr
# import seaborn as sns
# import matplotlib.pyplot as plt
# from sklearn.model_selection import train_test_split
# # TODO: add plot_roc_curve(model, X_other_station, y_other_station)
# # TODO: add pw_station, hs_id
# cmap = sns.color_palette("colorblind", 3)
# X, y = produce_X_y(station, hydro_pw_dict[station], neg_pos_ratio=1)
# events = int(y[y == 1].sum().item())
# model_da = model_da.sel(
# splits=nsplits,
# test_size=test_size).reset_coords(
# drop=True)
## just_pw = [x for x in X.feature.values if 'pressure' not in x]
## X_pw = X.sel(feature=just_pw)
# fg = xr.plot.FacetGrid(
# model_da,
# col='model',
# row='scoring',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# modelname = model_da['model'].isel(model=j).item()
# scoring = model_da['scoring'].isel(scoring=i).item()
# chance_plot = [False, False, True]
# for k, feat in enumerate(model_da['feature'].values):
# name = '{}-{}-{}'.format(modelname, scoring, feat)
# model = model_da.isel({'model': j, 'scoring': i}).sel({'feature': feat}).item()
# title = 'ROC of {} model ({})'.format(modelname, scoring)
# if not '+' in feat:
# f = [x for x in X.feature.values if feat in x]
# X_f = X.sel(feature=f)
# else:
# X_f = X
# X_train, X_test, y_train, y_test = train_test_split(
# X_f, y, test_size=test_size/100, shuffle=True, random_state=42)
#
# plot_many_ROC_curves(model, X_f, y, name=name,
# color=cmap[k], ax=ax,
# plot_chance=chance_plot[k],
# title=title, n_splits=n_splits_plot)
# fg.fig.suptitle('{} station: {} total_events, test_events = {}, n_splits = {}'.format(station.upper(), events, int(events* test_size/100), nsplits))
# fg.fig.tight_layout()
# fg.fig.subplots_adjust(top=0.937,
# bottom=0.054,
# left=0.039,
# right=0.993,
# hspace=0.173,
# wspace=0.051)
# if save:
# plt.savefig(savefig_path / 'try.png', bbox_inches='tight')
# return fg
def order_features_list(flist):
""" order the feature list in load_ML_run_results
so i don't get duplicates"""
import pandas as pd
import numpy as np
# first get all features:
li = [x.split('+') for x in flist]
flat_list = [item for sublist in li for item in sublist]
f = list(set(flat_list))
nums = np.arange(1, len(f)+1)
    # now assign a power of ten to each feature, so that the sum for a
    # combination is independent of the order the features are written in:
inds = []
for x in flist:
for fe, num in zip(f, nums):
x = x.replace(fe, str(10**num))
inds.append(eval(x))
ser = pd.Series(inds)
ser.index = flist
ser1 = ser.drop_duplicates()
di = dict(zip(ser1.values, ser1.index))
new_flist = []
for ind, feat in zip(inds, flist):
new_flist.append(di.get(ind))
return new_flist
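# A tiny illustrative check of order_features_list: permutations of the same
# feature combination collapse to one canonical string (the example feature
# strings are hypothetical):
def _example_order_features_list():
    """Illustrative sketch: 'pwv+pressure' and 'pressure+pwv' map to the same
    canonical entry after ordering."""
    flist = ['pwv', 'pwv+pressure', 'pressure+pwv', 'doy+pwv+pressure']
    ordered = order_features_list(flist)
    assert ordered[1] == ordered[2]
    return ordered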
def smart_add_dataarray_to_ds_list(dsl, da_name='feature_importances'):
"""add data array to ds_list even if it does not exist, use shape of
data array that exists in other part of ds list"""
import numpy as np
import xarray as xr
# print(da_name)
fi = [x for x in dsl if da_name in x][0]
print(da_name, fi[da_name].shape)
fi = fi[da_name].copy(data=np.zeros(shape=fi[da_name].shape))
new_dsl = []
for ds in dsl:
if da_name not in ds:
ds = xr.merge([ds, fi], combine_attrs='no_conflicts')
new_dsl.append(ds)
return new_dsl
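# A small illustrative sketch of the placeholder-filling behaviour above:
# datasets that lack 'feature_importances' receive a zero-filled copy shaped
# like the one found in another dataset (all names and values are made up):
def _example_smart_add_dataarray():
    """Illustrative sketch: pad a list of datasets so they all share a variable."""
    import numpy as np
    import xarray as xr
    fi = xr.DataArray(np.ones((3,)), dims=['feature'], name='feature_importances')
    ds_with = xr.Dataset({'feature_importances': fi, 'test_f1': xr.DataArray(0.8)})
    ds_without = xr.Dataset({'test_f1': xr.DataArray(0.7)})
    padded = smart_add_dataarray_to_ds_list([ds_with, ds_without],
                                            da_name='feature_importances')
    assert all('feature_importances' in ds for ds in padded)
    return padded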
def load_ML_run_results(path=hydro_ml_path, prefix='CVR',
change_DOY_to_doy=True):
from aux_gps import path_glob
import xarray as xr
# from aux_gps import save_ncfile
import pandas as pd
import numpy as np
print('loading hydro ML results for all models and features')
# print('loading hydro ML results for station {}'.format(pw_station))
model_files = path_glob(path, '{}_*.nc'.format(prefix))
model_files = sorted(model_files)
# model_files = [x for x in model_files if pw_station in x.as_posix()]
ds_list = [xr.load_dataset(x) for x in model_files]
if change_DOY_to_doy:
for ds in ds_list:
if 'DOY' in ds.features:
new_feats = [x.replace('DOY', 'doy') for x in ds['feature'].values]
ds['feature'] = new_feats
ds.attrs['features'] = [x.replace('DOY', 'doy') for x in ds.attrs['features']]
model_as_str = [x.as_posix().split('/')[-1].split('.')[0]
for x in model_files]
model_names = [x.split('_')[1] for x in model_as_str]
model_scores = [x.split('_')[3] for x in model_as_str]
model_features = [x.split('_')[2] for x in model_as_str]
if change_DOY_to_doy:
model_features = [x.replace('DOY', 'doy') for x in model_features]
new_model_features = order_features_list(model_features)
ind = pd.MultiIndex.from_arrays(
[model_names,
new_model_features,
model_scores],
names=(
'model',
'features',
'scoring'))
# ind1 = pd.MultiIndex.from_product([model_names, model_scores, model_features], names=[
# 'model', 'scoring', 'feature'])
# ds_list = [x[data_vars] for x in ds_list]
# complete non-existant fields like best and fi for all ds:
data_vars = [x for x in ds_list[0] if x.startswith('test')]
# data_vars += ['AUC', 'TPR']
data_vars += [x for x in ds_list[0] if x.startswith('y_')]
bests = [[x for x in y if x.startswith('best')] for y in ds_list]
data_vars += list(set([y for x in bests for y in x]))
if 'RF' in model_names:
data_vars += ['feature_importances']
new_ds_list = []
for dvar in data_vars:
ds_list = smart_add_dataarray_to_ds_list(ds_list, dvar)
# # check if all data vars are in each ds and merge them:
new_ds_list = [xr.merge([y[x] for x in data_vars if x in y],
combine_attrs='no_conflicts') for y in ds_list]
# concat all
dss = xr.concat(new_ds_list, dim='dim_0')
dss['dim_0'] = ind
dss = dss.unstack('dim_0')
# dss.attrs['pwv_id'] = pw_station
# fix roc_auc to roc-auc in dss datavars
dss = dss.rename_vars({'test_roc_auc': 'test_roc-auc'})
# dss['test_roc_auc'].name = 'test_roc-auc'
print('calculating ROC, PR metrics.')
dss = calculate_metrics_from_ML_dss(dss)
print('Done!')
return dss
def plot_nested_CV_test_scores(dss, feats=None, fontsize=16,
save=True, wv_label='pwv'):
import seaborn as sns
import matplotlib.pyplot as plt
from aux_gps import convert_da_to_long_form_df
import numpy as np
import xarray as xr
def change_width(ax, new_value) :
for patch in ax.patches :
current_width = patch.get_width()
diff = current_width - new_value
# we change the bar width
patch.set_width(new_value)
# we recenter the bar
patch.set_x(patch.get_x() + diff * .5)
def show_values_on_bars(axs, fs=12, fw='bold', exclude_bar_num=None):
import numpy as np
def _show_on_single_plot(ax, exclude_bar_num=3):
for i, p in enumerate(ax.patches):
                # annotate every bar unless it is the explicitly excluded one:
                if exclude_bar_num is None or i != exclude_bar_num:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="right",
fontsize=fs, fontweight=fw, zorder=20)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax, exclude_bar_num)
else:
_show_on_single_plot(axs, exclude_bar_num)
splits = dss['outer_split'].size
try:
assert 'best' in dss.attrs['comment']
best = True
except AssertionError:
best = False
except KeyError:
best = False
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.sortby('model', ascending=False)
dss = dss.reindex(scorer=scorer_order)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst['test_score'].to_dataframe()
# df['scorer'] = df.index.get_level_values(3)
# df['model'] = df.index.get_level_values(0)
# df['features'] = df.index.get_level_values(1)
# df['outer_splits'] = df.index.get_level_values(2)
# df['model'] = df['model'].str.replace('SVC', 'SVM')
# df = df.melt(value_vars='test_score', id_vars=[
# 'features', 'model', 'scorer', 'outer_splits'], var_name='test_score',
# value_name='score')
da = dst['test_score']
if len(feats) == 5:
da_empty = da.isel(features=0).copy(
data=np.zeros(da.isel(features=0).shape))
da_empty['features'] = 'empty'
da = xr.concat([da, da_empty], 'features')
da = da.reindex(features=['doy', 'pressure', 'pwv',
'empty', 'pwv+pressure', 'pwv+pressure+doy'])
da.name = 'feature groups'
df = convert_da_to_long_form_df(da, value_name='score',
var_name='feature groups')
sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
cmap = sns.color_palette('tab10', n_colors=len(feats))
if len(feats) == 5:
cmap = ['tab:purple', 'tab:brown', 'tab:blue', 'tab:blue',
'tab:orange', 'tab:green']
fg = sns.FacetGrid(data=df, row='model', col='scorer', height=4, aspect=0.9)
# fg.map_dataframe(sns.stripplot, x="test_score", y="score", hue="features",
# data=df, dodge=True, alpha=1, zorder=1, palette=cmap)
# fg.map_dataframe(sns.pointplot, x="test_score", y="score", hue="features",
# data=df, dodge=True, join=False, palette=cmap,
# markers="o", scale=.75, ci=None)
fg.map_dataframe(sns.barplot, x='feature groups', y="score", hue='features',
ci='sd', capsize=None, errwidth=2, errcolor='k',
palette=cmap, dodge=True)
# g = sns.catplot(x='test_score', y="score", hue='features',
# col="scorer", row='model', ci='sd',
# data=df, kind="bar", capsize=0.25,
# height=4, aspect=1.5, errwidth=1.5)
#fg.set_xticklabels(rotation=45)
# fg.set_yticklabels([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=fontsize)
fg.set_ylabels('score')
[x.grid(True) for x in fg.axes.flatten()]
handles, labels = fg.axes[0, 0].get_legend_handles_labels()
if len(feats) == 5:
del handles[3]
del labels[3]
show_values_on_bars(fg.axes, fs=fontsize-4, exclude_bar_num=3)
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
if model == 'SVC':
model = 'SVM'
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
scorer = dss['scorer'].isel(scorer=j).item()
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
ax.set_xlabel('')
ax.set_ylim(0, 1)
change_width(ax, 0.110)
fg.set_xlabels(' ')
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=len(feats), fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92)
if save:
if best:
filename = 'ML_scores_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'ML_scores_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_holdout_test_scores(dss, feats='pwv+pressure+doy'):
import seaborn as sns
import matplotlib.pyplot as plt
def show_values_on_bars(axs, fs=12, fw='bold'):
import numpy as np
def _show_on_single_plot(ax):
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="center", fontsize=fs, fontweight=fw)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
    if feats is None:
        feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
    if isinstance(feats, str):
        # wrap a single feature-group string so that '_'.join(feats) below joins
        # groups rather than single characters:
        feats = [feats]
    dst = dss.sel(features=feats)  # .reset_coords(drop=True)
df = dst['holdout_test_scores'].to_dataframe()
df['scorer'] = df.index.droplevel(1).droplevel(0)
df['model'] = df.index.droplevel(2).droplevel(1)
df['features'] = df.index.droplevel(2).droplevel(0)
df['model'] = df['model'].str.replace('SVC', 'SVM')
df = df.melt(value_vars='holdout_test_scores', id_vars=[
'features', 'model', 'scorer'], var_name='test_score')
sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
g = sns.catplot(x="model", y="value", hue='features',
col="scorer", ci='sd', row=None,
col_wrap=3,
data=df, kind="bar", capsize=0.15,
height=4, aspect=1.5, errwidth=0.8)
g.set_xticklabels(rotation=45)
[x.grid(True) for x in g.axes.flatten()]
show_values_on_bars(g.axes)
filename = 'ML_scores_models_holdout_{}.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def prepare_test_df_to_barplot_from_dss(dss, feats='doy+pwv+pressure',
plot=True, splitfigs=True):
import seaborn as sns
import matplotlib.pyplot as plt
dvars = [x for x in dss if 'test_' in x]
scores = [x.split('_')[-1] for x in dvars]
dst = dss[dvars]
# dst['scoring'] = [x+'_inner' for x in dst['scoring'].values]
# for i, ds in enumerate(dst):
# dst[ds] = dst[ds].sel(scoring=scores[i]).reset_coords(drop=True)
    if feats is None:
        feats = ['pwv', 'pressure+pwv', 'doy+pressure+pwv']
    if isinstance(feats, str):
        # wrap a single feature-group string so that '_'.join(feats) below joins
        # groups rather than single characters:
        feats = [feats]
    dst = dst.sel(features=feats)  # .reset_coords(drop=True)
dst = dst.rename_vars(dict(zip(dvars, scores)))
# dst = dst.drop('scoring')
df = dst.to_dataframe()
# dfu = df
df['inner score'] = df.index.droplevel(2).droplevel(1).droplevel(0)
df['features'] = df.index.droplevel(2).droplevel(2).droplevel(1)
df['model'] = df.index.droplevel(2).droplevel(0).droplevel(1)
df = df.melt(value_vars=scores, id_vars=[
'features', 'model', 'inner score'], var_name='outer score')
# return dfu
# dfu.columns = dfu.columns.droplevel(1)
# dfu = dfu.T
# dfu['score'] = dfu.index
# dfu = dfu.reset_index()
# df = dfu.melt(value_vars=['MLP', 'RF', 'SVC'], id_vars=['score'])
df1 = df[(df['inner score']=='f1') | (df['inner score']=='precision') | (df['inner score']=='recall')]
df2 = df[(df['inner score']=='hss') | (df['inner score']=='tss') | (df['inner score']=='roc-auc') | (df['inner score']=='accuracy')]
if plot:
sns.set(font_scale = 1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
if splitfigs:
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df1, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}_1.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df2, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}_2.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
else:
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def calculate_metrics_from_ML_dss(dss):
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
import xarray as xr
import numpy as np
import pandas as pd
mean_fpr = np.linspace(0, 1, 100)
# fpr = dss['y_true'].copy(deep=False).values
# tpr = dss['y_true'].copy(deep=False).values
# y_true = dss['y_true'].values
# y_prob = dss['y_prob'].values
ok = [x for x in dss['outer_kfold'].values]
ik = [x for x in dss['inner_kfold'].values]
m = [x for x in dss['model'].values]
sc = [x for x in dss['scoring'].values]
f = [x for x in dss['features'].values]
# r = [x for x in dss['neg_pos_ratio'].values]
ind = pd.MultiIndex.from_product(
[ok, ik, m, sc, f],
names=[
'outer_kfold',
'inner_kfold',
'model',
'scoring',
'features']) # , 'station'])
okn = [x for x in range(dss['outer_kfold'].size)]
ikn = [x for x in range(dss['inner_kfold'].size)]
mn = [x for x in range(dss['model'].size)]
scn = [x for x in range(dss['scoring'].size)]
fn = [x for x in range(dss['features'].size)]
ds_list = []
for i in okn:
for j in ikn:
for k in mn:
for n in scn:
for m in fn:
ds = xr.Dataset()
y_true = dss['y_true'].isel(
outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze()
y_prob = dss['y_prob'].isel(
outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze()
y_true = y_true.dropna('sample')
y_prob = y_prob.dropna('sample')
if y_prob.size == 0:
# in case of NaNs in the results:
fpr_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
fpr_da['sample'] = [
x for x in range(fpr_da.size)]
tpr_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
tpr_da['sample'] = [
x for x in range(tpr_da.size)]
prn_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
prn_da['sample'] = [
x for x in range(prn_da.size)]
rcll_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
rcll_da['sample'] = [
x for x in range(rcll_da.size)]
tpr_fpr = xr.DataArray(
np.nan*np.ones((100)), dims=['FPR'])
tpr_fpr['FPR'] = mean_fpr
prn_rcll = xr.DataArray(
np.nan*np.ones((100)), dims=['RCLL'])
prn_rcll['RCLL'] = mean_fpr
pr_auc_da = xr.DataArray(np.nan)
roc_auc_da = xr.DataArray(np.nan)
no_skill_da = xr.DataArray(np.nan)
else:
no_skill = len(
y_true[y_true == 1]) / len(y_true)
no_skill_da = xr.DataArray(no_skill)
fpr, tpr, _ = roc_curve(y_true, y_prob)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_true, y_prob)
prn, rcll, _ = precision_recall_curve(
y_true, y_prob)
interp_prn = np.interp(
mean_fpr, rcll[::-1], prn[::-1])
interp_prn[0] = 1.0
pr_auc_score = auc(rcll, prn)
roc_auc_da = xr.DataArray(roc_auc)
pr_auc_da = xr.DataArray(pr_auc_score)
prn_da = xr.DataArray(prn, dims=['sample'])
prn_da['sample'] = [x for x in range(len(prn))]
rcll_da = xr.DataArray(rcll, dims=['sample'])
rcll_da['sample'] = [
x for x in range(len(rcll))]
fpr_da = xr.DataArray(fpr, dims=['sample'])
fpr_da['sample'] = [x for x in range(len(fpr))]
tpr_da = xr.DataArray(tpr, dims=['sample'])
tpr_da['sample'] = [x for x in range(len(tpr))]
tpr_fpr = xr.DataArray(
interp_tpr, dims=['FPR'])
tpr_fpr['FPR'] = mean_fpr
prn_rcll = xr.DataArray(
interp_prn, dims=['RCLL'])
prn_rcll['RCLL'] = mean_fpr
ds['fpr'] = fpr_da
ds['tpr'] = tpr_da
ds['roc-auc'] = roc_auc_da
ds['pr-auc'] = pr_auc_da
ds['prn'] = prn_da
ds['rcll'] = rcll_da
ds['TPR'] = tpr_fpr
ds['PRN'] = prn_rcll
ds['no_skill'] = no_skill_da
ds_list.append(ds)
ds = xr.concat(ds_list, 'dim_0')
ds['dim_0'] = ind
ds = ds.unstack()
ds.attrs = dss.attrs
ds['fpr'].attrs['long_name'] = 'False positive rate'
ds['tpr'].attrs['long_name'] = 'True positive rate'
ds['prn'].attrs['long_name'] = 'Precision'
ds['rcll'].attrs['long_name'] = 'Recall'
    ds['roc-auc'].attrs['long_name'] = 'ROC (FPR-TPR) area under curve'
    ds['pr-auc'].attrs['long_name'] = 'Precision-Recall area under curve'
ds['PRN'].attrs['long_name'] = 'Precision-Recall'
ds['TPR'].attrs['long_name'] = 'TPR-FPR (ROC)'
dss = xr.merge([dss, ds], combine_attrs='no_conflicts')
return dss
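# A condensed sketch of the per-fold curve metrics computed above for a single
# (y_true, y_prob) pair: interpolate the ROC and PR curves onto a common
# 100-point grid and record the no-skill baseline (the positive-class
# prevalence). The helper below is illustrative and standalone:
def _example_single_fold_curve_metrics(y_true, y_prob):
    """Illustrative sketch: ROC/PR curves for one fold on a common grid."""
    import numpy as np
    from sklearn.metrics import (auc, precision_recall_curve, roc_auc_score,
                                 roc_curve)
    grid = np.linspace(0, 1, 100)
    fpr, tpr, _ = roc_curve(y_true, y_prob)
    tpr_on_grid = np.interp(grid, fpr, tpr)
    tpr_on_grid[0] = 0.0
    prn, rcll, _ = precision_recall_curve(y_true, y_prob)
    # recall is returned in decreasing order, so reverse both for np.interp:
    prn_on_grid = np.interp(grid, rcll[::-1], prn[::-1])
    prn_on_grid[0] = 1.0
    no_skill = float(np.mean(np.asarray(y_true) == 1))
    return {'TPR': tpr_on_grid, 'PRN': prn_on_grid,
            'roc_auc': roc_auc_score(y_true, y_prob),
            'pr_auc': auc(rcll, prn),
            'no_skill': no_skill}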
#
# def load_ML_models(path=hydro_ml_path, station='drag', prefix='CVM', suffix='.pkl'):
# from aux_gps import path_glob
# import joblib
# import matplotlib.pyplot as plt
# import seaborn as sns
# import xarray as xr
# import pandas as pd
# model_files = path_glob(path, '{}_*{}'.format(prefix, suffix))
# model_files = sorted(model_files)
# model_files = [x for x in model_files if station in x.as_posix()]
# m_list = [joblib.load(x) for x in model_files]
# model_files = [x.as_posix().split('/')[-1].split('.')[0] for x in model_files]
# # fix roc-auc:
# model_files = [x.replace('roc_auc', 'roc-auc') for x in model_files]
# print('loading {} station only.'.format(station))
# model_names = [x.split('_')[3] for x in model_files]
## model_pw_stations = [x.split('_')[1] for x in model_files]
## model_hydro_stations = [x.split('_')[2] for x in model_files]
# model_nsplits = [x.split('_')[6] for x in model_files]
# model_scores = [x.split('_')[5] for x in model_files]
# model_features = [x.split('_')[4] for x in model_files]
# model_test_sizes = []
# for file in model_files:
# try:
# model_test_sizes.append(int(file.split('_')[7]))
# except IndexError:
# model_test_sizes.append(20)
## model_pwv_hs_id = list(zip(model_pw_stations, model_hydro_stations))
# #    model_pwv_hs_id = ['_'.join(x) for x in model_pwv_hs_id]
# # transform model_dict to dataarray:
# tups = [tuple(x) for x in zip(model_names, model_scores, model_nsplits, model_features, model_test_sizes)] #, model_pwv_hs_id)]
# ind = pd.MultiIndex.from_tuples((tups), names=['model', 'scoring', 'splits', 'feature', 'test_size']) #, 'station'])
# da = xr.DataArray(m_list, dims='dim_0')
# da['dim_0'] = ind
# da = da.unstack('dim_0')
# da['splits'] = da['splits'].astype(int)
# da['test_size'].attrs['units'] = '%'
# return da
def plot_heatmaps_for_all_models_and_scorings(dss, var='roc-auc'): # , save=True):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
# assert station == dss.attrs['pwv_id']
cmaps = {'roc-auc': sns.color_palette("Blues", as_cmap=True),
'pr-auc': sns.color_palette("Greens", as_cmap=True)}
fg = xr.plot.FacetGrid(
dss,
col='model',
row='scoring',
sharex=True,
sharey=True, figsize=(10, 20))
dss = dss.mean('inner_kfold', keep_attrs=True)
vmin, vmax = dss[var].min(), 1
norm = plt.Normalize(vmin=vmin, vmax=vmax)
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
modelname = dss['model'].isel(model=j).item()
scoring = dss['scoring'].isel(scoring=i).item()
model = dss[var].isel(
{'model': j, 'scoring': i}).reset_coords(drop=True)
df = model.to_dataframe()
title = '{} model ({})'.format(modelname, scoring)
df = df.unstack()
mean = df.mean()
mean.name = 'mean'
df = df.append(mean).T.droplevel(0)
ax = sns.heatmap(df, annot=True, cmap=cmaps[var], cbar=False,
ax=ax, norm=norm)
ax.set_title(title)
ax.vlines([4], 0, 10, color='r', linewidth=2)
if j > 0:
ax.set_ylabel('')
if i < 2:
ax.set_xlabel('')
cax = fg.fig.add_axes([0.1, 0.025, .8, .015])
fg.fig.colorbar(ax.get_children()[0], cax=cax, orientation="horizontal")
fg.fig.suptitle('{}'.format(
dss.attrs[var].upper()), fontweight='bold')
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.937,
bottom=0.099,
left=0.169,
right=0.993,
hspace=0.173,
wspace=0.051)
# if save:
# filename = 'hydro_models_heatmaps_on_{}_{}_{}.png'.format(
# station, dss['outer_kfold'].size, var)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
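# Illustrative usage sketch (assumes `dss` is a CV-results Dataset with 'model',
# 'scoring' and 'inner_kfold' dims and a matching attrs entry for `var`):
# fg = plot_heatmaps_for_all_models_and_scorings(dss, var='roc-auc')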
def plot_ROC_from_dss(dss, feats=None, fontsize=16, save=True, wv_label='pwv',
best=False):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from aux_gps import convert_da_to_long_form_df
sns.set_style('whitegrid')
sns.set_style('ticks')
sns.set(font_scale=1.0)
cmap = sns.color_palette('tab10', n_colors=3)
splits = dss['outer_split'].size
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
dss = dss.reindex(scorer=scorer_order)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.sortby('model', ascending=False)
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst['TPR'].to_dataframe()
# if 'neg_sample' in dss.dims:
# fpr_lnum = 5
# model_lnum = 0
# scorer_lnum = 4
# features_lnum = 1
# else:
# fpr_lnum = 4
# model_lnum = 0
# scorer_lnum = 3
# features_lnum = 1
# df['FPR'] = df.index.get_level_values(fpr_lnum)
# df['model'] = df.index.get_level_values(model_lnum)
# df['scorer'] = df.index.get_level_values(scorer_lnum)
# df['features'] = df.index.get_level_values(features_lnum)
df = convert_da_to_long_form_df(dst['TPR'], var_name='score')
# df = df.melt(value_vars='TPR', id_vars=[
# 'features', 'model', 'scorer', 'FPR'], var_name='score')
if best is not None:
if best == 'compare_negs':
df1 = df.copy()[df['neg_sample'] == 1]
df2 = df.copy()
df2.drop('neg_sample', axis=1, inplace=True)
df1.drop('neg_sample', axis=1, inplace=True)
df1['neg_group'] = 1
df2['neg_group'] = 25
df = pd.concat([df1, df2])
col = 'neg_group'
titles = ['Neg=1', 'Neg=25']
else:
col=None
else:
col = 'scorer'
df['model'] = df['model'].str.replace('SVC', 'SVM')
fg = sns.FacetGrid(df, col=col, row='model', aspect=1)
fg.map_dataframe(sns.lineplot, x='FPR', y='value',
hue='features', ci='sd', palette=cmap, n_boot=None,
estimator='mean')
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
auc_model = dst.sel(model=model)
if model == 'SVC':
model = 'SVM'
for j in range(fg.axes.shape[1]): # j is cols
scorer = dss['scorer'].isel(scorer=j).item()
auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).reset_coords(drop=True).to_dataframe()
auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats]
auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats]
auc_mean = [x.item() for x in auc_scorer_mean]
auc_std = [x.item() for x in auc_scorer_std]
if j == 0 and best is not None:
scorer = dss['scorer'].isel(scorer=j).item()
auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).isel(neg_sample=0).reset_coords(drop=True).to_dataframe()
auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats]
auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats]
auc_mean = [x.item() for x in auc_scorer_mean]
auc_std = [x.item() for x in auc_scorer_std]
ax = fg.axes[i, j]
ax.plot([0, 1], [0, 1], color='tab:red', linestyle='--', lw=2,
label='chance')
if best is not None:
if best == 'compare_negs':
title = '{} | {}'.format(model, titles[j])
else:
title = '{}'.format(model)
else:
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
handles, labels = ax.get_legend_handles_labels()
hands = handles[0:3]
# labes = labels[0:3]
new_labes = []
for auc, auc_sd in zip(auc_mean, auc_std):
l = r'{:.2}$\pm${:.1}'.format(auc, auc_sd)
new_labes.append(l)
ax.legend(handles=hands, labels=new_labes, loc='lower right',
title='AUCs', prop={'size': fontsize-4})
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])
ax.grid(True)
# return handles, labels
fg.set_ylabels('True Positive Rate', fontsize=fontsize)
fg.set_xlabels('False Positive Rate', fontsize=fontsize)
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
if best is not None:
if best == 'compare_negs':
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize},
edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=2, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.865,
bottom=0.079,
left=0.144,
right=0.933,
hspace=0.176,
wspace=0.2)
else:
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize},
edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=1, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.825,
bottom=0.079,
left=0.184,
right=0.933,
hspace=0.176,
wspace=0.2)
else:
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.915)
if save:
if best is not None:
filename = 'ROC_plots_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'ROC_plots_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
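# Illustrative usage sketch (assumes `dss` comes from
# load_nested_CV_test_results_from_all_models below, with 'scorer', 'features'
# and 'outer_split' dims; best=None keeps one column per scorer):
# fg = plot_ROC_from_dss(dss, feats=['pwv', 'pwv+pressure+doy'], best=None, save=False)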
def plot_permutation_importances_from_dss(dss, feat_dim='features',
outer_dim='outer_split',
features='pwv+pressure+doy',
fix_xticklabels=True,split=1,
axes=None, save=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
sns.set_palette('Dark2', 6)
sns.set_style('whitegrid')
sns.set_style('ticks')
model = dss.attrs['model']
# use dss.sel(model='RF') first as input
dss['feature'] = dss['feature'].str.replace('DOY', 'doy')
dss = dss.sel({feat_dim: features})
# tests_ds = dss['test_score']
# tests_ds = tests_ds.sel(scorer=scorer)
# max_score_split = int(tests_ds.idxmax(outer_dim).item())
# use mean outer split:
# dss = dss.mean(outer_dim)
dss = dss.sel({outer_dim: split})
feats = features.split('+')
fn = len(feats)
if fn == 1:
gr_spec = None
fix_xticklabels = False
elif fn == 2:
gr_spec = [1, 1]
elif fn == 3:
gr_spec = [2, 5, 5]
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(sorted(feats)):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['PI_mean'].sel(
feature=fe).reset_coords(
drop=True)
sorted_feat = natsorted([x for x in dsf.feature.values])
dsf = dsf.reindex(feature=sorted_feat)
print([x for x in dsf.feature.values])
# dsf = dss['PI_mean'].sel(
# feature=fe).reset_coords(
# drop=True)
dsf = dsf.to_dataset('scorer').to_dataframe(
).reset_index(drop=True)
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f})'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 10}, loc='upper left')
axes[i].set_ylabel('Scores')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
n = sum(['pwv' in x for x in dss.feature.values])
axes[0].xaxis.set_ticklabels('')
hrs = np.arange(-24, -24+n)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xlabel('Hours prior to flood')
axes[2].set_xlabel('Hours prior to flood')
fig.tight_layout()
fig.suptitle('permutation importance scores for {} model split #{}'.format(model, split))
fig.subplots_adjust(top=0.904)
if save:
filename = 'permutation_importances_{}_split_{}_all_scorers_{}.png'.format(model, split, features)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
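# Illustrative usage sketch (assumes the nested CV results were produced with
# permutation importance enabled, so 'PI_mean' exists; select one model first):
# plot_permutation_importances_from_dss(dss.sel(model='RF'),
#                                       features='pwv+pressure+doy',
#                                       split=1, save=False)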
def plot_feature_importances_from_dss(
dss,
feat_dim='features', outer_dim='outer_split',
features='pwv+pressure+doy', fix_xticklabels=True,
axes=None, save=True, ylim=[0, 12], fontsize=16):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
sns.set_palette('Dark2', 6)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sns.set_theme(style='ticks', font_scale=1.5)
# use dss.sel(model='RF') first as input
dss['feature'] = dss['feature'].str.replace('DOY', 'doy')
dss = dss.sel({feat_dim: features})
# tests_ds = dss['test_score']
# tests_ds = tests_ds.sel(scorer=scorer)
# max_score_split = int(tests_ds.idxmax(outer_dim).item())
# use mean outer split:
dss = dss.mean(outer_dim)
feats = features.split('+')
fn = len(feats)
if fn == 1:
gr_spec = None
fix_xticklabels = False
elif fn == 2:
gr_spec = [1, 1]
elif fn == 3:
gr_spec = [5, 5, 2]
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(feats):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['feature_importances'].sel(
feature=fe).reset_coords(
drop=True)
# dsf = dss['PI_mean'].sel(
# feature=fe).reset_coords(
# drop=True)
sorted_feat = natsorted([x for x in dsf.feature.values])
# sorted_feat = [x for x in dsf.feature.values]
print(sorted_feat)
dsf = dsf.reindex(feature=sorted_feat)
dsf = dsf.to_dataset('scorer').to_dataframe(
).reset_index(drop=True) * 100
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
axes[i].set_title(title, fontsize=fontsize)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 12}, loc='upper center')
axes[i].set_ylabel('Feature importances [%]')
axes[i].grid(axis='y', zorder=1)
if ylim is not None:
[ax.set_ylim(*ylim) for ax in axes]
if fix_xticklabels:
n = sum(['pwv' in x for x in dss.feature.values])
axes[2].xaxis.set_ticklabels('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14)
axes[2].tick_params(labelsize=fontsize)
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
if save:
filename = 'RF_feature_importances_all_scorers_{}.png'.format(features)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
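# Illustrative usage sketch (RF results averaged over outer splits; assumes
# 'feature_importances' is present, e.g. from CV_test_after_GridSearchCV):
# plot_feature_importances_from_dss(dss.sel(model='RF'),
#                                   features='pwv+pressure+doy', save=False)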
def plot_feature_importances(
dss,
feat_dim='features',
features='pwv+pressure+doy',
scoring='f1', fix_xticklabels=True,
axes=None, save=True):
# use dss.sel(model='RF') first as input
import matplotlib.pyplot as plt
import numpy as np
dss = dss.sel({feat_dim: features})
tests_ds = dss[[x for x in dss if 'test' in x]]
tests_ds = tests_ds.sel(scoring=scoring)
score_ds = tests_ds['test_{}'.format(scoring)]
max_score = score_ds.idxmax('outer_kfold').values
feats = features.split('+')
fn = len(feats)
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': [1, 4, 4]})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(feats):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['feature_importances'].sel(
feature=fe,
outer_kfold=max_score).reset_coords(
drop=True)
dsf = dsf.to_dataset('scoring').to_dataframe(
).reset_index(drop=True) * 100
title = '{} ({})'.format(f.upper(), scoring)
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 8})
axes[i].set_ylabel('Feature importance [%]')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
axes[0].xaxis.set_ticklabels('')
hrs = np.arange(-24,0)
axes[1].set_xticklabels(hrs, rotation = 30, ha="center", fontsize=12)
axes[2].set_xticklabels(hrs, rotation = 30, ha="center", fontsize=12)
axes[1].set_xlabel('Hours prior to flood')
axes[2].set_xlabel('Hours prior to flood')
if save:
fig.tight_layout()
filename = 'RF_feature_importances_{}.png'.format(scoring)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances_for_all_scorings(dss,
features='doy+pwv+pressure',
model='RF', splitfigs=True):
import matplotlib.pyplot as plt
# station = dss.attrs['pwv_id'].upper()
dss = dss.sel(model=model).reset_coords(drop=True)
fns = len(features.split('+'))
scores = dss['scoring'].values
scores1 = ['f1', 'precision', 'recall']
scores2 = ['hss', 'tss', 'accuracy','roc-auc']
if splitfigs:
fig, axes = plt.subplots(len(scores1), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores1):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances_1.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
fig, axes = plt.subplots(len(scores2), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores2):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances_2.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
else:
fig, axes = plt.subplots(len(scores), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return dss
def plot_ROC_curve_from_dss_nested_CV(dss, outer_dim='outer_split',
plot_chance=True, color='tab:blue',
fontsize=14, plot_legend=True,
title=None,
ax=None, main_label=None):
import matplotlib.pyplot as plt
import numpy as np
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
mean_fpr = dss['FPR'].values
mean_tpr = dss['TPR'].mean(outer_dim).values
mean_auc = dss['roc_auc_score'].mean().item()
if np.isnan(mean_auc):
        raise ValueError('mean ROC-AUC score is NaN')
std_auc = dss['roc_auc_score'].std().item()
field = 'TPR'
xlabel = 'False Positive Rate'
ylabel = 'True Positive Rate'
    if main_label is None:
        main_label = r'Mean ROC (AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)
    else:
        textstr = '\n'.join(['{}'.format(
            main_label), r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)])
        main_label = textstr
ax.plot(mean_fpr, mean_tpr, color=color,
lw=3, alpha=.8, label=main_label)
std_tpr = dss[field].std(outer_dim).values
n = dss[outer_dim].size
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# plot Chance line:
if plot_chance:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8, zorder=206)
stdlabel = r'$\pm$ 1 Std. dev.'
stdstr = '\n'.join(['{}'.format(stdlabel), r'({} outer splits)'.format(n)])
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color='grey',
alpha=.2, label=stdstr)
ax.grid()
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
# ax.set_title(title, fontsize=fontsize)
ax.tick_params(axis='y', labelsize=fontsize)
ax.tick_params(axis='x', labelsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.set_title(title, fontsize=fontsize)
return ax
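# Illustrative usage sketch (assumes `dss` was reduced to a single
# model/scorer/features combination so only 'outer_split' and 'FPR' dims remain):
# ax = plot_ROC_curve_from_dss_nested_CV(
#     dss.sel(model='SVC', scorer='f1', features='pwv'), main_label='SVM')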
def plot_ROC_PR_curve_from_dss(
dss,
outer_dim='outer_kfold',
inner_dim='inner_kfold',
plot_chance=True,
ax=None,
color='b',
title=None,
std_on='inner',
main_label=None,
fontsize=14,
plot_type='ROC',
plot_std_legend=True):
"""plot classifier metrics, plot_type=ROC or PR"""
import matplotlib.pyplot as plt
import numpy as np
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
if plot_type == 'ROC':
mean_fpr = dss['FPR'].values
mean_tpr = dss['TPR'].mean(outer_dim).mean(inner_dim).values
mean_auc = dss['roc-auc'].mean().item()
if np.isnan(mean_auc):
            raise ValueError('mean ROC-AUC score is NaN')
std_auc = dss['roc-auc'].std().item()
field = 'TPR'
xlabel = 'False Positive Rate'
ylabel = 'True Positive Rate'
elif plot_type == 'PR':
mean_fpr = dss['RCLL'].values
mean_tpr = dss['PRN'].mean(outer_dim).mean(inner_dim).values
mean_auc = dss['pr-auc'].mean().item()
if np.isnan(mean_auc):
            raise ValueError('mean PR-AUC score is NaN')
std_auc = dss['pr-auc'].std().item()
no_skill = dss['no_skill'].mean(outer_dim).mean(inner_dim).item()
field = 'PRN'
xlabel = 'Recall'
ylabel = 'Precision'
# plot mean ROC:
if main_label is None:
main_label = r'Mean {} (AUC={:.2f}$\pm${:.2f})'.format(
plot_type, mean_auc, std_auc)
    else:
        textstr = '\n'.join(['Mean {} {}'.format(
            plot_type, main_label), r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)])
        main_label = textstr
ax.plot(mean_fpr, mean_tpr, color=color,
lw=2, alpha=.8, label=main_label)
if std_on == 'inner':
std_tpr = dss[field].mean(outer_dim).std(inner_dim).values
n = dss[inner_dim].size
elif std_on == 'outer':
std_tpr = dss[field].mean(inner_dim).std(outer_dim).values
n = dss[outer_dim].size
elif std_on == 'all':
std_tpr = dss[field].stack(
dumm=[inner_dim, outer_dim]).std('dumm').values
n = dss[outer_dim].size * dss[inner_dim].size
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# plot Chance line:
if plot_chance:
if plot_type == 'ROC':
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
elif plot_type == 'PR':
ax.plot([0, 1], [no_skill, no_skill], linestyle='--', color='r',
lw=2, label='No Skill', alpha=.8)
# plot ROC STD range:
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color='grey',
alpha=.2, label=r'$\pm$ 1 std. dev. ({} {} splits)'.format(n, std_on))
ax.grid()
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
ax.set_title(title, fontsize=fontsize)
ax.tick_params(axis='y', labelsize=fontsize)
ax.tick_params(axis='x', labelsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
# handles, labels = ax.get_legend_handles_labels()
# if not plot_std_legend:
# if len(handles) == 7:
# handles = handles[:-2]
# labels = labels[:-2]
# else:
# handles = handles[:-1]
# labels = labels[:-1]
# ax.legend(handles=handles, labels=labels, loc="lower right",
# fontsize=fontsize)
return ax
def load_cv_splits_from_pkl(savepath):
import joblib
from aux_gps import path_glob
file = path_glob(savepath, 'CV_inds_*.pkl')[0]
n_splits = int(file.as_posix().split('/')[-1].split('_')[2])
shuffle = file.as_posix().split('/')[-1].split('.')[0].split('=')[-1]
cv_dict = joblib.load(file)
spl = len([x for x in cv_dict.keys()])
assert spl == n_splits
print('loaded {} with {} splits.'.format(file, n_splits))
return cv_dict
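# Illustrative usage sketch (assumes a CV_inds_*.pkl file was previously written
# by save_cv_splits_to_dict into hydro_ml_path/'nested4'):
# cv_dict = load_cv_splits_from_pkl(hydro_ml_path / 'nested4')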
def save_cv_splits_to_dict(X, y, cv, train_key='train', test_key='test',
savepath=None):
import joblib
cv_dict = {}
for i, (train, test) in enumerate(cv.split(X, y)):
cv_dict[i+1] = {train_key: train, test_key: test}
# check for completness:
all_train = [x['train'] for x in cv_dict.values()]
flat_train = set([item for sublist in all_train for item in sublist])
all_test = [x['test'] for x in cv_dict.values()]
flat_test = set([item for sublist in all_test for item in sublist])
assert flat_test == flat_train
if savepath is not None:
filename = 'CV_inds_{}_splits_shuffle={}.pkl'.format(cv.n_splits, cv.shuffle)
joblib.dump(cv_dict, savepath / filename)
print('saved {} to {}.'.format(filename, savepath))
return cv_dict
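# Illustrative usage sketch (assumes X, y as returned by
# prepare_X_y_for_holdout_test below; StratifiedKFold exposes the n_splits and
# shuffle attributes used in the saved filename):
# from sklearn.model_selection import StratifiedKFold
# cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
# cv_dict = save_cv_splits_to_dict(X, y, cv, savepath=hydro_ml_path / 'nested4')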
def plot_many_ROC_curves(model, X, y, name='', color='b', ax=None,
plot_chance=True, title=None, n_splits=None):
from sklearn.metrics import plot_roc_curve
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import numpy as np
from sklearn.model_selection import StratifiedKFold
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
# just plot the ROC curve for X, y, no nsplits and stats:
if n_splits is None:
viz = plot_roc_curve(model, X, y, color=color, ax=ax, name=name)
else:
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
# y_score = model.fit(X[train], y[train]).predict_proba(X[val])[:, 1]
y_pred = model.predict(X[val])
fpr, tpr, _ = roc_curve(y[val], y_pred)
# viz = plot_roc_curve(model, X[val], y[val],
# name='ROC fold {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
# fpr = viz.fpr
# tpr = viz.tpr
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(roc_auc_score(y[val], y_pred))
# scores.append(f1_score(y[val], y_pred))
# scores = np.array(scores)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color=color,
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (
mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
if plot_chance:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title=title)
ax.legend(loc="lower right")
return ax
def HP_tuning(X, y, model_name='SVC', val_size=0.18, n_splits=None,
test_size=None,
best_score='f1', seed=42, savepath=None):
    """do HP tuning with ML_Classifier_Switcher object and return a Dataset of
    results. note that the X, y are already after split to val/test"""
    from sklearn.model_selection import GridSearchCV
    from sklearn.model_selection import StratifiedKFold
# first get the features from X:
features = list(set(['_'.join(x.split('_')[0:2])
for x in X['feature'].values]))
ml = ML_Classifier_Switcher()
sk_model = ml.pick_model(model_name)
param_grid = ml.param_grid
if n_splits is None and val_size is not None:
n_splits = int((1 // val_size) - 1)
elif val_size is not None and n_splits is not None:
        raise ValueError('Both val_size and n_splits are defined, choose either one...')
print('StratifiedKfolds of {}.'.format(n_splits))
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
gr = GridSearchCV(estimator=sk_model, param_grid=param_grid, cv=cv,
n_jobs=-1, scoring=['f1', 'roc_auc', 'accuracy'], verbose=1,
refit=best_score, return_train_score=True)
gr.fit(X, y)
if best_score is not None:
ds, best_model = process_gridsearch_results(gr, model_name,
features=features, pwv_id=X.attrs['pwv_id'], hs_id=y.attrs['hydro_station_id'], test_size=test_size)
else:
ds = process_gridsearch_results(gr, model_name, features=features,
pwv_id=X.attrs['pwv_id'], hs_id=y.attrs['hydro_station_id'], test_size=test_size)
best_model = None
if savepath is not None:
save_cv_results(ds, best_model=best_model, savepath=savepath)
return ds, best_model
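# Illustrative usage sketch (X, y are assumed to carry 'pwv_id' and
# 'hydro_station_id' attrs; val_size must be None when n_splits is given):
# ds, best_model = HP_tuning(X, y, model_name='RF', val_size=None, n_splits=5,
#                            best_score='f1', savepath=None)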
def save_gridsearchcv_object(GridSearchCV, savepath, filename):
import joblib
print('{} was saved to {}'.format(filename, savepath))
joblib.dump(GridSearchCV, savepath / filename)
return
def run_RF_feature_importance_on_all_features(path=hydro_path, gr_path=hydro_ml_path/'holdout'):
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
feat_list = []
for feat in feats:
da = holdout_test(model_name='RF', return_RF_FI=True, features=feat)
feat_list.append(da)
daa = xr.concat(feat_list, 'features')
daa['features'] = feats
return daa
def load_nested_CV_test_results_from_all_models(path=hydro_ml_path, best=False,
neg=1, splits=4,
permutation=False):
from aux_gps import path_glob
import xarray as xr
if best:
if splits is not None:
file_str = 'nested_CV_test_results_*_all_features_with_hyper_params_best_hp_neg_{}_{}a.nc'.format(neg, splits)
if permutation:
file_str = 'nested_CV_test_results_*_all_features_permutation_tests_best_hp_neg_{}_{}a.nc'.format(neg, splits)
else:
if splits is not None:
file_str = 'nested_CV_test_results_*_all_features_with_hyper_params_neg_{}_{}a.nc'.format(neg, splits)
if permutation:
file_str = 'nested_CV_test_results_*_all_features_permutation_tests_neg_{}_{}a.nc'.format(neg, splits)
files = path_glob(path, file_str)
print(files)
models = [x.as_posix().split('/')[-1].split('_')[4] for x in files]
print('loading CV test results only for {} models'.format(', '.join(models)))
dsl = [xr.load_dataset(x) for x in files]
if not permutation:
dsl = [x[['mean_score', 'std_score', 'test_score', 'roc_auc_score', 'TPR']] for x in dsl]
dss = xr.concat(dsl, 'model')
dss['model'] = models
return dss
# def plot_all_permutation_test_results(dss, feats=None):
# import xarray as xr
# fg = xr.plot.FacetGrid(
# dss,
# col='scorer',
# row='model',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# model = dss['model'].isel(model=i).item()
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# scorer = dss['scorer'].isel(scorer=j).item()
# ax = plot_single_permutation_test_result(dss, feats=feats,
# scorer=scorer,
# model=model,
# ax=ax)
# fg.fig.tight_layout()
# return fg
def plot_permutation_test_results_from_dss(dss, feats=None, fontsize=14,
save=True, wv_label='pwv'):
# ax=None, scorer='f1', model='MLP'):
import matplotlib.pyplot as plt
import seaborn as sns
from PW_from_gps_figures import get_legend_labels_handles_title_seaborn_histplot
from aux_gps import convert_da_to_long_form_df
sns.set_style('whitegrid')
sns.set_style('ticks')
try:
splits = dss['outer_split'].size
except KeyError:
splits = 5
    try:
        assert 'best' in dss.attrs['comment']
        best = True
    except (AssertionError, KeyError):
        best = False
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.reindex(scorer=scorer_order)
# dss = dss.mean('outer_split')
cmap = sns.color_palette('tab10', n_colors=3)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dss = dss.sortby('model', ascending=False)
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst[['permutation_score', 'true_score', 'pvalue']].to_dataframe()
# df['permutations'] = df.index.get_level_values(2)
# df['scorer'] = df.index.get_level_values(3)
# df['features'] = df.index.get_level_values(0)
# df['model'] = df.index.get_level_values(1)
# df['model'] = df['model'].str.replace('SVC', 'SVM')
# df = df.melt(value_vars=['permutation_score', 'true_score', 'pvalue'], id_vars=[
# 'features', 'model', 'scorer'], var_name='scores')
df = convert_da_to_long_form_df(dst[['permutation_score', 'true_score', 'pvalue']], var_name='scores')
df_p = df[df['scores'] == 'permutation_score']
df_pval = df[df['scores'] == 'pvalue']
# if ax is None:
# fig, ax = plt.subplots(figsize=(6, 8))
fg = sns.FacetGrid(df_p, col='scorer', row='model', legend_out=True,
sharex=False)
fg.map_dataframe(sns.histplot, x="value", hue="features",
legend=True, palette=cmap,
stat='density', kde=True,
element='bars', bins=10)
# pvals = dst.sel(scorer=scorer, model=model)[
# 'pvalue'].reset_coords(drop=True)
# pvals = pvals.values
# handles, labels, title = get_legend_labels_handles_title_seaborn_histplot(ax)
# new_labels = []
# for pval, label in zip(pvals, labels):
# label += ' (p={:.1})'.format(pval)
# new_labels.append(label)
# ax.legend(handles, new_labels, title=title)
df_t = df[df['scores'] == 'true_score']
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
df_model = df_t[df_t['model'] == model]
df_pval_model = df_pval[df_pval['model'] == model]
for j in range(fg.axes.shape[1]): # j is cols
scorer = dss['scorer'].isel(scorer=j).item()
df1 = df_model[df_model['scorer'] == scorer]
df2 = df_pval_model[df_pval_model['scorer'] == scorer]
ax = fg.axes[i, j]
ymax = ax.get_ylim()[-1] - 0.2
plabels = []
for k, feat in enumerate(feats):
val = df1[df1['features']==feat]['value'].unique().item()
pval = df2[df2['features']==feat]['value'].unique().item()
plabels.append('pvalue: {:.2g}'.format(pval))
# print(i, val, feat, scorer, model)
ax.axvline(x=val, ymin=0, ymax=ymax, linestyle='--', color=cmap[k],
label=feat)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles, labels=plabels,
prop={'size': fontsize-4}, loc='upper left')
if 'hss' in scorer or 'tss' in scorer:
ax.set_xlim(-0.35, 1)
else:
ax.set_xlim(0.15, 1)
# ax.set_xticks([0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1])
# handles, labels, title = get_legend_labels_handles_title_seaborn_histplot(ax)
if model == 'SVC':
model = 'SVM'
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
# ax.set_xlim(-0.3, 1)
fg.set_ylabels('Density', fontsize=fontsize)
fg.set_xlabels('Score', fontsize=fontsize)
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92)
if save:
if best:
filename = 'permutation_test_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'permutation_test_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def run_CV_nested_tests_on_all_features(path=hydro_path, gr_path=hydro_ml_path/'nested4',
verbose=False, model_name='SVC', params=None,
savepath=None, drop_hours=None, PI=30, Ptest=None,
suffix=None, sample_from_negatives=1):
"""returns the nested CV test results for all scorers, features and models,
if model is chosen, i.e., model='MLP', returns just this model results
and its hyper-parameters per each outer split"""
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
from aux_gps import save_ncfile
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
feat_list = []
for feat in feats:
print('Running CV on feature {}'.format(feat))
ds = CV_test_after_GridSearchCV(path=path, gr_path=gr_path,
model_name=model_name, params=params,
features=feat, PI=PI, Ptest=Ptest,
verbose=verbose, drop_hours=drop_hours,
sample_from_negatives=sample_from_negatives)
feat_list.append(ds)
dsf = xr.concat(feat_list, 'features')
dsf['features'] = feats
dss = dsf
dss.attrs['model'] = model_name
if Ptest is not None:
filename = 'nested_CV_test_results_{}_all_features_permutation_tests'.format(model_name)
else:
filename = 'nested_CV_test_results_{}_all_features_with_hyper_params'.format(model_name)
if params is not None:
dss.attrs['comment'] = 'using best hyper parameters for all features and outer splits'
filename += '_best_hp'
filename += '_neg_{}'.format(sample_from_negatives)
if suffix is not None:
filename += '_{}'.format(suffix)
filename += '.nc'
if savepath is not None:
save_ncfile(dss, savepath, filename)
return dss
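# Illustrative usage sketch (assumes the GRSRCHCV_nested_*.pkl objects and the
# CV splits pickle already exist in gr_path):
# dss = run_CV_nested_tests_on_all_features(model_name='RF', PI=None,
#                                           savepath=hydro_ml_path)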
def run_holdout_test_on_all_models_and_features(path=hydro_path, gr_path=hydro_ml_path/'holdout'):
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
models = ['MLP', 'SVC', 'RF']
model_list = []
model_list2 = []
for model in models:
feat_list = []
feat_list2 = []
for feat in feats:
best, roc = holdout_test(path=path, gr_path=gr_path,
model_name=model, features=feat)
best.index.name = 'scorer'
ds = best[['mean_score', 'std_score', 'holdout_test_scores']].to_xarray()
roc.index.name = 'FPR'
roc_da = roc.to_xarray().to_array('scorer')
feat_list.append(ds)
feat_list2.append(roc_da)
dsf = xr.concat(feat_list, 'features')
dsf2 = xr.concat(feat_list2, 'features')
dsf['features'] = feats
dsf2['features'] = feats
model_list.append(dsf)
model_list2.append(dsf2)
dss = xr.concat(model_list, 'model')
rocs = xr.concat(model_list2, 'model')
dss['model'] = models
rocs['model'] = models
dss['roc'] = rocs
return dss
def prepare_X_y_for_holdout_test(features='pwv+doy', model_name='SVC',
path=hydro_path, drop_hours=None,
negative_samples=1):
# combine X,y and split them according to test ratio and seed:
X, y = combine_pos_neg_from_nc_file(path, negative_sample_num=negative_samples)
# re arange X features according to model:
feats = features.split('+')
if model_name == 'RF' and 'doy' in feats:
if isinstance(feats, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(feats, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in feats:
if isinstance(feats, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
elif isinstance(feats, str):
feats = ['doy_sin']
feats.append('doy_cos')
if isinstance(X, list):
Xs = []
for X1 in X:
Xs.append(select_features_from_X(X1, feats))
X = Xs
else:
X = select_features_from_X(X, feats)
if drop_hours is not None:
if isinstance(X, list):
Xs = []
for X1 in X:
Xs.append(drop_hours_in_pwv_pressure_features(X1, drop_hours,
verbose=True))
X = Xs
else:
X = drop_hours_in_pwv_pressure_features(X, drop_hours, verbose=True)
return X, y
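# Illustrative usage sketch ('doy' is remapped to 'DOY' for RF and to
# doy_sin/doy_cos for the other models, as handled above):
# X, y = prepare_X_y_for_holdout_test(features='pwv+pressure+doy', model_name='RF')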
def CV_test_after_GridSearchCV(path=hydro_path, gr_path=hydro_ml_path/'nested4',
model_name='SVC', features='pwv', params=None,
verbose=False, drop_hours=None, PI=None,
Ptest=None, sample_from_negatives=1):
"""do cross_validate with all scorers on all gridsearchcv folds,
reads the nested outer splits CV file in gr_path"""
import xarray as xr
import numpy as np
# cv = read_cv_params_and_instantiate(gr_path/'CV_outer.csv')
cv_dict = load_cv_splits_from_pkl(gr_path)
if verbose:
print(cv_dict)
param_df_dict = load_one_gridsearchcv_object(path=gr_path,
cv_type='nested',
features=features,
model_name=model_name,
verbose=verbose)
Xs, ys = prepare_X_y_for_holdout_test(features, model_name, path,
drop_hours=drop_hours,
negative_samples=sample_from_negatives)
bests = []
for i, negative_sample in enumerate(np.arange(1, sample_from_negatives + 1)):
print('running with negative sample #{} out of {}'.format(
negative_sample, sample_from_negatives))
if isinstance(Xs, list):
X = Xs[i]
y = ys[i]
else:
X = Xs
y = ys
if Ptest is not None:
print('Permutation Test is in progress!')
ds = run_permutation_classifier_test(X, y, 5, param_df_dict, Ptest=Ptest,
params=params,
model_name=model_name, verbose=verbose)
return ds
if params is not None:
if verbose:
print('running with custom hyper parameters: ', params)
outer_bests = []
outer_rocs = []
fis = []
pi_means = []
pi_stds = []
n_splits = len([x for x in cv_dict.keys()])
for split, tt in cv_dict.items():
X_train = X[tt['train']]
y_train = y[tt['train']]
X_test = X[tt['test']]
y_test = y[tt['test']]
outer_split = '{}-{}'.format(split, n_splits)
# for i, (train_index, test_index) in enumerate(cv.split(X, y)):
# X_train = X[train_index]
# y_train = y[train_index]
# X_test = X[test_index]
# y_test = y[test_index]
# outer_split = '{}-{}'.format(i+1, cv.n_splits)
best_params_df = param_df_dict.get(outer_split)
if params is not None:
for key, value in params.items():
if isinstance(value, tuple):
for ind in best_params_df.index:
best_params_df.at[ind, key] = value
else:
best_params_df[key] = value
if model_name == 'RF':
if PI is not None:
bdf, roc, fi, pi_mean, pi_std = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI, Ptest=Ptest,
model_name=model_name, verbose=verbose)
else:
bdf, roc, fi = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI, Ptest=Ptest,
model_name=model_name, verbose=verbose)
fis.append(fi)
else:
if PI is not None:
bdf, roc, pi_mean, pi_std = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI,
model_name=model_name, verbose=verbose)
else:
bdf, roc = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI,
model_name=model_name, verbose=verbose)
if PI is not None:
pi_means.append(pi_mean)
pi_stds.append(pi_std)
bdf.index.name = 'scorer'
roc.index.name = 'FPR'
if 'hidden_layer_sizes' in bdf.columns:
bdf['hidden_layer_sizes'] = bdf['hidden_layer_sizes'].astype(str)
bdf_da = bdf.to_xarray()
roc_da = roc.to_xarray().to_array('scorer')
roc_da.name = 'TPR'
outer_bests.append(bdf_da)
outer_rocs.append(roc_da)
best_da = xr.concat(outer_bests, 'outer_split')
roc_da = xr.concat(outer_rocs, 'outer_split')
best = xr.merge([best_da, roc_da])
best['outer_split'] = np.arange(1, n_splits + 1)
if model_name == 'RF':
fi_da = xr.concat(fis, 'outer_split')
best['feature_importances'] = fi_da
if PI is not None:
pi_mean_da = xr.concat(pi_means, 'outer_split')
pi_std_da = xr.concat(pi_stds, 'outer_split')
best['PI_mean'] = pi_mean_da
best['PI_std'] = pi_std_da
bests.append(best)
if len(bests) == 1:
return bests[0]
else:
best_ds = xr.concat(bests, 'neg_sample')
best_ds['neg_sample'] = np.arange(1, sample_from_negatives + 1)
return best_ds
def run_permutation_classifier_test(X, y, cv, best_params_df, Ptest=100,
model_name='SVC', verbose=False, params=None):
from sklearn.model_selection import permutation_test_score
import xarray as xr
import numpy as np
def run_one_permutation_test(X=X, y=y, cv=cv, bp_df=best_params_df,
model_name=model_name, n_perm=Ptest,
verbose=verbose):
true_scores = []
pvals = []
perm_scores = []
for scorer in bp_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
b_params = bp_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, b_params))
true, perm_scrs, pval = permutation_test_score(sk_model, X, y,
cv=cv,
n_permutations=Ptest,
scoring=scorers(scorer),
random_state=0,
n_jobs=-1)
true_scores.append(true)
pvals.append(pval)
perm_scores.append(perm_scrs)
true_da = xr.DataArray(true_scores, dims=['scorer'])
true_da['scorer'] = [x for x in bp_df.index.values]
true_da.name = 'true_score'
pval_da = xr.DataArray(pvals, dims=['scorer'])
pval_da['scorer'] = [x for x in bp_df.index.values]
pval_da.name = 'pvalue'
perm_da = xr.DataArray(perm_scores, dims=['scorer', 'permutations'])
perm_da['scorer'] = [x for x in bp_df.index.values]
perm_da['permutations'] = np.arange(1, Ptest+1)
perm_da.name = 'permutation_score'
ds = xr.merge([true_da, pval_da, perm_da])
return ds
ml = ML_Classifier_Switcher()
if params is not None:
best_p_df = best_params_df['1-{}'.format(len(best_params_df))]
for key, value in params.items():
if isinstance(value, tuple):
for ind in best_p_df.index:
best_p_df.at[ind, key] = value
else:
best_p_df[key] = value
dss = run_one_permutation_test(bp_df=best_p_df)
else:
if verbose:
print('Picking {} model with best params'.format(model_name))
splits = []
for i, df in enumerate(best_params_df.values()):
if verbose:
print('running on split #{}'.format(i+1))
            ds = run_one_permutation_test(bp_df=df)
splits.append(ds)
dss = xr.concat(splits, dim='outer_split')
dss['outer_split'] = np.arange(1, len(best_params_df)+ 1)
return dss
def run_test_on_CV_split(X_train, y_train, X_test, y_test, param_df,
model_name='SVC', verbose=False, PI=None,
Ptest=None):
import numpy as np
import xarray as xr
import pandas as pd
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.inspection import permutation_importance
best_df = param_df.copy()
ml = ML_Classifier_Switcher()
if verbose:
print('Picking {} model with best params'.format(model_name))
# print('Features are: {}'.format(features))
test_scores = []
fi_list = []
mean_fpr = np.linspace(0, 1, 100)
tprs = []
roc_aucs = []
pi_mean_list = []
pi_std_list = []
for scorer in best_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
params = best_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, params))
sk_model.set_params(**params)
sk_model.fit(X_train, y_train)
if hasattr(sk_model, 'feature_importances_'):
# print(X_train['feature'])
# input('press any key')
FI = xr.DataArray(sk_model.feature_importances_, dims=['feature'])
FI['feature'] = X_train['feature']
fi_list.append(FI)
y_pred = sk_model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_test, y_pred)
roc_aucs.append(roc_auc)
tprs.append(interp_tpr)
score = scorer_function(scorer, y_test, y_pred)
test_scores.append(score)
if PI is not None:
pi = permutation_importance(sk_model, X_test, y_test,
n_repeats=PI,
scoring=scorers(scorer),
random_state=0, n_jobs=-1)
pi_mean = xr.DataArray(pi['importances_mean'], dims='feature')
pi_std = xr.DataArray(pi['importances_std'], dims='feature')
pi_mean.name = 'PI_mean'
pi_std.name = 'PI_std'
pi_mean['feature'] = X_train['feature']
pi_std['feature'] = X_train['feature']
pi_mean_list.append(pi_mean)
pi_std_list.append(pi_std)
if PI is not None:
pi_mean_da = xr.concat(pi_mean_list, 'scorer')
pi_std_da = xr.concat(pi_std_list, 'scorer')
pi_mean_da['scorer'] = [x for x in best_df.index.values]
pi_std_da['scorer'] = [x for x in best_df.index.values]
roc_df = pd.DataFrame(tprs).T
roc_df.columns = [x for x in best_df.index]
roc_df.index = mean_fpr
best_df['test_score'] = test_scores
best_df['roc_auc_score'] = roc_aucs
if hasattr(sk_model, 'feature_importances_'):
fi = xr.concat(fi_list, 'scorer')
fi['scorer'] = [x for x in best_df.index.values]
if PI is not None:
return best_df, roc_df, fi, pi_mean_da, pi_std_da
else:
return best_df, roc_df, fi
elif PI is not None:
return best_df, roc_df, pi_mean_da, pi_std_da
else:
return best_df, roc_df
def holdout_test(path=hydro_path, gr_path=hydro_ml_path/'holdout',
model_name='SVC', features='pwv', return_RF_FI=False,
verbose=False):
"""do a holdout test with best model from gridsearchcv
with all scorers"""
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
import xarray as xr
import pandas as pd
import numpy as np
# process gridsearchcv results:
best_df, test_ratio, seed = load_one_gridsearchcv_object(path=gr_path,
cv_type='holdout',
features=features,
model_name=model_name,
verbose=False)
print('Using random seed of {} and {}% test ratio'.format(seed, test_ratio))
ts = int(test_ratio) / 100
X, y = prepare_X_y_for_holdout_test(features, model_name, path)
# split using test_size and seed:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=ts,
random_state=int(seed),
stratify=y)
if verbose:
print('y train pos/neg:{}, {}'.format((y_train==1).sum().item(),(y_train==0).sum().item()))
print('y test pos/neg:{}, {}'.format((y_test==1).sum().item(),(y_test==0).sum().item()))
# pick model and set the params to best from gridsearchcv:
ml = ML_Classifier_Switcher()
print('Picking {} model with best params'.format(model_name))
print('Features are: {}'.format(features))
test_scores = []
fi_list = []
mean_fpr = np.linspace(0, 1, 100)
tprs = []
roc_aucs = []
for scorer in best_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
params = best_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, params))
sk_model.set_params(**params)
sk_model.fit(X_train, y_train)
if hasattr(sk_model, 'feature_importances_'):
FI = xr.DataArray(sk_model.feature_importances_, dims=['feature'])
FI['feature'] = X_train['feature']
fi_list.append(FI)
y_pred = sk_model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_test, y_pred)
roc_aucs.append(roc_auc)
tprs.append(interp_tpr)
score = scorer_function(scorer, y_test, y_pred)
test_scores.append(score)
roc_df = pd.DataFrame(tprs).T
roc_df.columns = [x for x in best_df.index]
roc_df.index = mean_fpr
best_df['holdout_test_scores'] = test_scores
best_df['roc_auc_score'] = roc_aucs
if fi_list and return_RF_FI:
da = xr.concat(fi_list, 'scorer')
da['scorer'] = best_df.index.values
da.name = 'RF_feature_importances'
return da
return best_df, roc_df
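# Illustrative usage sketch (assumes GRSRCHCV_holdout_*.pkl objects exist in
# hydro_ml_path/'holdout' for this model/feature combination):
# best_df, roc_df = holdout_test(model_name='SVC', features='pwv+pressure')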
def load_one_gridsearchcv_object(path=hydro_ml_path, cv_type='holdout', features='pwv',
model_name='SVC', verbose=True):
"""load one gridsearchcv obj with model_name and features and run read_one_gridsearchcv_object"""
from aux_gps import path_glob
import joblib
# first filter for model name:
if verbose:
print('loading GridsearchCVs results for {} model with {} cv type'.format(model_name, cv_type))
model_files = path_glob(path, 'GRSRCHCV_{}_*.pkl'.format(cv_type))
model_files = [x for x in model_files if model_name in x.as_posix()]
# now select features:
if verbose:
print('loading GridsearchCVs results with {} features'.format(features))
model_features = [x.as_posix().split('/')[-1].split('_')[3] for x in model_files]
feat_ind = get_feature_set_from_list(model_features, features)
# also get the test ratio and seed number:
if len(feat_ind) > 1:
if verbose:
print('found {} GR objects.'.format(len(feat_ind)))
files = sorted([model_files[x] for x in feat_ind])
outer_splits = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-3] for x in files]
grs = [joblib.load(x) for x in files]
best_dfs = [read_one_gridsearchcv_object(x) for x in grs]
di = dict(zip(outer_splits, best_dfs))
return di
else:
        file = model_files[feat_ind[0]]
seed = file.as_posix().split('/')[-1].split('.')[0].split('_')[-1]
outer_splits = file.as_posix().split('/')[-1].split('.')[0].split('_')[-3]
# load and produce best_df:
gr = joblib.load(file)
best_df = read_one_gridsearchcv_object(gr)
return best_df, outer_splits, seed
def get_feature_set_from_list(model_features_list, features, sep='+'):
"""select features from model_features_list,
return the index in the model_features_list and the entry itself"""
# first find if features is a single or multiple features:
if isinstance(features, str) and sep not in features:
try:
ind = [i for i, e in enumerate(model_features_list) if e == features]
# ind = model_features_list.index(features)
except ValueError:
raise ValueError('{} is not in {}'.format(features, ', '.join(model_features_list)))
elif isinstance(features, str) and sep in features:
features_split = features.split(sep)
mf = [x.split(sep) for x in model_features_list]
bool_list = [set(features_split) == (set(x)) for x in mf]
ind = [i for i, x in enumerate(bool_list) if x]
# print(len(ind))
# ind = ind[0]
# feat = model_features_list[ind]
# feat = model_features_list[ind]
return ind
def read_one_gridsearchcv_object(gr):
"""read one gridsearchcv multimetric object and
get the best params, best mean/std scores"""
import pandas as pd
# first get all the scorers used:
scorers = [x for x in gr.scorer_.keys()]
# now loop over the scorers:
best_params = []
best_mean_scores = []
best_std_scores = []
for scorer in scorers:
df_mean = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["mean_test_{}".format(scorer)], columns=[scorer])], axis=1)
df_std = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["std_test_{}".format(scorer)], columns=[scorer])], axis=1)
# best index = highest score:
best_ind = df_mean[scorer].idxmax()
best_mean_scores.append(df_mean.iloc[best_ind][scorer])
best_std_scores.append(df_std.iloc[best_ind][scorer])
best_params.append(df_mean.iloc[best_ind].to_frame().T.iloc[:, :-1])
best_df = pd.concat(best_params)
best_df['mean_score'] = best_mean_scores
best_df['std_score'] = best_std_scores
best_df.index = scorers
return best_df
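# Illustrative usage sketch (any fitted multimetric GridSearchCV works; the
# filename below is hypothetical):
# import joblib
# gr = joblib.load(hydro_ml_path / 'holdout' / 'GRSRCHCV_holdout_SVC_pwv_example.pkl')
# best_df = read_one_gridsearchcv_object(gr)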
# # param grid dict:
# params = gr.param_grid
# # scorer names:
# scoring = [x for x in gr.scoring.keys()]
# # df:
# df = pd.DataFrame().from_dict(gr.cv_results_)
# # produce multiindex from param_grid dict:
# param_names = [x for x in params.keys()]
# # unpack param_grid vals to list of lists:
# pro = [[y for y in x] for x in params.values()]
# ind = pd.MultiIndex.from_product((pro), names=param_names)
# df.index = ind
# best_params = []
# best_mean_scores = []
# best_std_scores = []
# for scorer in scoring:
# best_params.append(df[df['rank_test_{}'.format(scorer)]==1]['mean_test_{}'.format(scorer)].index[0])
# best_mean_scores.append(df[df['rank_test_{}'.format(scorer)]==1]['mean_test_{}'.format(scorer)].iloc[0])
# best_std_scores.append(df[df['rank_test_{}'.format(scorer)]==1]['std_test_{}'.format(scorer)].iloc[0])
# best_df = pd.DataFrame(best_params, index=scoring, columns=param_names)
# best_df['mean_score'] = best_mean_scores
# best_df['std_score'] = best_std_scores
# return best_df, best_df_1
def process_gridsearch_results(GridSearchCV, model_name,
split_dim='inner_kfold', features=None,
pwv_id=None, hs_id=None, test_size=None):
import xarray as xr
import pandas as pd
import numpy as np
    # finish getting best results from all scorers together
    """take a GridSearchCV object with cv_results_ and convert it into an xarray Dataset"""
params = GridSearchCV.param_grid
scoring = GridSearchCV.scoring
results = GridSearchCV.cv_results_
# for scorer in scoring:
# for sample in ['train', 'test']:
# sample_score_mean = results['mean_{}_{}'.format(sample, scorer)]
# sample_score_std = results['std_{}_{}'.format(sample, scorer)]
# best_index = np.nonzero(results['rank_test_{}'.format(scorer)] == 1)[0][0]
# best_score = results['mean_test_{}'.format(scorer)][best_index]
names = [x for x in params.keys()]
# unpack param_grid vals to list of lists:
pro = [[y for y in x] for x in params.values()]
ind = pd.MultiIndex.from_product((pro), names=names)
# result_names = [x for x in GridSearchCV.cv_results_.keys() if 'split'
# not in x and 'time' not in x and 'param' not in x and
# 'rank' not in x]
result_names = [
x for x in results.keys() if 'param' not in x]
ds = xr.Dataset()
for da_name in result_names:
da = xr.DataArray(results[da_name])
ds[da_name] = da
ds = ds.assign(dim_0=ind).unstack('dim_0')
for dim in ds.dims:
if ds[dim].dtype == 'O':
try:
ds[dim] = ds[dim].astype(str)
except ValueError:
ds = ds.assign_coords({dim: [str(x) for x in ds[dim].values]})
if ('True' in ds[dim]) and ('False' in ds[dim]):
ds[dim] = ds[dim] == 'True'
# get all splits data and concat them along number of splits:
all_splits = [x for x in ds.data_vars if 'split' in x]
train_splits = [x for x in all_splits if 'train' in x]
test_splits = [x for x in all_splits if 'test' in x]
# loop over scorers:
trains = []
tests = []
for scorer in scoring:
train_splits_scorer = [x for x in train_splits if scorer in x]
trains.append(xr.concat([ds[x]
for x in train_splits_scorer], split_dim))
test_splits_scorer = [x for x in test_splits if scorer in x]
tests.append(xr.concat([ds[x] for x in test_splits_scorer], split_dim))
splits_scorer = np.arange(1, len(train_splits_scorer) + 1)
train_splits = xr.concat(trains, 'scoring')
test_splits = xr.concat(tests, 'scoring')
# splits = [x for x in range(len(train_splits))]
# train_splits = xr.concat([ds[x] for x in train_splits], 'split')
# test_splits = xr.concat([ds[x] for x in test_splits], 'split')
# replace splits data vars with newly dataarrays:
ds = ds[[x for x in ds.data_vars if x not in all_splits]]
ds['split_train_score'] = train_splits
ds['split_test_score'] = test_splits
ds[split_dim] = splits_scorer
if isinstance(scoring, list):
ds['scoring'] = scoring
elif isinstance(scoring, dict):
ds['scoring'] = [x for x in scoring.keys()]
ds.attrs['name'] = 'CV_results'
ds.attrs['param_names'] = names
ds.attrs['model_name'] = model_name
ds.attrs['{}_splits'.format(split_dim)] = ds[split_dim].size
if GridSearchCV.refit:
if hasattr(GridSearchCV.best_estimator_, 'feature_importances_'):
f_import = xr.DataArray(
GridSearchCV.best_estimator_.feature_importances_,
dims=['feature'])
f_import['feature'] = features
ds['feature_importances'] = f_import
ds['best_score'] = GridSearchCV.best_score_
# ds['best_model'] = GridSearchCV.best_estimator_
ds.attrs['refitted_scorer'] = GridSearchCV.refit
for name in names:
if isinstance(GridSearchCV.best_params_[name], tuple):
GridSearchCV.best_params_[name] = ','.join(
map(str, GridSearchCV.best_params_[name]))
ds['best_{}'.format(name)] = GridSearchCV.best_params_[name]
return ds, GridSearchCV.best_estimator_
else:
return ds, None
def save_cv_results(cvr, savepath=hydro_path):
from aux_gps import save_ncfile
features = '+'.join(cvr.attrs['features'])
# pwv_id = cvr.attrs['pwv_id']
# hs_id = cvr.attrs['hs_id']
# neg_pos_ratio = cvr.attrs['neg_pos_ratio']
ikfolds = cvr.attrs['inner_kfold_splits']
okfolds = cvr.attrs['outer_kfold_splits']
name = cvr.attrs['model_name']
refitted_scorer = cvr.attrs['refitted_scorer'].replace('_', '-')
# filename = 'CVR_{}_{}_{}_{}_{}_{}_{}_{}.nc'.format(pwv_id, hs_id,
# name, features, refitted_scorer, ikfolds, okfolds, neg_pos_ratio)
filename = 'CVR_{}_{}_{}_{}_{}.nc'.format(
name, features, refitted_scorer, ikfolds, okfolds)
save_ncfile(cvr, savepath, filename)
return
def scikit_fit_predict(X, y, seed=42, with_pressure=True, n_splits=7,
plot=True):
# step1: CV for train/val (80% from 80-20 test). display results with
# model and scores(AUC, f1), use StratifiedKFold
# step 2: use validated model with test (20%) and build ROC curve
# step 3: add features (pressure) but check for correlation
# check permutations with scikit learn
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import f1_score
from sklearn.metrics import plot_roc_curve
from sklearn.svm import SVC
from numpy import interp
from sklearn.metrics import auc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
if not with_pressure:
just_pw = [x for x in X.feature.values if 'pressure' not in x]
X = X.sel(feature=just_pw)
X_tt, X_test, y_tt, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=seed)
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
# cv = LeaveOneOut()
classifier = SVC(kernel='rbf', probability=False,
random_state=seed)
# classifier = LinearDiscriminantAnalysis()
# clf = QuadraticDiscriminantAnalysis()
scores = []
fig, ax = plt.subplots()
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X_tt, y_tt)):
# for i in range(100):
# X_train, X_val, y_train, y_val = train_test_split(
# X_tt, y_tt, shuffle=True, test_size=0.5, random_state=i)
# clf.fit(X_train, y_train)
classifier.fit(X_tt[train], y_tt[train])
# viz = plot_roc_curve(clf, X_val, y_val,
# name='ROC run {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
viz = plot_roc_curve(classifier, X_tt[val], y_tt[val],
name='ROC fold {}'.format(i),
alpha=0.3, lw=1, ax=ax)
interp_tpr = interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
# aucs.append(viz.roc_auc)
# y_pred = clf.predict(X_val)
y_pred = classifier.predict(X_tt[val])
aucs.append(roc_auc_score(y_tt[val], y_pred))
# scores.append(clf.score(X_val, y_val))
scores.append(f1_score(y_tt[val], y_pred))
scores = np.array(scores)
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title="Receiver operating characteristic example")
ax.legend(loc="lower right")
ax.set_title(
'ROC curve for KFold={}, with pressure anomalies.'.format(n_splits))
if not with_pressure:
ax.set_title(
'ROC curve for KFold={}, without pressure anomalies.'.format(n_splits))
y_test_predict = classifier.predict(X_test)
print('final test predict score:')
print(f1_score(y_test, y_test_predict))
if plot:
plt.figure()
plt.hist(scores, bins=15, edgecolor='k')
return scores
# clf.fit(X,y)
def produce_X_y_from_list(pw_stations=['drag', 'dsea', 'elat'],
hs_ids=[48125, 48199, 60170],
pressure_station='bet-dagan', max_flow=0,
window=25, neg_pos_ratio=1, path=work_yuval,
ims_path=ims_path, hydro_path=hydro_path,
concat_Xy=False):
if isinstance(hs_ids, int):
hs_ids = [hs_ids for x in range(len(pw_stations))]
kwargs = locals()
[kwargs.pop(x) for x in ['pw_stations', 'hs_ids', 'concat_Xy']]
Xs = []
ys = []
for pw_station, hs_id in list(zip(pw_stations, hs_ids)):
X, y = produce_X_y(pw_station, hs_id, **kwargs)
Xs.append(X)
ys.append(y)
if concat_Xy:
print('concatenating pwv stations {}, with hydro_ids {}.'.format(
pw_stations, hs_ids))
X, y = concat_X_y(Xs, ys)
return X, y
else:
return Xs, ys
def concat_X_y(Xs, ys):
import xarray as xr
import pandas as pd
X_attrs = [x.attrs for x in Xs]
X_com_attrs = dict(zip(pd.DataFrame(X_attrs).T.index.values,
pd.DataFrame(X_attrs).T.values.tolist()))
y_attrs = [x.attrs for x in ys]
y_com_attrs = dict(zip( | pd.DataFrame(y_attrs) | pandas.DataFrame |
import pandas as pd
import numpy as np
import altair as alt
import altair_saver
import glob
import os
import copy
import collections
import traceback
import json
# ---------------- Plot themes ------------------------
def personal():
return {
'config': {
'font': 'sans-serif',
'view': {
'height': 300,
'width': 400,
},
'range': {
'category': {'scheme': 'set2'},
'ordinal': {'scheme': 'plasma'},
},
'legend': {
'labelLimit': 0,
},
'background': 'white',
'mark': {
'clip': True,
},
'line': {
'size': 3,
# 'opacity': 0.4
},
}
}
def publication():
stroke_color = '333'
title_size = 24
label_size = 20
line_width = 5
return {
'config': {
'font': 'sans-serif',
'view': {
'height': 500,
'width': 600,
'strokeWidth': 0,
'background': 'white',
},
'title': {
'fontSize': title_size,
},
'range': {
'category': {'scheme': 'set2'},
'ordinal': {'scheme': 'plasma'},
},
'axis': {
'titleFontSize': title_size,
'labelFontSize': label_size,
'grid': False,
'domainWidth': 5,
'domainColor': stroke_color,
'tickWidth': 3,
'tickSize': 9,
'tickCount': 4,
'tickColor': stroke_color,
'tickOffset': 0,
},
'legend': {
'titleFontSize': title_size,
'labelFontSize': label_size,
'labelLimit': 0,
'titleLimit': 0,
'orient': 'top-left',
# 'padding': 10,
'titlePadding': 10,
# 'rowPadding': 5,
'fillColor': '#ffffff88',
# 'strokeColor': 'black',
'cornerRadius': 0,
},
'rule': {
'size': 3,
'color': '999',
# 'strokeDash': [4, 4],
},
'line': {
'size': line_width,
# 'opacity': 0.4
},
}
}
alt.themes.register('personal', personal)
alt.themes.register('publication', publication)
# ----------- Data loading -----------------------------
def load_args(path):
with open(path + '/args.json') as f:
args = json.load(f)
return args
def merge_args(df, args_dict):
df = df.copy()
for k, v in args_dict.items():
df[k] = v
return df
def load_jobs(pattern, subdir='exploration', root='.', title=None):
jobs = glob.glob(f'{root}/results/{subdir}/{pattern}')
results = []
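    # Each job directory is expected to contain train.csv, test.csv and args.json;
    # the rows are tagged with the job name and merged with the job's arguments.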
for job in jobs:
try:
name = os.path.basename(os.path.normpath(job))
train_data = pd.read_csv(job + '/train.csv')
train_data['test'] = False
test_data = pd.read_csv(job + '/test.csv')
test_data['test'] = True
data = pd.concat([train_data, test_data], sort=False)
data['name'] = name
args_dict = load_args(job)
data = merge_args(data, args_dict)
results.append(data)
except Exception as e:
print(e)
df = | pd.concat(results, sort=False) | pandas.concat |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
| assert_series_equal(empty, empty2) | pandas.util.testing.assert_series_equal |
# BUG: Regression on DataFrame.from_records #42456
from numpy import (
array,
empty,
)
import pandas as pd
print(pd.__version__)
structured_dtype = [("prop", int)]
# Does NOT work any more
result = empty((0, len(structured_dtype)))
structured_array = array(result, dtype=structured_dtype)
result = | pd.DataFrame.from_records(structured_array) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 22 14:50:25 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import LSTM, Bidirectional, GRU
from keras.layers.recurrent import LSTM
from sklearn.utils import shuffle
import seaborn as sns
import matplotlib.pyplot as plt
import math
data1 = pd.read_csv("B05_birlestirilmis.csv")
data2 = pd.read_csv("B07_birlestirilmis.csv")
data3 = pd.read_csv("B18_birlestirilmis.csv")
data4 = pd.read_csv("B33_birlestirilmis.csv")
data5 = pd.read_csv("B34_birlestirilmis.csv")
data6 = pd.read_csv("B46_birlestirilmis.csv")
data7 = pd.read_csv("B47_birlestirilmis.csv")
data8 = pd.read_csv("B48_birlestirilmis.csv")
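# For every dataset, columns 0-30 are used as the features X and column 30 (which is
# also the last of those feature columns, as written) as the target Y.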
X1=data1.iloc[:,0:31]
Y1=data1.iloc[:,30:31]
X2=data2.iloc[:,0:31]
Y2=data2.iloc[:,30:31]
X3=data3.iloc[:,0:31]
Y3=data3.iloc[:,30:31]
X4=data4.iloc[:,0:31]
Y4=data4.iloc[:,30:31]
X5=data5.iloc[:,0:31]
Y5=data5.iloc[:,30:31]
X6=data6.iloc[:,0:31]
Y6=data6.iloc[:,30:31]
X7=data7.iloc[:,0:31]
Y7=data7.iloc[:,30:31]
X8=data8.iloc[:,0:31]
Y8=data8.iloc[:,30:31]
#split the data into training and test sets
from sklearn.model_selection import train_test_split
trX1, teX1,trY1,teY1 = train_test_split(X1,Y1,test_size=0.20, random_state=0)
trX2, teX2,trY2,teY2 = train_test_split(X2,Y2,test_size=0.20, random_state=0)
trX3, teX3,trY3,teY3 = train_test_split(X3,Y3,test_size=0.20, random_state=0)
trX4, teX4,trY4,teY4 = train_test_split(X4,Y4,test_size=0.20, random_state=0)
trX5, teX5,trY5,teY5 = train_test_split(X5,Y5,test_size=0.20, random_state=0)
trX6, teX6,trY6,teY6 = train_test_split(X6,Y6,test_size=0.20, random_state=0)
trX7, teX7,trY7,teY7 = train_test_split(X7,Y7,test_size=0.20, random_state=0)
trX8, teX8,trY8,teY8 = train_test_split(X8,Y8,test_size=0.20, random_state=0)
tesX1=pd.DataFrame(teX1).sort_index()
tesY1=pd.DataFrame(teY1).sort_index()
tesX2=pd.DataFrame(teX2).sort_index()
tesY2=pd.DataFrame(teY2).sort_index()
tesX3=pd.DataFrame(teX3).sort_index()
tesY3=pd.DataFrame(teY3).sort_index()
tesX4=pd.DataFrame(teX4).sort_index()
tesY4=pd.DataFrame(teY4).sort_index()
tesX5=pd.DataFrame(teX5).sort_index()
tesY5=pd.DataFrame(teY5).sort_index()
tesX6=pd.DataFrame(teX6).sort_index()
tesY6=pd.DataFrame(teY6).sort_index()
tesX7=pd.DataFrame(teX7).sort_index()
tesY7=pd.DataFrame(teY7).sort_index()
tesX8=pd.DataFrame(teX8).sort_index()
tesY8=pd.DataFrame(teY8).sort_index()
trainX1=pd.DataFrame(trX1).sort_index()
trainY1=pd.DataFrame(trY1).sort_index()
trainX2=pd.DataFrame(trX2).sort_index()
trainY2=pd.DataFrame(trY2).sort_index()
trainX3=pd.DataFrame(trX3).sort_index()
trainY3=pd.DataFrame(trY3).sort_index()
trainX4=pd.DataFrame(trX4).sort_index()
trainY4=pd.DataFrame(trY4).sort_index()
trainX5=pd.DataFrame(trX5).sort_index()
trainY5=pd.DataFrame(trY5).sort_index()
trainX6=pd.DataFrame(trX6).sort_index()
trainY6=pd.DataFrame(trY6).sort_index()
trainX7=pd.DataFrame(trX7).sort_index()
trainY7=pd.DataFrame(trY7).sort_index()
trainX8=pd.DataFrame(trX8).sort_index()
trainY8= | pd.DataFrame(trY8) | pandas.DataFrame |
from dask import delayed
from dask.distributed import Client, LocalCluster
from dask_jobqueue import SLURMCluster
import glob
import pickle
import numpy as np
import scipy.stats
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from metric_hse import HSEMetric
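# Request 30 single-core SLURM workers (2 GB each, 3 h walltime) and attach a Dask client to the cluster.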
cluster = SLURMCluster(memory='2g',
cores=1,
queue='short',
walltime="03:00:00",
job_extra=['--job-name="simworker"', "--output=/users/d/m/dmatthe1/job_logs/dask-%x-%A.txt"])
cluster.scale(30)
client = Client(cluster)
def investigate(x, y):
"""x and y are observations of X and Y"""
assert x.shape == y.shape, "Can't do mutual information on observations of different length"
xy = np.c_[x, y] # a faster way of doing xy = zip(x,y) and turn to array
vals_x, counts_x = np.unique(x, return_counts=True, axis=0)
vals_y, counts_y = np.unique(y, return_counts=True, axis=0)
vals_xy, counts_xy = np.unique(xy, return_counts=True, axis=0)
# H(X)
Hx = scipy.stats.entropy(counts_x, base=2)
# H(Y)
Hy = scipy.stats.entropy(counts_y, base=2)
# H(X,Y)
Hxy = scipy.stats.entropy(counts_xy, base=2)
# H(Y|X)
Hy_given_x = Hxy - Hx
# H(X|Y)
Hx_given_y = Hxy - Hy
# I(X;Y)
MI_xy = Hy - Hy_given_x
return (min(Hx, Hy), Hx + Hy , Hx, Hy, Hxy, Hy_given_x, Hx_given_y, MI_xy, MI_xy/min(Hx, Hy))
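# Worked example (hypothetical input): for x = [0, 0, 1, 1] and y = [0, 1, 0, 1],
# investigate(x, y) gives Hx = Hy = 1 bit, Hxy = 2 bits, Hy_given_x = 1 bit and MI_xy = 0,
# i.e. the two observation series share no information.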
def process_data_HSE(fname):
seed_id = int(fname[fname.find("steps_")+6:-6])
# read from disk
data = pickle.load(open(fname, "rb"))
# reshape to (time, vid, states)
data = data.reshape((data.shape[0], -1, 4))
n_steps_full = data.shape[0]
n_steps = 100
step_size = n_steps_full//n_steps
entropies = np.zeros(shape=(n_steps, 3))
entropies[:, 0] = seed_id
h = HSEMetric(None)
row_idx = 0
for row_idx in range(n_steps):
entropies[row_idx, 1] = row_idx*step_size
entropies[row_idx, 2] = h.get_metric_no_world(data[row_idx*step_size, :, :2])["HSE"]
return entropies
def process_data_MI(fname, nbins):
# read from disk
data = pickle.load(open(fname, "rb"))
# reshape to (time, vid, states)
data = data.reshape((data.shape[0], -1, 4))
# bin to nbins
    binned_data = (data * nbins).astype(int)  # use builtin int; np.int is removed in recent NumPy
velocity_binned_data = binned_data[:,:,2]
n_vehicles = velocity_binned_data.shape[1]
all_entropies = np.zeros(shape=((n_vehicles*(n_vehicles-1))//2, 12))
row_id = 0
seed_id = int(fname[fname.find("steps_")+6:-6])
for v_id_a in range(n_vehicles):
x_series = velocity_binned_data[:, v_id_a]
for v_id_b in range(v_id_a+1, n_vehicles):
y_series = velocity_binned_data[:, v_id_b]
row_dat = investigate(x_series, y_series)
all_entropies[row_id, 3:] = row_dat
all_entropies[row_id, :3] = (seed_id, v_id_a, v_id_b)
row_id += 1
return all_entropies, np.mean(all_entropies, axis=0)
def process_data_PI(fname, nbins):
# read from disk
data = pickle.load(open(fname, "rb"))
# reshape to (time, vid, states)
data = data.reshape((data.shape[0], -1, 4))
# bin to nbins
    binned_data = (data * nbins).astype(int)  # use builtin int; np.int is removed in recent NumPy
velocity_binned_data = binned_data[:,:,2]
n_vehicles = velocity_binned_data.shape[1]
n_steps = velocity_binned_data.shape[0]
all_entropies = np.zeros(shape=(n_vehicles, 11))
row_id = 0
seed_id = int(fname[fname.find("steps_")+6:-6])
for v_id_a in range(n_vehicles):
x_series = velocity_binned_data[:n_steps//2, v_id_a]
y_series = velocity_binned_data[n_steps//2:, v_id_a]
assert len(x_series) == len(y_series)
row_dat = investigate(x_series, y_series)
all_entropies[row_id, 2:] = row_dat
all_entropies[row_id, :2] = (seed_id, v_id_a)
row_id += 1
return all_entropies, np.mean(all_entropies, axis=0)
def process_data_PI_temporal(fname, nbins):
# read from disk
data = pickle.load(open(fname, "rb"))
# reshape to (time, vid, states)
data = data.reshape((data.shape[0], -1, 4))
# bin to nbins
    binned_data = (data * nbins).astype(int)  # use builtin int; np.int is removed in recent NumPy
velocity_binned_data = binned_data[:,:,2]
n_vehicles = velocity_binned_data.shape[1]
n_steps = velocity_binned_data.shape[0]
history_length = 1000
data_points = ((n_steps//history_length) - 1)*10
step_size = history_length//10
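    # Temporal predictive information: compare each step_size-long block of a vehicle's
    # binned velocity with the block that immediately follows it.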
all_entropies = np.zeros(shape=(data_points, 12))
seed_id = int(fname[fname.find("steps_")+6:-6])
for t_idx in range(data_points):
for v_id_a in range(n_vehicles):
x_series = velocity_binned_data[(t_idx)*step_size : (t_idx+1)*step_size, v_id_a]
y_series = velocity_binned_data[(t_idx+1)*step_size : (t_idx+2)*step_size, v_id_a]
assert len(x_series) == len(y_series)
row_dat = investigate(x_series, y_series)
all_entropies[t_idx, 3:] = row_dat
all_entropies[t_idx, :3] = (seed_id, t_idx*step_size, v_id_a)
return all_entropies, np.mean(all_entropies, axis=0)
def main(client, fnames, nbins):
results_MI = []
results_HSE = []
results_PI = []
results_PI_temporal = []
for fname in fnames:
results_MI.append(delayed(process_data_MI)(fname, nbins))
for fname in fnames:
results_HSE.append(delayed(process_data_HSE)(fname))
for fname in fnames:
results_PI.append(delayed(process_data_PI)(fname, nbins))
for fname in fnames:
results_PI_temporal.append(delayed(process_data_PI_temporal)(fname, nbins))
merged_data_MI = []
for fut in client.compute(results_MI):
res = fut.result()
merged_data_MI.append(res)
merged_data_HSE = []
for fut in client.compute(results_HSE):
res = fut.result()
merged_data_HSE.append(res)
merged_data_PI = []
for fut in client.compute(results_PI):
res = fut.result()
merged_data_PI.append(res)
merged_data_PI_temporal = []
for fut in client.compute(results_PI_temporal):
res = fut.result()
merged_data_PI_temporal.append(res)
return merged_data_MI, merged_data_HSE, merged_data_PI, merged_data_PI_temporal
policies = ["Policy", "Policy_Random", "Policy_Random_Network", "Policy_Random_Network2", "Policy_Follow_Leader", "Policy_Boids", "Policy_Simplified_Boids"]
dfsMI = []
dfsHSE = []
dfsPI = []
dfsPIt = []
for policy in policies:
fnames = glob.glob("data/{}_10agents_10000steps*".format(policy))
dat_MI, dat_HSE, dat_PI, dat_PIt = main(client, fnames, 10)
stacked_entropies_MI = np.vstack([d[0] for d in dat_MI])
stacked_entropies_PI = np.vstack([d[0] for d in dat_PI])
stacked_entropies_PIt = np.vstack([d[0] for d in dat_PIt])
stacked_entropies_HSE = np.vstack( dat_HSE)
dfMI = pd.DataFrame(stacked_entropies_MI, columns=["Seed", "Vehicle_A", "Vehicle_B", "Min(Hx, Hy)", "Hx+Hy", "Hx", "Hy", "Hxy", "Hy_given_x", "Hx_given_y", "MI_xy", "MI_xy_Normalized"])
dfPI = | pd.DataFrame(stacked_entropies_PI, columns=["Seed", "Vehicle_A", "Min(Hx, Hy)", "Hx+Hy", "Hx", "Hy", "Hxy", "Hy_given_x", "Hx_given_y", "PI_xy", "PI_xy_Normalized"]) | pandas.DataFrame |
# Author : <NAME>
# Date : 23-26 Dec, 2021
# Based on the plotly Dash interface for plotting in HTML: https://dash.plotly.com/
# Uses the Binance python library https://python-binance.readthedocs.io for the Binance API https://binance-docs.github.io
# Issues: slow and may over-request; should be converted to the websocket API for smooth realtime updates
from binance.client import Client
from binance.enums import *
from matplotlib import pyplot as plt
import datetime
from matplotlib.animation import FuncAnimation
import pandas as pd
import IPython
import pytz
import numpy as np
from config import api_key,api_secret
import dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
client = Client(api_key, api_secret)
futures_symbols=pd.DataFrame(client.futures_exchange_info()['symbols']).symbol.values
app = dash.Dash(__name__,external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])
app.scripts.config.serve_locally = False
app.layout = html.Div([html.H4(children='Exchange Info'),
dcc.Dropdown(id='currency-pair-dropdown',
options=[{'label':x,'value':x} for x in futures_symbols],value='BTCUSDT'),
html.Div([
html.Div([html.H3('Candlestick chart 🕯 📈 📉'),dcc.Graph(id='klines')],
className="four columns"),
html.Div([html.H3('Ongoing trades ⚖️'),dcc.Graph(id='trades')],
className="four columns"),
html.Div([html.H3('Order book 📙 '),dcc.Graph(id='order-book')],
className="four columns")],
className="row"),
dcc.Interval(id='interval-component',interval=1*500,n_intervals=0)])
@app.callback([Output('klines','figure')],[Input('interval-component','n_intervals'),Input('currency-pair-dropdown','value')])
def update_klines(n,sym):
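    # Fetch the latest 1-minute klines for the selected symbol and draw them as a candlestick chart.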
df=pd.DataFrame(client.get_klines(symbol=sym,interval=Client.KLINE_INTERVAL_1MINUTE))
df.columns = ['opentime','open','high','low','close','volume','closetime','quote_asset_volume','number_of_trades','taker_buy_base_asset_volume','taker_buy_quote_asset_volume','to_be_ignored']
df['opentime'] = [datetime.datetime.fromtimestamp(x/1000.0) for x in df.opentime]
fig=go.Figure(data=[go.Candlestick(x=df['opentime'],
open=df['open'],
high=df['high'],
low = df['low'],
close = df['close'])])
fig.update_layout(title=sym, yaxis_title='value',width=500,height=500)
fig.update_layout(hoverdistance=0)
return [fig]
@app.callback([Output('trades','figure')],
[Input('interval-component','n_intervals'),
Input('currency-pair-dropdown','value')])
def update_trade_price_chart(n,sym):
trades = client.get_recent_trades(symbol=sym,limit=500)
trades = pd.DataFrame(trades)
trades['time'] = [datetime.datetime.fromtimestamp(x/1000.0) for x in trades.time]
trades['price']=trades['price'].astype(float)
trades['qty'] = trades['qty'].astype(float)
fig= px.scatter(trades,x='time',y='price',size='qty',color='qty')
fig.update_layout(title=sym, yaxis_title='value',width=500,height=500)
fig.update_layout(hoverdistance=0)
return [fig]
@app.callback([Output('order-book','figure')],
[Input('interval-component','n_intervals'),
Input('currency-pair-dropdown','value')])
def update_order_book(n,sym):
depth = client.get_order_book(symbol=sym,limit=1000)
bids_df=pd.DataFrame(depth['bids'],columns=['price','qty'])
bids_df['type']='bids'
asks_df= | pd.DataFrame(depth['asks'],columns=['price','qty']) | pandas.DataFrame |
from datetime import timedelta
import numpy as np
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
option_context,
)
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_empty_frame_dtypes(self):
empty_df = DataFrame()
tm.assert_series_equal(empty_df.dtypes, Series(dtype=object))
nocols_df = DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, Series(dtype=object))
norows_df = DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, Series(object, index=list("abc")))
norows_int_df = DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, Series(np.dtype("int32"), index=list("abc"))
)
df = DataFrame({"a": 1, "b": True, "c": 1.0}, index=[1, 2, 3])
ex_dtypes = Series({"a": np.int64, "b": np.bool_, "c": np.float64})
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
tm.assert_series_equal(
df.dtypes,
Series({"a": np.float_, "b": np.float_, "c": np.float_}),
)
tm.assert_series_equal(df.iloc[:, 2:].dtypes, Series({"c": np.float_}))
tm.assert_series_equal(
df.dtypes,
Series({"a": np.float_, "b": np.float_, "c": np.float_}),
)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_dtypes_timedeltas(self):
df = DataFrame(
{
"A": Series(date_range("2012-1-1", periods=3, freq="D")),
"B": Series([timedelta(days=i) for i in range(3)]),
}
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
def test_frame_apply_np_array_return_type(self):
# GH 35517
df = | DataFrame([["foo"]]) | pandas.DataFrame |
import streamlit as st
from google.cloud import storage, bigquery
from google.cloud.bigquery.schema import SchemaField
from google.oauth2 import service_account
from PIL import Image
import json
import io
import os
import pandas as pd
SEND_FEEDBACK = True
class GCP_USER:
def __init__(self, credentials):
self.credentials = service_account.Credentials.from_service_account_info(credentials)
self.storage_cl = storage.Client(credentials=self.credentials)
self.bigq_cl = bigquery.Client(credentials=self.credentials)
def load_image_and_caption(self, feedback_query, caption, uploaded_image, storage_bucket_name, bigquery_table_name):
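        # Determine the next image id from the feedback table, upload the image to the
        # storage bucket under that id, and build the caption record.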
# Image
with open(feedback_query, "r") as query_file:
query = query_file.read()
df_max_id = self.bigq_cl.query(query).to_dataframe()
df_max_id['MAX_ID'].fillna(0, inplace=True)
new_id = int(df_max_id.loc[0,'MAX_ID']+1)
img_file = f'{str(new_id)}.jpg'
img = Image.open(io.BytesIO(uploaded_image))
img.save(img_file)
blob = self.storage_cl.get_bucket(storage_bucket_name).blob(f'images/{img_file}')
blob.upload_from_filename(img_file)
os.remove(f'{img_file}')
# Caption
df_new_caption = | pd.DataFrame({'PATH': [img_file], 'CAPTION': [caption], 'ID': new_id}) | pandas.DataFrame |
import pandas as pd
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.widgets import CheckButtons
from pandas.plotting import scatter_matrix
import ta
import talib
#https://technical-analysis-library-in-python.readthedocs.io/en/latest/ta.html#momentum-indicators
#https://towardsdatascience.com/trading-toolbox-04-subplots-f6c353278f78
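# Simple moving-average helper: adds an 'ma<days>' column with the rolling mean of the close price.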
def MA(df, days):
name = 'ma'+str(days)
df[name] = df['close'].rolling(days).mean()
return df
def AddIndicators(df):
#df = add_all_ta_features(df, open="###", high="high", low="low", close="close", volume="vol") Missing open
### Momentum ###
# Moving Averages (50 days and 200 days)
MA(df, 50)
MA(df, 200)
# RSI
df['rsi'] = ta.momentum.RSIIndicator(df['close'], window = 14).rsi()
# Stochastic Oscillator
df['stoch'] = ta.momentum.StochasticOscillator(df['high'], df['low'], df['close'], window = 14).stoch()
# Rate of Change
df['roc'] = ta.momentum.roc(df['close'], window = 14)
# TSI - True strength index
df['tsi'] = ta.momentum.tsi(df['close'], window_slow = 25, window_fast = 13)
### Volume ###
# Accumulation / Distribution Index (ADI)
df['adi'] = ta.volume.AccDistIndexIndicator(df['high'], df['low'], df['close'], df['vol']).acc_dist_index()
# Chaikin Money Flow
df['cmf'] = ta.volume.ChaikinMoneyFlowIndicator(df['high'], df['low'], df['close'], df['vol']).chaikin_money_flow()
# Ease of Movement
df['eom'] = ta.volume.EaseOfMovementIndicator(df['high'], df['low'], df['vol'], window = 14).ease_of_movement()
# Money Flow Index
df['mfi'] = ta.volume.MFIIndicator(df['high'], df['low'], df['close'], df['vol'], window = 14).money_flow_index()
# Negative Volume Index (NVI)
df['nvi'] = ta.volume.NegativeVolumeIndexIndicator(df['close'], df['vol']).negative_volume_index()
# On-balance volume (OBV)
df['obv'] = ta.volume.OnBalanceVolumeIndicator(df['close'], df['vol']).on_balance_volume()
# Volume-price trend (VPT)
df['vpt'] = ta.volume.VolumePriceTrendIndicator(df['close'], df['vol']).volume_price_trend()
# Volume Weighted Average Price (VWAP)
df['vwap'] = ta.volume.VolumeWeightedAveragePrice(df['high'], df['low'], df['close'], df['vol'], window = 14).volume_weighted_average_price()
### Volatility ###
# Bollinger Bands
df['blband'] = ta.volatility.BollingerBands(df['close'], window = 14).bollinger_lband()
df['bhband'] = ta.volatility.BollingerBands(df['close'], window = 14).bollinger_hband()
# Average True Range (ATR)
df['atr'] = ta.volatility.AverageTrueRange(df['high'], df['low'], df['close'], window = 14).average_true_range()
# Donchian Channel
df['dlband'] = ta.volatility.DonchianChannel(df['high'], df['low'], df['close'], window = 14).donchian_channel_lband()
df['dhband'] = ta.volatility.DonchianChannel(df['high'], df['low'], df['close'], window = 14).donchian_channel_hband()
# Keltner Channels
df['klband'] = ta.volatility.KeltnerChannel(df['high'], df['low'], df['close'], window = 14).keltner_channel_lband()
df['khband'] = ta.volatility.KeltnerChannel(df['high'], df['low'], df['close'], window = 14).keltner_channel_hband()
### Trend ###
# Average Directional Movement Index (ADX)
df['adx'] = ta.trend.ADXIndicator(df['high'], df['low'], df['close'], window = 14).adx()
# Commodity Channel Index (CCI)
df['cci'] = ta.trend.CCIIndicator(df['high'], df['low'], df['close'], window = 14).cci()
# Exponential Moving Average (EMA)
df['ema'] = ta.trend.EMAIndicator(df['close'], window = 14).ema_indicator()
# KST Oscillator (KST Signal)
df['kst'] = ta.trend.KSTIndicator(df['close']).kst()
# Moving Average Convergence Divergence (MACD)
df['macd'] = ta.trend.MACD(df['close']).macd()
return df
# SP500
sp500_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/SP500_7yr_daily.csv')
sp500_frame = pd.DataFrame(sp500_data, columns = ['ticker', 'descr', 'date', 'close', 'retx'])
# XLB
xlb_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLB_7yr_daily.csv')
xlb_frame = pd.DataFrame(xlb_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx'])
AddIndicators(xlb_frame)
# XLC
xlc_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLC_7yr_daily.csv')
xlc_frame = pd.DataFrame(xlc_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx'])
AddIndicators(xlc_frame)
# XLE
xle_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLE_7yr_daily.csv')
xle_frame = pd.DataFrame(xle_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx'])
AddIndicators(xle_frame)
# XLF
xlf_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLF_7yr_daily.csv')
xlf_frame = pd.DataFrame(xlf_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx'])
AddIndicators(xlf_frame)
# XLI
xli_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLI_7yr_daily.csv')
xli_frame = | pd.DataFrame(xli_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from pylab import *
from scipy.ndimage import measurements
import matplotlib.colors as colors
import os
fec_ini ='2019-05-15'
fec_fin ='2019-12-31'
Recorte_Rad = 'no' ##---> Set to 'si' so that the original reflectance set is cropped to the COD dates and masked.
## With 'si' it uses a lot of RAM and this program must be run in parts.
fi = datetime.datetime.strptime(fec_ini,"%Y-%m-%d")
ff =datetime. datetime.strptime(fec_fin,"%Y-%m-%d")
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##------------------SECTION ONE: RELATIONSHIP BETWEEN AREAS AND COD-----------##
#------------------------------------------------------------------------------
# Code motivation ---------------------------------------------------------------
"""
Program to analyze the relationship between the COD and the reflectances, as well
as between the cloud areas and the COD. In both cases a scatter plot is drawn.
"""
###############################################################################
##---------------READING THE NETCDF WITH THE GOES COD DATA-------------------##
###############################################################################
ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_COD_05-2019-12-2019.nc')
COD = ds.variables['COD'][:, :, :]
tiempo = ds.variables['time']
fechas_horas_COD = nc.num2date(tiempo[:], units=tiempo.units)
for i in range(len(fechas_horas_COD)):
fechas_horas_COD[i] = fechas_horas_COD[i].strftime('%Y-%m-%d %H:%M')
fechas_horas_COD = pd.to_datetime(fechas_horas_COD, format="%Y-%m-%d %H:%M", errors='coerce')
lat_COD = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Lat_COD_Junio.npy')
lon_COD = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Lon_COD_Junio.npy')
COD = np.ma.filled(COD, fill_value=0.)
COD[COD ==0.] =np.nan
Path_save = '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/'
np.save(Path_save[0:45]+'Array_COD_05-2019-12-2019', COD)
np.save(Path_save[0:45]+'Array_FechasHoras_COD_05-2019-12-2019',fechas_horas_COD )
if Recorte_Rad == 'si':
###############################################################################
    ## -------------READING THE GOES CH2 DATA ON THE GENERAL GRID--------------- ##
###############################################################################
Rad_origin = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_2018_2019CH2.npy')
fechas_horas_Rad_origin = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_FechasHoras_CH2__2018_2019.npy')
fechas_horas_Rad_origin = pd.to_datetime(fechas_horas_Rad_origin, format="%Y-%m-%d %H:%M", errors='coerce')
Rad = Rad_origin[(fechas_horas_Rad_origin>= fi)&(fechas_horas_Rad_origin<=ff)]
fechas_horas_Rad = fechas_horas_Rad_origin[(fechas_horas_Rad_origin>= fi)&(fechas_horas_Rad_origin<=ff)]
################################################################################
    ## -----------------READING THE REFLECTANCE THRESHOLDS----------------------- ##
################################################################################
df_UmbralH_Nube_348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_348_Nuba.csv', sep=',', index_col =0, header = None)
df_UmbralH_Nube_350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_350_Nuba.csv', sep=',', index_col =0, header = None)
df_UmbralH_Nube_975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_975_Nuba.csv', sep=',', index_col =0, header = None)
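    # Combine the hourly cloud thresholds of the three stations and average them into a single hourly threshold.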
df_UmbralH_Nube = pd.concat([df_UmbralH_Nube_348, df_UmbralH_Nube_350, df_UmbralH_Nube_975], axis=1)
df_UmbralH_Nube = df_UmbralH_Nube.mean(axis = 1, skipna = True)
df_UmbralH_Nube = pd.DataFrame(df_UmbralH_Nube, columns=['Umbral'])
################################################################################
    ## -----------------------MASKING OF THE IMAGES-------------------------------##
################################################################################
Rad_bina = []
Rad_mask = []
fechas_horas_new = []
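    # For every image, apply the reflectance threshold of its hour: the comparison gives a
    # binary cloud mask, and pixels below the threshold are set to NaN in the masked field.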
for i in range (len(fechas_horas_Rad)):
for j in range(len(df_UmbralH_Nube.Umbral.index)):
if df_UmbralH_Nube.Umbral.index[j] == fechas_horas_Rad[i].hour:
umbral = df_UmbralH_Nube.Umbral[j+6]
rad = Rad[i, :, :]
radbi = (rad > umbral).astype(int)
rad[rad<umbral]=np.nan
Rad_bina.append(radbi)
Rad_mask.append(rad)
fechas_horas_new.append(fechas_horas_Rad[i])
print('yes')
else:
pass
Rad_bina = np.array(Rad_bina)
Rad_mask = np.array(Rad_mask)
##----------------------------------------------------------------------------------------##
fechas_horas_new = [fechas_horas_new[i].strftime('%Y-%m-%d %H:%M') for i in range(len(fechas_horas_new))]
fechas_horas_new = np.array(fechas_horas_new)
Path_save = '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/'
np.save(Path_save[0:45]+'Array_Rad_bina_05-2019-12-2019', Rad_bina)
np.save(Path_save[0:45]+'Array_Rad_mask_05-2019-12-2019', Rad_mask)
np.save(Path_save[0:45]+'Array_Fechas_Horas_Rad_05-2019-12-2019', fechas_horas_new)
else:
Rad_bina = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_bina_05-2019-12-2019.npy')
Rad_mask = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_mask_05-2019-12-2019.npy')
fechas_horas_new = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Fechas_Horas_Rad_05-2019-12-2019.npy')
fechas_horas_new = pd.to_datetime(fechas_horas_new, format="%Y-%m-%d %H:%M", errors='coerce')
pass
################################################################################
## -------------READING THE AREAS OF THE MASKED IMAGES------------------------##
################################################################################
"""
The procedure to obtain the areas is very heavy, so it has to be done in
parts, i.e. for lengths of approximately 5000 elements.
"""
# save np.load
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
Area_1 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_AREA1Rad_bina_05-2019-12-2019.npy')
Area_2 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_AREA2Rad_bina_05-2019-12-2019.npy')
Area_3 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_AREA3Rad_bina_05-2019-12-2019.npy')
np.load = np_load_old
Area_complete = np.concatenate((Area_1, Area_2, Area_3))
#Area_complete = (Area_complete [Area_complete >=4]).all()
################################################################################
## --------------BUILDING A DF WITH THE AREA DATA OF INTEREST-----------------##
################################################################################
Area=[]
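# Keep only connected areas of at least 4 pixels, then track the maximum, minimum and mean cloud area per image.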
for i in range(len(fechas_horas_new)):
a = Area_complete[i]
a[a<4.]=np.nan
Area.append(a)
Area = np.array(Area)
Area_max=[]
for i in range(len(fechas_horas_new)):
a_max = np.nanmax(Area[i])
Area_max.append(a_max)
Area_max = np.array(Area_max)
Area_min=[]
for i in range(len(fechas_horas_new)):
a_min = np.nanmin(Area[i])
Area_min.append(a_min)
Area_min = np.array(Area_min)
Area_mean=[]
for i in range(len(fechas_horas_new)):
a_mean = np.nanmean(Area[i])
Area_mean.append(a_mean)
Area_mean = np.array(Area_mean)
df_areas = pd.DataFrame({'Area':Area, 'Area_max':Area_max, 'Area_min':Area_min, 'Area_mean':Area_mean}, index= fechas_horas_new)
################################################################################
## ---------------BUILDING A DF WITH THE COD DATA OF INTEREST-----------------##
################################################################################
COD_max=[]
for i in range(len(fechas_horas_COD)):
a_max = np.nanmax(COD[i])
COD_max.append(a_max)
COD_max = np.array(COD_max)
COD_min=[]
for i in range(len(fechas_horas_COD)):
a_min = np.nanmin(COD[i])
COD_min.append(a_min)
COD_min = np.array(COD_min)
COD_mean=[]
for i in range(len(fechas_horas_COD)):
a_mean = np.nanmean(COD[i])
COD_mean.append(a_mean)
COD_mean = np.array(COD_mean)
df_COD = | pd.DataFrame({'COD_max':COD_max, 'COD_min':COD_min, 'COD_mean':COD_mean}, index= fechas_horas_COD) | pandas.DataFrame |
import math
import os
import timeit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import tensorflow as tf
import random
from matplotlib.lines import Line2D
from data import Dataset
from prediction import train_model, test_model
from prediction import load_encoder_and_predictor_weights
from prediction import initialize_optimizer
from sklearn.preprocessing import OrdinalEncoder
class ActLrnResults:
""" Bundles AL results. """
def __init__(
self,
train_loss,
val_loss,
test_loss,
iter_usage,
iter_time,
budget_usage,
sensor_usage,
streamtime_usage,
prediction_model,
test_data,
picked_cand_index_set,
picked_times_index_hist,
picked_spaces_index_hist,
picked_inf_score_hist,
budget_usage_hist,
iter_time_hist,
sensor_usage_hist,
streamtime_usage_hist,
val_loss_hist,
initial_sensors_list
):
self.train_loss = train_loss
self.val_loss = val_loss
self.test_loss = test_loss
self.iter_usage = iter_usage
self.iter_time = iter_time
self.budget_usage = budget_usage
self.sensor_usage = sensor_usage
self.streamtime_usage = streamtime_usage
self.prediction_model = prediction_model
self.test_data = test_data
self.picked_cand_index_set = picked_cand_index_set
self.picked_times_index_hist = picked_times_index_hist
self.picked_spaces_index_hist = picked_spaces_index_hist
self.picked_inf_score_hist = picked_inf_score_hist
self.budget_usage_hist = budget_usage_hist
self.iter_time_hist = iter_time_hist
self.sensor_usage_hist = sensor_usage_hist
self.streamtime_usage_hist = streamtime_usage_hist
self.val_loss_hist = val_loss_hist
self.initial_sensors_list = initial_sensors_list
def encode_features(
HYPER,
raw_data,
models,
dataset,
available_index_set_update,
AL_variable,
silent=True
):
""" Encodes features AL_variable of dataset, or passes labels for
AL_variable being Y_(t,s). Also returns the random index array that is
created when CAND_SUBSAMPLE_ACT_LRN is not None and smaller than dataset
size.
"""
if not silent:
# tell us what we are doing
print(
'Encoding features into embedded vector spaces for', AL_variable
)
### Create random subsample before encoding if wanted ###
# create an index array in the length of the passed dataset
n_datapoints = len(available_index_set_update)
index_array = list(available_index_set_update)
# if we chose a subset of these data points, create a random sub-sample
if (
HYPER.CAND_SUBSAMPLE_ACT_LRN is not None and
HYPER.CAND_SUBSAMPLE_ACT_LRN < n_datapoints
):
n_datapoints = HYPER.CAND_SUBSAMPLE_ACT_LRN
index_array = random.sample(
available_index_set_update,
n_datapoints
)
# create copy of dataset
X_t = dataset.X_t[index_array]
X_s = dataset.X_s[index_array]
X_st = dataset.X_st[index_array]
Y = dataset.Y[index_array]
if HYPER.SPATIAL_FEATURES != 'image':
X_s1 = dataset.X_s1[index_array]
### Encode features here ###
if AL_variable == 'X_t':
encoding = models.X_t_encoder.predict(X_t)
elif AL_variable == 'X_st':
encoding = models.X_st_encoder.predict(X_st)
elif AL_variable == 'X_s1':
if HYPER.SPATIAL_FEATURES != 'image':
encoding = models.X_s1_encoder.predict(X_s1)
else:
### Encode X_s1 ###
encoding = np.zeros((n_datapoints, HYPER.ENCODING_NODES_X_s))
# iterate over all datapoints
for i in range(n_datapoints):
building_id = X_s[i][0]
# prepare imagery data
x_s1 = raw_data.building_imagery_data_list[
raw_data.building_imagery_id_list.index(int(building_id))
]
x_s1 = np.expand_dims(x_s1, axis=0)
# make predictions and save results in respective matrix
encoding[i] = models.X_s1_encoder.predict(x_s1)
elif AL_variable == 'X_(t,s)':
if HYPER.SPATIAL_FEATURES != 'image':
encoding = models.X_joint_encoder.predict([X_t, X_s1, X_st])
else:
### Encode X_joint ###
encoding = np.zeros((n_datapoints, HYPER.ENCODING_NODES_X_joint))
# iterate over all datapoints
for i in range(n_datapoints):
# Get training data of currently iterated batch
x_t = X_t[i]
x_st = X_st[i]
y = Y[i]
building_id = X_s[i][0]
cluster_id = X_s[i][1]
# prepare imagery data
x_s1 = raw_data.building_imagery_data_list[
raw_data.building_imagery_id_list.index(int(building_id))
]
# Expand dimensions for batching
x_t = np.expand_dims(x_t, axis=0)
x_s1 = np.expand_dims(x_s1, axis=0)
x_st = np.expand_dims(x_st, axis=0)
# Create model input list
model_input_list = [x_t, x_s1, x_st]
# make predictions and save results in respective matrix
encoding[i] = models.X_joint_encoder.predict(model_input_list)
elif AL_variable == 'Y_hat_(t,s)':
if HYPER.SPATIAL_FEATURES != 'image':
encoding = models.prediction_model.predict([X_t, X_s1, X_st])
else:
### Predict Y ###
encoding = np.zeros((n_datapoints, HYPER.PREDICTION_WINDOW))
# iterate over all datapoints
for i in range(n_datapoints):
# Get training data of currently iterated batch
x_t = X_t[i]
x_st = X_st[i]
y = Y[i]
building_id = X_s[i][0]
cluster_id = X_s[i][1]
# Prepare imagery data
x_s1 = raw_data.building_imagery_data_list[
raw_data.building_imagery_id_list.index(int(building_id))
]
# Expand dimensions for batching
x_t = np.expand_dims(x_t, axis=0)
x_s1 = np.expand_dims(x_s1, axis=0)
x_st = np.expand_dims(x_st, axis=0)
# Create model input list
model_input_list = [x_t, x_s1, x_st]
# make predictions and save results in respective matrix
encoding[i] = models.prediction_model.predict(model_input_list)
elif AL_variable == 'Y_(t,s)':
encoding = Y
else:
print('query variable not recognized.')
return encoding, index_array
def compute_clusters(
HYPER,
encoding,
cand_batch_size,
silent=True
):
""" Calculates clusters in the passed encoding vectors using
HYPER.METHOD_CLUSTERS[0]. Returns cluster labels and centers.
"""
if not silent:
# tell us what we are doing
print(
'Creating clusters in encodings with n_clusters=', cand_batch_size
)
# set the clustering method that we chose
method = HYPER.METHOD_CLUSTERS[0]
# set number of clusters equal to passed or corrected value
clustering_method = method(n_clusters=cand_batch_size)
# cluster encodings
clustering_method.fit(encoding)
cluster_labels = clustering_method.labels_
cluster_centers = clustering_method.cluster_centers_
# get ordinal encoder from Sklearn
enc = OrdinalEncoder()
# encode labels. NOTE: ordinally encoding clusters ensures that cluster
# labels start at 0 and end at number of clusters, which is not the case
# for X_t and X_s1 when not ordinally encoding.
cluster_labels = enc.fit_transform(
np.expand_dims(cluster_labels, 1)
).astype(int)
cluster_centers = cluster_centers[enc.categories_[0]]
# delete expanded dimension again as it is redundant
cluster_labels = cluster_labels[:, 0]
# calculate number of clusters created in data
n_clusters = max(cluster_labels) + 1
return cluster_labels, cluster_centers, n_clusters
def compute_similarity(
HYPER,
encoding,
cluster_labels,
cluster_centers,
silent=True
):
""" Calculates distances to cluster centers. A large value means that
encoding is close to its cluster center. A small value means that encoding
is far from cluster center.
"""
if not silent:
# tell us what we are doing
print("Calculating distances" )
# create a progress bar for training
progbar_distance = tf.keras.utils.Progbar(len(encoding))
# get the kernel function we chose
metric = HYPER.METRIC_DISTANCES[0]
# set the number of encoded data points
n_enc_datapoints = len(encoding)
# CAUTION: create shape (n_enc_datapoints,) instead of (n_enc_datapoints, 1)
similarity_array = np.zeros((n_enc_datapoints,))
# iterate over all encodings
for i in range(n_enc_datapoints):
# get encoding's cluster label
label = cluster_labels[i]
# get cluster's center
center = cluster_centers[label]
# calculate similarity/closeness of encoding to its cluster center
similarity_array[i] = metric(
np.expand_dims(center, axis=0), np.expand_dims(encoding[i], axis=0)
)
if not silent:
# increment progress bar
progbar_distance.add(1)
return similarity_array
def feature_embedding_AL(
HYPER,
pred_type,
models,
raw_data,
train_data,
candidate_dataset,
loss_object,
optimizer,
mean_loss,
loss_function,
method,
AL_variable=None,
silent=True,
):
""" Given the prediction models 'models' which are trained on the initially
available data points 'train_data', it selects a batch of data points to
    query labels for from the pool of candidate data points 'candidate_dataset'.
Three different methods can be chosen through 'method' and set to
'cluster-far', 'cluster-close' and 'cluster-rnd', each standing for another
variant of the algorithm:
1. 'cluster-far': maximizes embedding entropy
    2. 'cluster-close': minimizes embedding entropy
3. 'cluster-rnd': chooses points of random embedding entropy from each
cluster uniformly
"""
### Compute some initial values ###
# Compute total data budget
data_budget = math.floor(
HYPER.DATA_BUDGET_ACT_LRN * candidate_dataset.n_datapoints
)
# compute number of sensors and times in training data
n_times_0 = len(np.unique(train_data.X_t, axis=0))
n_sensors_0 = len(np.unique(train_data.X_s, axis=0))
# create a list of initial sensors to save in results
initial_sensors_list = list(set(train_data.X_s[:, 0]))
# compute number of new times in candidate data
n_times_new = (
len(
np.unique(
np.concatenate(
(train_data.X_t, candidate_dataset.X_t)
),
axis=0)
)
- n_times_0
)
# compute number of new sensors in candidate data
n_sensors_new = (
len(
np.unique(
np.concatenate(
(train_data.X_s, candidate_dataset.X_s)
),
axis=0)
)
- n_sensors_0
)
if not silent:
# tell us what we are doing
print(
'prediction task: {}'.format(
pred_type
)
)
print(
'AL variable: {}'.format(
AL_variable
)
)
print(
'AL variant: {}'.format(
method
)
)
print(
'distance metric: {}'.format(
HYPER.DISTANCE_METRIC_ACT_LRN
)
)
print(
'clustering method: {}'.format(
HYPER.CLUSTER_METHOD_ACT_LRN
)
)
print(
'data budget: {}/{} ({:.0%})'.format(
data_budget,
candidate_dataset.n_datapoints,
HYPER.DATA_BUDGET_ACT_LRN
)
)
print(
'used sensors: {}'.format(
n_sensors_0
)
)
print(
'new sensors to place: {}'.format(
n_sensors_new
)
)
print(
'used streaming times: {}'.format(
n_times_0
)
)
print(
'new streaming times to use: {}'.format(
n_times_new
)
)
### Load model weights ###
# Note: if you load entire initial models, instead of their weights only,
# network configuration information is lost and tf will not train encoders
# alongside training the main prediction model.
models = load_encoder_and_predictor_weights(
raw_data,
models,
pred_type
)
(
loss_object,
optimizer,
loss_function,
mean_loss,
) = initialize_optimizer(HYPER)
### Start AL algorithm ###
# initialize some counters
data_counter = 0
cand_batch_size = 0
iteration_counter = 0
sensor_counter = 0
streamtime_counter = 0
picked_cand_index_set = set()
available_index_set_update = set(np.arange(candidate_dataset.n_datapoints))
budget_usage_hist = []
iter_time_hist = []
picked_times_index_hist = []
picked_spaces_index_hist = []
picked_inf_score_hist = []
sensor_usage_hist = []
streamtime_usage_hist = []
val_loss_hist = []
# Set starting time of algorithm
t_start_0 = timeit.default_timer()
# start Active Learning and stop once data_counter reaches data_budget or
# iteration_counter reaches max iterations
while (
data_counter < data_budget and
iteration_counter < HYPER.MAX_ITER_ACT_LRN
):
if not silent:
# mark beginning of iteration
print('---' * 3)
# Set the start time
t_start = timeit.default_timer()
### Set batch size ###
# compute the batch siz of this iteration
cand_batch_size = HYPER.CAND_BATCH_SIZE_ACT_LRN * data_budget
# if exceeding candidate data subsample, adjust batch size
if HYPER.CAND_SUBSAMPLE_ACT_LRN is not None:
cand_batch_size = min(
cand_batch_size,
HYPER.CAND_SUBSAMPLE_ACT_LRN
)
# if exceeding data budget, adjust batch size
cand_batch_size = min(
cand_batch_size,
data_budget - data_counter
)
# transform cand_batch_siz to integer
cand_batch_size = int(cand_batch_size)
### Choose candidates to query ###
if method == 'PL':
### Choose queries according to PL (random) ###
# Create a random batch_index_array
batch_index_list = random.sample(
available_index_set_update,
cand_batch_size
)
else:
### Encode data points *tested* ###
candidate_encoded, cand_sub_index = encode_features(
HYPER,
raw_data,
models,
candidate_dataset,
available_index_set_update,
AL_variable,
)
### Calculate clusters *tested* ###
cand_labels, cand_centers, n_clusters = compute_clusters(
HYPER,
candidate_encoded,
cand_batch_size
)
### Compute similarity values for each candidate ###
if method != 'rnd d_c':
### Calculate distances *tested* ###
# calculates far points with small similarity value
cand_similarity_array = compute_similarity(
HYPER,
candidate_encoded,
cand_labels,
cand_centers
)
if method == 'max d_c':
# reverse order by multiplying with -1
# --> smallest becomes most similar
# --> turns similarity into distance array
cand_similarity_array = -1 * cand_similarity_array
### Choose data from clusters *tested* ###
# create zero array that is filled with cluster IDs for this batch
batch_index_list = []
inf_score_list = []
# iterates over the batch_index_array up to cand_batch_size
cluster_batch_counter = 0
# iterates over clusters until n_clusters, then resets to 0
# if cluster_batch_counter does not reached cand_batch_size
cluster_index = 0
# iterate over all clusters until cluster_batch_counter reaches
# cand_batch_size
while cluster_batch_counter < cand_batch_size:
# get an array of indices matching to currently iterated cluster
# ID
index_array = np.where(cand_labels == cluster_index)[0]
# if the set is not empty
if len(index_array) != 0:
if method == 'rnd d_c':
# choose one element at random from this index array
index_choice = np.random.choice(index_array)
else:
# get similarity values for matching index array
similarity_array = cand_similarity_array[index_array]
if method == 'avg d_c':
# turn into absolute difference to average similarity
similarity_array = abs(
similarity_array - np.mean(similarity_array)
)
# calculate largest similarity
max_similarity = similarity_array.max()
# choose first/largest value from similarity_array
index_choice = index_array[
np.where(
similarity_array == max_similarity
)[0][0]
]
# add information content score of data point to inf_score_list
inf_score_list.append(max_similarity)
# add randomly chosen index to zero array
batch_index_list.append(cand_sub_index[index_choice])
# setting the cluster ID to -1 excludes data point from
# considerations in next iterations of this loop
cand_labels[index_choice] = -1
# increment the counter of data points added to the batch
cluster_batch_counter += 1
# increment the cluster ID index for the next iteration
cluster_index += 1
# reset the cluster ID index to zero if an entire round over all
# clusters did not yet fill the batch
if cluster_index >= n_clusters:
cluster_index = 0
### Compute the set of queried data points *tested* ###
# compute how many points were queried until last iteration
n_used_cand_data_total = len(picked_cand_index_set)
# update the set of points queried until now including this iteration
picked_cand_index_set = picked_cand_index_set.union(
set(batch_index_list)
)
# create a list from the set
picked_cand_index_list = list(picked_cand_index_set)
# compute the number of new data points queried in this iteration
n_new_data = len(picked_cand_index_set) - n_used_cand_data_total
### Create new training batch ###
if HYPER.EXTEND_TRAIN_DATA_ACT_LRN:
# get share of training data from the pool of possible testing points
X_t_ord_1D_new_train = np.concatenate(
(
train_data.X_t_ord_1D,
candidate_dataset.X_t_ord_1D[
picked_cand_index_list
]
),
axis=0
)
X_t_new_train = np.concatenate(
(
train_data.X_t,
candidate_dataset.X_t[
picked_cand_index_list
]
),
axis=0
)
X_s_new_train = np.concatenate(
(
train_data.X_s,
candidate_dataset.X_s[
picked_cand_index_list
]
),
axis=0
)
X_st_new_train = np.concatenate(
(
train_data.X_st,
candidate_dataset.X_st[
picked_cand_index_list
]
),
axis=0
)
Y_new_train = np.concatenate(
(
train_data.Y,
candidate_dataset.Y[
picked_cand_index_list
]
),
axis=0
)
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_new_train = np.concatenate(
(
train_data.X_s1,
candidate_dataset.X_s1[
picked_cand_index_list
]
),
axis=0
)
else:
X_s1_new_train = 0
else:
# get share of training data from pool of possible testing points
X_t_ord_1D_new_train = candidate_dataset.X_t_ord_1D[batch_index_list]
X_t_new_train = candidate_dataset.X_t[batch_index_list]
X_s_new_train = candidate_dataset.X_s[batch_index_list]
X_st_new_train = candidate_dataset.X_st[batch_index_list]
Y_new_train = candidate_dataset.Y[batch_index_list]
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_new_train = candidate_dataset.X_s1[
batch_index_list
]
else:
X_s1_new_train = 0
### Update training data for counting sensors and stream times ###
# causing duplicate points on purpose when RED_CAND_DATA_ACT_LRN=False
train_data_update_X_t_ord_1D = np.concatenate(
(
train_data.X_t_ord_1D,
candidate_dataset.X_t_ord_1D[
picked_cand_index_list
]
),
axis=0
)
train_data_update_X_t = np.concatenate(
(
train_data.X_t,
candidate_dataset.X_t[
picked_cand_index_list
]
),
axis=0
)
train_data_update_X_s = np.concatenate(
(
train_data.X_s,
candidate_dataset.X_s[
picked_cand_index_list
]
),
axis=0
)
### Update candidate data ###
# update candidate data if chosen so
if HYPER.RED_CAND_DATA_ACT_LRN:
# update set of available indices
available_index_set_update = (
available_index_set_update - picked_cand_index_set
)
### Create (updated) validation data ###
# update validation data if chosen so
if HYPER.UPD_VAL_DATA_ACT_LRN:
X_t_ord_1D_new_val = np.delete(
candidate_dataset.X_t_ord_1D, picked_cand_index_list, 0
)
X_t_new_val = np.delete(
candidate_dataset.X_t, picked_cand_index_list, 0
)
X_s_new_val = np.delete(
candidate_dataset.X_s, picked_cand_index_list, 0
)
X_st_new_val = np.delete(
candidate_dataset.X_st, picked_cand_index_list, 0
)
Y_new_val = np.delete(
candidate_dataset.Y, picked_cand_index_list, 0
)
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_new_val = np.delete(
candidate_dataset.X_s1, picked_cand_index_list, 0
)
else:
X_s1_new_val = 0
else:
# create new validation data by copying from initial candidate data
X_t_ord_1D_new_val = candidate_dataset.X_t_ord_1D
X_t_new_val = candidate_dataset.X_t
X_s_new_val = candidate_dataset.X_s
X_st_new_val = candidate_dataset.X_st
Y_new_val = candidate_dataset.Y
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_new_val = candidate_dataset.X_s1
else:
X_s1_new_val = 0
### Train and validate with new batches, avoids unwanted shuffling ###
# create new training dataset
new_train_batch = Dataset(
X_t_ord_1D_new_train,
X_t_new_train,
X_s_new_train,
X_s1_new_train,
X_st_new_train,
Y_new_train
)
# create new validation dataset
new_val_data = Dataset(
X_t_ord_1D_new_val,
X_t_new_val,
X_s_new_val,
X_s1_new_val,
X_st_new_val,
Y_new_val
)
# train model with new data
train_hist_batch, val_hist_batch = train_model(
HYPER,
models.prediction_model,
new_train_batch,
new_val_data,
raw_data,
loss_object,
optimizer,
mean_loss,
)
# keep track of loss histories
if data_counter == 0:
train_hist = train_hist_batch
val_hist = val_hist_batch
else:
train_hist = np.concatenate((train_hist, train_hist_batch))
val_hist = np.concatenate((val_hist, val_hist_batch))
### Update counters ###
# get ending time
t_end = timeit.default_timer()
# increment some counters
iteration_counter += 1
data_counter += n_new_data
sensor_counter = len(
np.unique(train_data_update_X_s, axis=0)
) - n_sensors_0
streamtime_counter = len(
np.unique(train_data_update_X_t, axis=0)
) - n_times_0
# budget share that is eventually used
cand_data_usage = data_counter / data_budget
# time in seconds that is used in this iteration
iter_time = math.ceil(t_end - t_start)
# if there were any new sensors to add, get share that was added
if n_sensors_new != 0:
percent_sensors = sensor_counter / n_sensors_new
else:
percent_sensors = 0
# if there were any new streamtimes to add, get share that was added
if n_times_new != 0:
percent_streamtimes = streamtime_counter / n_times_new
else:
percent_streamtimes = 0
# add data usage to history
budget_usage_hist.append(cand_data_usage)
# add iteration time to history
iter_time_hist.append(iter_time)
# add sensor usage to history
sensor_usage_hist.append(percent_sensors)
# add streamtime usage to history
streamtime_usage_hist.append(percent_streamtimes)
# add batch index times to history
picked_times_index_hist.append(candidate_dataset.X_t_ord_1D[batch_index_list])
# add batch index spaces to history
picked_spaces_index_hist.append(candidate_dataset.X_s[batch_index_list][:, 0])
# add similarity scores to history if method is neither 'PL' nor 'rnd d_c'
if method != 'PL' and method != 'rnd d_c':
# for 'max d_c' the stored score is the negated similarity,
# so adding one yields 1 - similarity, which is the information score
if method == 'max d_c':
picked_inf_score_hist.append([1+x for x in inf_score_list])
else:
picked_inf_score_hist.append(inf_score_list)
# add last validation loss value to validation loss history
val_loss_hist.append(val_hist[-1])
if not silent:
# tell us the numbers
print(
'Iteration: {}'.format(
iteration_counter
)
)
print(
'Time: {}s'.format(
iter_time
)
)
print(
'Trained on candidate batch size: {}'.format(
cand_batch_size
)
)
print(
'Used streaming times: {}/{} ({:.0%})'.format(
streamtime_counter, n_times_new, percent_streamtimes
)
)
print(
'Used sensors: {}/{} ({:.0%})'.format(
sensor_counter, n_sensors_new, percent_sensors
)
)
print(
'Used data budget: {}/{} ({:.0%})'.format(
data_counter, data_budget, cand_data_usage
)
)
# mark end of test for currently iterated sorting array
if not silent:
print('---' * 20)
# total time in seconds used by the active learning algorithm
iter_time = math.ceil(t_end - t_start_0)
### Create test dataset and predict ###
# create new validation data by deleting batch of picked data from candidates
X_t_ord_1D_test = np.delete(
candidate_dataset.X_t_ord_1D,
picked_cand_index_list,
0
)
X_t_test = np.delete(
candidate_dataset.X_t,
picked_cand_index_list,
0
)
X_s_test = np.delete(
candidate_dataset.X_s,
picked_cand_index_list,
0
)
X_st_test = np.delete(
candidate_dataset.X_st,
picked_cand_index_list,
0
)
Y_test = np.delete(
candidate_dataset.Y,
picked_cand_index_list,
0
)
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_test = np.delete(
candidate_dataset.X_s1,
picked_cand_index_list,
0
)
else:
X_s1_test = 0
# create a copy of candidate test data
test_data = Dataset(
X_t_ord_1D_test,
X_t_test,
X_s_test,
X_s1_test,
X_st_test,
Y_test
)
# Predict on candidate datapoints that are not in training data
title = '{} {} {}'.format(pred_type, AL_variable, method)
test_loss = test_model(
HYPER,
title,
models.prediction_model,
test_data,
raw_data,
mean_loss,
loss_function
)
### Shorten test dataset to random subsample ###
if HYPER.SAVED_SAMPLES_ACT_LRN >= test_data.n_datapoints:
rnd_array = np.arange(test_data.n_datapoints)
else:
# choose a subsample of the test data for saving
rnd_array = random.sample(
list(np.arange(test_data.n_datapoints)),
HYPER.SAVED_SAMPLES_ACT_LRN
)
X_t_ord_1D_test = X_t_ord_1D_test[rnd_array]
X_t_test = X_t_test[rnd_array]
X_s_test = X_s_test[rnd_array]
X_st_test = X_st_test[rnd_array]
Y_test = Y_test[rnd_array]
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_test = X_s1_test[rnd_array]
else:
X_s1_test = 0
# overwrite test_data with samples you want to save
test_data = Dataset(
X_t_ord_1D_test,
X_t_test,
X_s_test,
X_s1_test,
X_st_test,
Y_test
)
### Create a results object ###
# create an ActLrnResults object and pass the results for compactness
results = ActLrnResults(
train_hist,
val_hist,
test_loss,
iteration_counter,
iter_time,
cand_data_usage,
percent_sensors,
percent_streamtimes,
models.prediction_model,
test_data,
picked_cand_index_set,
picked_times_index_hist,
picked_spaces_index_hist,
picked_inf_score_hist,
budget_usage_hist,
iter_time_hist,
sensor_usage_hist,
streamtime_usage_hist,
val_loss_hist,
initial_sensors_list
)
return results
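# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the selection loop
# above walks over the clusters round-robin and, for the similarity-based
# variants, picks the candidate with the largest (possibly negated)
# similarity per cluster until the batch is full. The helper below condenses
# that idea; its name and argument names are assumptions for illustration.
import numpy as np

def select_batch_from_clusters(cand_labels, cand_similarity, batch_size):
    """Pick one max-similarity candidate per cluster, round-robin (sketch)."""
    labels = np.array(cand_labels, copy=True)
    similarity = np.asarray(cand_similarity, dtype=float)
    n_clusters = int(labels.max()) + 1
    batch, scores = [], []
    cluster_index = 0
    # never ask for more points than exist
    target = min(int(batch_size), len(labels))
    while len(batch) < target:
        index_array = np.where(labels == cluster_index)[0]
        if len(index_array) != 0:
            cluster_similarity = similarity[index_array]
            choice = int(index_array[np.argmax(cluster_similarity)])
            batch.append(choice)
            scores.append(float(cluster_similarity.max()))
            labels[choice] = -1  # exclude this point from later rounds
        cluster_index = (cluster_index + 1) % n_clusters
    return batch, scores

# Example (synthetic data):
# select_batch_from_clusters(np.array([0, 0, 1, 1, 2]),
#                            np.array([0.1, 0.9, 0.3, 0.8, 0.5]), 3)
# -> ([1, 3, 4], [0.9, 0.8, 0.5])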
def test_AL_sequence_importance(
HYPER,
pred_type,
models,
raw_data,
train_data,
candidate_dataset,
loss_object,
optimizer,
mean_loss,
loss_function,
AL_results,
method,
AL_variable=None,
silent=True
):
""" Tests the importance of the query sequence for passed AL results """
if HYPER.TEST_SEQUENCE_IMPORTANCE:
if not silent:
# create a progress bar for training
progbar_seqimportance = tf.keras.utils.Progbar(AL_results.iter_usage)
# tell us what we are doing
print('Testing sequence importance for')
print(
'prediction type: {}'.format(
pred_type
)
)
print(
'query variable: {}'.format(
AL_variable
)
)
print(
'query variant: {}'.format(
method
)
)
### Load model weights ###
# Note: if you load entire initial models, instead of their weights only,
# network configuration information is lost and tf will not train encoders
# alongside training the main prediction model.
models = load_encoder_and_predictor_weights(
raw_data,
models,
pred_type
)
### Start AL algorithm with random sequence selection ###
# initialize some values
data_budget = math.floor(
HYPER.DATA_BUDGET_ACT_LRN * candidate_dataset.n_datapoints
)
picked_cand_index_set = set()
available_index_set_update = AL_results.picked_cand_index_set
data_counter = 0
# start AL iterations
for iteration in range(AL_results.iter_usage):
### Set batch size ###
# compute the batch size of this iteration
cand_batch_size = HYPER.CAND_BATCH_SIZE_ACT_LRN * data_budget
# if exceeding candidate data subsample, adjust batch size
if HYPER.CAND_SUBSAMPLE_ACT_LRN is not None:
cand_batch_size = min(
cand_batch_size,
HYPER.CAND_SUBSAMPLE_ACT_LRN
)
# if exceeding data budget, adjust batch size
cand_batch_size = min(
cand_batch_size,
data_budget - data_counter
)
# transform cand_batch_size to integer
cand_batch_size = int(cand_batch_size)
### Choose training batch ###
# Create a random batch index list
batch_index_list = random.sample(
list(available_index_set_update),  # random.sample needs a sequence, not a set, in Python 3.11+
cand_batch_size
)
# update candidate indices and data counter
picked_cand_index_set = picked_cand_index_set.union(
set(batch_index_list)
)
picked_cand_index_list = list(picked_cand_index_set)
data_counter = len(picked_cand_index_list)
### Create training data ####
if HYPER.EXTEND_TRAIN_DATA_ACT_LRN:
# get share of training data from pool of possible testing points
X_t_ord_1D_new_train = np.concatenate(
(
train_data.X_t_ord_1D,
candidate_dataset.X_t_ord_1D[picked_cand_index_list]
),
axis=0
)
X_t_new_train = np.concatenate(
(
train_data.X_t,
candidate_dataset.X_t[picked_cand_index_list]
),
axis=0
)
X_s_new_train = np.concatenate(
(
train_data.X_s,
candidate_dataset.X_s[picked_cand_index_list]
),
axis=0
)
X_st_new_train = np.concatenate(
(
train_data.X_st,
candidate_dataset.X_st[picked_cand_index_list]
),
axis=0
)
Y_new_train = np.concatenate(
(
train_data.Y,
candidate_dataset.Y[picked_cand_index_list]
),
axis=0
)
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_new_train = np.concatenate(
(
train_data.X_s1,
candidate_dataset.X_s1[picked_cand_index_list]
),
axis=0
)
else:
X_s1_new_train = 0
else:
# select the batch of candidate data features using the chosen indices
X_t_ord_1D_new_train = candidate_dataset.X_t_ord_1D[batch_index_list]
X_t_new_train = candidate_dataset.X_t[batch_index_list]
X_s_new_train = candidate_dataset.X_s[batch_index_list]
X_st_new_train = candidate_dataset.X_st[batch_index_list]
Y_new_train = candidate_dataset.Y[batch_index_list]
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_new_train = candidate_dataset.X_s1[batch_index_list]
else:
X_s1_new_train = 0
### Update picked_cand_index_list ###
# update candidate data if chosen so
if HYPER.RED_CAND_DATA_ACT_LRN:
# update set of available indices
available_index_set_update = (
available_index_set_update - picked_cand_index_set
)
### Create (updated) validation data ###
if HYPER.UPD_VAL_DATA_ACT_LRN:
# create new validation data by deleting the batch
X_t_ord_1D_new_val = np.delete(
candidate_dataset.X_t_ord_1D, picked_cand_index_list, 0
)
X_t_new_val = np.delete(
candidate_dataset.X_t, picked_cand_index_list, 0
)
X_s_new_val = np.delete(
candidate_dataset.X_s, picked_cand_index_list, 0
)
X_st_new_val = np.delete(
candidate_dataset.X_st, picked_cand_index_list, 0
)
Y_new_val = np.delete(
candidate_dataset.Y, picked_cand_index_list, 0
)
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_new_val = np.delete(
candidate_dataset.X_s1, picked_cand_index_list, 0
)
else:
X_s1_new_val = 0
else:
# create new validation data by copying initial candidates
X_t_ord_1D_new_val = candidate_dataset.X_t_ord_1D
X_t_new_val = candidate_dataset.X_t
X_s_new_val = candidate_dataset.X_s
X_st_new_val = candidate_dataset.X_st
Y_new_val = candidate_dataset.Y
if HYPER.SPATIAL_FEATURES != 'image':
X_s1_new_val = candidate_dataset.X_s1
else:
X_s1_new_val = 0
### Train with new batch ###
# bundle chosen batch of candidate data points as Dataset object
new_train_batch = Dataset(
X_t_ord_1D_new_train,
X_t_new_train,
X_s_new_train,
X_s1_new_train,
X_st_new_train,
Y_new_train
)
# bundle updated data points as Dataset object for validation. This
# avoids unwanted shuffling
new_val_data = Dataset(
X_t_ord_1D_new_val,
X_t_new_val,
X_s_new_val,
X_s1_new_val,
X_st_new_val,
Y_new_val
)
# train model with new data
train_hist_batch, val_hist_batch = train_model(
HYPER,
models.prediction_model,
new_train_batch,
new_val_data,
raw_data,
loss_object,
optimizer,
mean_loss
)
# keep track of loss histories
if iteration == 0:
train_hist = train_hist_batch
val_hist = val_hist_batch
else:
train_hist = np.concatenate((train_hist, train_hist_batch))
val_hist = np.concatenate((val_hist, val_hist_batch))
if not silent:
# increment progress bar
progbar_seqimportance.add(1)
### Create test dataset and predict ###
# create new validation data by deleting the batch of picked data from
# candidate dataset
X_t_ord_1D_test = np.delete(
candidate_dataset.X_t_ord_1D,
picked_cand_index_list,
0
)
X_t_test = np.delete(
candidate_dataset.X_t,
picked_cand_index_list,
0
)
X_s_test = np.delete(
candidate_dataset.X_s,
picked_cand_index_list,
0
)
X_st_test = np.delete(
candidate_dataset.X_st,
picked_cand_index_list,
0
)
Y_test = np.delete(
candidate_dataset.Y,
picked_cand_index_list,
0
)
if HYPER.SPATIAL_FEATURES != "image":
X_s1_test = np.delete(
candidate_dataset.X_s1,
picked_cand_index_list,
0
)
else:
X_s1_test = 0
# create a copy of candidate test data
test_data = Dataset(
X_t_ord_1D_test,
X_t_test,
X_s_test,
X_s1_test,
X_st_test,
Y_test
)
# Predict on candidate datapoints that are not in training data
title = '{} {} {}'.format(pred_type, AL_variable, method)
test_loss = test_model(
HYPER,
title,
models.prediction_model,
test_data,
raw_data,
mean_loss,
loss_function
)
AL_results.seqimportance_train_loss = train_hist
AL_results.seqimportance_val_loss = val_hist
AL_results.seqimportance_test_loss = test_loss
if not silent:
# Indicate termination of execution
print('---' * 20)
return AL_results
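# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the sequence
# importance test above retrains on exactly the indices that AL picked, but
# queried in a random order. A minimal version of that reshuffling, with
# hypothetical names, could look like this:
import random as _random

def reshuffle_query_schedule(picked_index_batches, seed=None):
    """Flatten AL-picked batches and re-chunk them in random order (sketch)."""
    rng = _random.Random(seed)
    flat = [idx for batch in picked_index_batches for idx in batch]
    rng.shuffle(flat)
    reshuffled, start = [], 0
    for batch in picked_index_batches:
        reshuffled.append(flat[start:start + len(batch)])
        start += len(batch)
    return reshuffled

# Example: reshuffle_query_schedule([[3, 7], [1, 9, 4]], seed=0) keeps the
# same indices and batch sizes but randomizes which iteration sees which points.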
def vis_train_and_val(
HYPER,
AL_result_list,
PL_result_list,
RF_results
):
""" Plots training and validation loss histories of each method, sort
variable and prediction type against their passive learning benchmark
scenarios and the random forest baseline predictor. You can choose between the
plotting options 'separate', 'both' and 'joint':
1. 'separate': plots the performance of each method separately against
the passive learning case
2. 'joint': plots the performance of all methods jointly against the
passive learning benchmark
3. 'both': plots both cases of 'separate' and 'joint'
"""
# choose the colormap
cmap = plt.cm.viridis
# create a list of colors, one color for each AL variant
color_list = cmap(np.linspace(0, 0.8, len(HYPER.QUERY_VARIANTS_ACT_LRN)))
n_methods = len(HYPER.QUERY_VARIANTS_ACT_LRN)
n_vars = len(HYPER.QUERY_VARIABLES_ACT_LRN)
for index_pred, pred_type in enumerate(HYPER.PRED_LIST_ACT_LRN):
# create a new figure for iterated prediction type
fig, ax = plt.subplots(n_vars, 2, figsize=(20, 10 * n_vars))
# get variable result list
var_result_list = AL_result_list[index_pred]
# get random results
PL_results = PL_result_list[index_pred]
# get baseline results
RF_loss = RF_results[pred_type]
for index_var, AL_variable in enumerate(HYPER.QUERY_VARIABLES_ACT_LRN):
### Plot method results for each sort variable ###
# plot random forest baseline results
ax[index_var, 1].axhline(
RF_loss,
color='r',
linestyle='--',
label='RF baseline',
)
### Plot PL results once per method for benchmark ###
train_loss = PL_results.train_loss
val_loss = PL_results.val_loss
legend_name = ('PL: {}s- {:.0%} budget'
'- {:.0%} sensors- {:.0%} times- {:.2} loss').format(
PL_results.iter_time,
PL_results.budget_usage,
PL_results.sensor_usage,
PL_results.streamtime_usage,
PL_results.test_loss,
)
ax[index_var, 0].plot(
train_loss,
color='b',
linestyle='--',
label=legend_name
)
ax[index_var, 1].plot(
val_loss,
color='b',
linestyle='--',
label=legend_name
)
# get method_result_list of currently iterated prediction type
method_result_list = var_result_list[index_var]
for index_method, method in enumerate(HYPER.QUERY_VARIANTS_ACT_LRN):
AL_result = method_result_list[index_method]
train_loss = AL_result.train_loss
val_loss = AL_result.val_loss
legend_name = ('AL {}: {}s- {:.0%} budget- {:.0%} '
'sensors- {:.0%} times- {:.2} loss').format(
method,
AL_result.iter_time,
AL_result.budget_usage,
AL_result.sensor_usage,
AL_result.streamtime_usage,
AL_result.test_loss,
)
ax[index_var, 0].plot(
train_loss,
color=color_list[index_method],
label=legend_name
)
ax[index_var, 1].plot(
val_loss,
color=color_list[index_method],
label=legend_name
)
sub_title = (
pred_type
+ ' predictions - query variable '
+ AL_variable
)
ax[index_var, 0].set_title(sub_title + ' training loss')
ax[index_var, 1].set_title(sub_title + ' validation loss')
ax[index_var, 0].set_ylabel('loss')
ax[index_var, 1].set_ylabel('loss')
ax[index_var, 0].set_xlabel('epoch')
ax[index_var, 1].set_xlabel('epoch')
ax[index_var, 0].legend(loc='best', frameon=False)
ax[index_var, 1].legend(loc='best', frameon=False)
def vis_seq_importance(
HYPER,
AL_result_list
):
""" Plots the training and validation losses for AL query sequence vs.
a random query sequence of the same data points that were queried using AL.
"""
if HYPER.TEST_SEQUENCE_IMPORTANCE:
cmap = plt.cm.viridis
# create a list of colors, one color for each AL variant
color_list = cmap(np.linspace(0, 0.8, len(HYPER.QUERY_VARIANTS_ACT_LRN)))
# create list of custom lines for custom legend
custom_lines = [
Line2D([0], [0], color=cmap(0.9), linestyle='--'),
Line2D([0], [0], color=cmap(0.9))
]
n_methods = len(HYPER.QUERY_VARIANTS_ACT_LRN)
n_vars = len(HYPER.QUERY_VARIABLES_ACT_LRN)
for index_pred, pred_type in enumerate(HYPER.PRED_LIST_ACT_LRN):
# create a new figure for iterated prediction type
fig, ax = plt.subplots(n_vars, 2, figsize=(20, 10 * n_vars))
# get variable result list
var_result_list = AL_result_list[index_pred]
for index_var, AL_variable in enumerate(HYPER.QUERY_VARIABLES_ACT_LRN):
### Plot method results for each sort variable ###
# get method_result_list of currently iterated prediction type
method_result_list = var_result_list[index_var]
for index_method, method in enumerate(
HYPER.QUERY_VARIANTS_ACT_LRN
):
AL_result = method_result_list[index_method]
train_loss = AL_result.train_loss
val_loss = AL_result.val_loss
train_loss_rnd_sequence = AL_result.seqimportance_train_loss
val_loss_rnd_sequence = AL_result.seqimportance_val_loss
ax[index_var, 0].plot(
train_loss,
color=color_list[index_method]
)
ax[index_var, 1].plot(
val_loss,
color=color_list[index_method]
)
ax[index_var, 0].plot(
train_loss_rnd_sequence,
linestyle='--',
color=color_list[index_method]
)
ax[index_var, 1].plot(
val_loss_rnd_sequence,
linestyle='--',
color=color_list[index_method]
)
sub_title = (
'query sequence importance for '
+ pred_type
+ ' predictions - query variable '
+ AL_variable
)
ax[index_var, 0].set_title(sub_title + " training")
ax[index_var, 1].set_title(sub_title + " validation")
ax[index_var, 0].set_ylabel("loss")
ax[index_var, 1].set_ylabel("loss")
ax[index_var, 0].set_xlabel("epoch")
ax[index_var, 1].set_xlabel("epoch")
ax[index_var, 0].legend(
custom_lines,
[
'AL data - random sequence',
'AL data - AL sequence'
],
loc='best',
frameon=False
)
ax[index_var, 1].legend(
custom_lines,
[
'AL data - random sequence',
'AL data - AL sequence'
],
loc='best',
frameon=False
)
def save_act_lrn_models(
HYPER,
raw_data,
AL_result_list,
PL_result_list
):
""" Saves the actively trained prediction models. """
if HYPER.SAVE_ACT_LRN_MODELS:
for index_pred, pred_type in enumerate(HYPER.PRED_LIST_ACT_LRN):
# get method_result_list of currently iterated prediction type
var_result_list = AL_result_list[index_pred]
# get random results
PL_results = PL_result_list[index_pred]
prediction_model = PL_results.prediction_model
# create the full path for saving random prediction model
saving_path = raw_data.path_to_AL_models + pred_type + '/'
if not os.path.exists(saving_path):
os.mkdir(saving_path)
path_to_model = saving_path + 'PL.h5'
# save currently iterated model
prediction_model.save(path_to_model)
for index_var, AL_variable in enumerate(HYPER.QUERY_VARIABLES_ACT_LRN):
# get variable result list
method_result_list = var_result_list[index_var]
for index_method, method in enumerate(
HYPER.QUERY_VARIANTS_ACT_LRN
):
# get result object and prediction model
AL_result = method_result_list[index_method]
prediction_model = AL_result.prediction_model
# create the full path for saving currently iterated model
path_to_model = (
saving_path
+ AL_variable
+ ' '
+ method
+ '.h5'
)
# save currently iterated model
prediction_model.save(path_to_model)
def save_act_lrn_results(
HYPER,
raw_data,
RF_results,
AL_result_list,
PL_result_list
):
""" Saves the active learning results, including number of iterations used,
time used for each iteration, share of data budget used, share of sensor
budget used, share of stream time budget used, testing loss, baseline loss,
and the passive learning benchmark histories, validation histories and training
histories.
"""
if HYPER.SAVE_ACT_LRN_RESULTS:
for pred_index, pred_type in enumerate(HYPER.PRED_LIST_ACT_LRN):
saving_path = raw_data.path_to_AL_results + pred_type + '/'
if not os.path.exists(saving_path):
os.mkdir(saving_path)
path_to_results_file = saving_path + 'results.csv'
# create empty DataFrame
result_df = pd.DataFrame()
df_list = []
# baseline results
RF_loss = RF_results[pred_type]
# get method_result_list of currently iterated prediction type
var_result_list = AL_result_list[pred_index]
### Save PL results ###
# get PL results
PL_results = PL_result_list[pred_index]
n_iterations = PL_results.iter_usage
t_iterations = PL_results.iter_time
budget_usage = PL_results.budget_usage
sensor_usage = PL_results.sensor_usage
streamtime_usage = PL_results.streamtime_usage
test_loss = PL_results.test_loss
train_loss = PL_results.train_loss
val_loss = PL_results.val_loss
col_name_train = '{} {} {} train'.format(pred_type, None, 'PL')
col_name_val = '{} {} {} val'.format(pred_type, None, 'PL')
meta_entry = np.array(
[
n_iterations,
t_iterations,
budget_usage,
sensor_usage,
streamtime_usage,
RF_loss,
test_loss,
]
)
entry_train = np.concatenate((meta_entry, train_loss))
entry_val = np.concatenate((meta_entry, val_loss))
df_list.append(
pd.DataFrame({col_name_train: | pd.Series(entry_train) | pandas.Series |
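# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original files): the results csv
# assembled above stacks a fixed block of meta values (iterations, time,
# budget/sensor/streamtime shares, losses) on top of the loss history, one
# column per experiment. Helper and names below are assumptions.
import numpy as np
import pandas as pd

def results_column(meta_values, loss_history, column_name):
    """One-column DataFrame: meta entries followed by the loss curve (sketch)."""
    entry = np.concatenate(
        (np.asarray(meta_values, dtype=float), np.asarray(loss_history, dtype=float))
    )
    return pd.DataFrame({column_name: pd.Series(entry)})

# Example: concatenating such columns along axis=1 pads shorter histories with NaN.
# results_df = pd.concat(
#     [results_column([3, 42.0, 0.8], [1.2, 0.9, 0.7], 'PL train'),
#      results_column([3, 40.0, 0.8], [1.1, 0.8], 'AL train')],
#     axis=1,
# )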
"""
descriptive analysis - Utility functions to work with:
- mongoDB's result cursors
- pandas' DataFrames
Also contains as set of auxiliary internal functions and external routines
Members:
# time_series_analysis - performs time series analysis and returns a series DataFrame.
Maps each date to the number of attacks that happened
# time_series_analysis_per_month - performs time series analysis and returns a series DataFrame.
Maps each MONTH to the number of attacks that happened
# __high_charts_timestamp__ - post processes a datetime object and converts it to be suitable for
Javascript's Highcharts library
# __high_charts_timestamp_to_date__ - reverse function of __high_charts_timestamp__. Returns a date in string
format.
# __analysis_data_frame_to_dict__ - converts DataFrames to a dictionary with key-value dictionary entries
# __analysis_list_to_dict__ - converts DataFrames to a dictionary with key-value dictionary entries
# update_time_series_analysis_files - merges analysis DataFrames from file and DataFrames produced after the
last crawling. Then saves them in json and csv format files
# top_n - finds and returns the most common values for a given key along with the
number of appearances
# today_datetime - calculates the first and last valid datetime objects for the day
provided
"""
from collections import Counter
import pandas as pd
from datetime import date, datetime
import json
def time_series_analysis(results_cursor, mongo_date_type='mongoDate', entity_type=None):
""" Given a cursor that contains a query result set, this function performs time series analysis and returns
the result data frame.
@parameters
results_cursor (cursor) pymongo's result cursor. It is returned by a query
mongo_date_type (str) this parameter sets the datetime field on which the time series
analysis is based. Could be 'mongoDate' or 'mongoDate-CTI'.
entity_type (str) this is a special case ONLY for RANSOMWARE attacks. It can be 'IP' or 'URL'
or 'Domain' to narrow down and specify which records will be analyzed
@returns
s (pandas data frame) this data frame could be empty or contain the number of attacks for each day
"""
print("\nBegin Descriptive Analysis Phase... ", end='')
# process data if the collection is not empty
if results_cursor.count() == 0:
try:
raise Warning("No documents retrieved")
except Exception as e:
print("\ndescriptive_analysis module > time_series_analysis: ", e)
# return an empty DataFrame
s = pd.DataFrame()
return s
if entity_type in ['IP', 'URL', 'Domain']:
docs_of_interest = list([])
for doc in results_cursor.rewind():
if doc["Entity-Type"] == entity_type:
docs_of_interest.append(doc)
else:
# in any other case entity-type is an empty string by default
# so the whole collection is retrieved
docs_of_interest = [doc for doc in results_cursor.rewind()]
# aggregate dates
dates_list = list([])
for doc in docs_of_interest:
dates_list.append(doc[mongo_date_type])
''' Time Series '''
# a list of "1" to count the docs
ones = [1] * len(dates_list)
# the index of the series
idx = pd.DatetimeIndex(dates_list)
# the actual series (a series of 1s for the moment)
time_series = pd.Series(ones, index=idx)
# re-sampling / bucketing
per_day = time_series.resample('1D').sum().fillna(0)
# results data frame
s = pd.DataFrame(per_day)
print('\tCompleted')
print('\nDescriptive Analysis Results')
print("\nTime Series Head")
print(time_series.head())
print("\nPer Day Time Series")
print(per_day.head())
print("\nPer Day DataFrame")
print(s.head(), "\n")
return s
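# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the core of the
# analysis above is counting one event per document and bucketing by day.
# The helper name and the synthetic dates in the example are assumptions.
import pandas as pd

def _count_events_per_day(dates_list):
    """Bucket a list of datetimes into a per-day event count (sketch)."""
    idx = pd.DatetimeIndex(dates_list)
    time_series = pd.Series([1] * len(dates_list), index=idx)
    return time_series.resample('1D').sum().fillna(0)

# Example:
# _count_events_per_day(pd.to_datetime(
#     ['2020-01-01 10:00', '2020-01-01 14:30', '2020-01-03 09:15']))
# -> 2020-01-01: 2, 2020-01-02: 0, 2020-01-03: 1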
# -----------------------------------------------------------------------------------------------------------------#
def time_series_analysis_per_month(results_cursor, mongo_date_type='mongoDate', entity_type=None):
""" Given a cursor that contains a query result set, this function performs time series analysis and returns
the resulting data frame with the number of attacks per month.
@parameters
results_cursor (cursor) pymongo's result cursor. It is returned by a query
mongo_date_type (str) this parameter sets the datetime field on which the time series
analysis is based. Could be 'mongoDate' or 'mongoDate-CTI'.
entity_type (str) this is a special case ONLY for RANSOMWARE attacks. It can be 'IP' or 'URL'
or 'Domain' to narrow down and specify which records will be analyzed
@returns
s (pandas data frame) this data frame could be empty or contain the number of attacks for each day
"""
print("\nBegin Descriptive Analysis Phase... ", end='')
# process data if the collection is not empty
if results_cursor.count() == 0:
try:
raise Warning("No documents retrieved")
except Exception as e:
print("\ndescriptive_analysis module > time_series_analysis: ", e)
# return an empty DataFrame
s = pd.DataFrame()
return s
if entity_type in ['IP', 'URL', 'Domain']:
docs_of_interest = list([])
for doc in results_cursor.rewind():
if doc["Entity-Type"] == entity_type:
docs_of_interest.append(doc)
else:
# in any other case entity-type is an empty string by default
# so the whole collection is retrieved
docs_of_interest = [doc for doc in results_cursor.rewind()]
# aggregate dates
dates_list = list([])
for doc in docs_of_interest:
dates_list.append(doc[mongo_date_type])
''' Time Series '''
# a list of "1" to count the docs
ones = [1] * len(dates_list)
# the index of the series
idx = pd.DatetimeIndex(dates_list)
# the actual series (a series of 1s for the moment)
time_series = pd.Series(ones, index=idx)
# re-sampling / bucketing
per_month = time_series.resample('1M').sum().fillna(0)
# results data frame
s = | pd.DataFrame(per_month) | pandas.DataFrame |
# simple feature engineering from A_First_Model notebook in script form
import cudf
def see_percent_missing_values(df):
"""
reads in a dataframe and returns the percentage of missing data
Args:
df (dataframe): the dataframe that we are analysing
Returns:
percent_missing (dataframe): a dataframe with percentage missing for filtering
"""
total_missing = df.isnull().sum()/df.shape[0]
percent_missing = total_missing*100
return percent_missing.sort_values(ascending=False).round(1)
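# --------------------------------------------------------------------------
# Hedged demo (not part of the original script): what the helper above
# returns for a tiny synthetic frame; the toy data are assumptions.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    _toy = pd.DataFrame({'a': [1, np.nan, 3, np.nan], 'b': [1, 2, 3, 4]})
    print(see_percent_missing_values(_toy))  # expects a -> 50.0, b -> 0.0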
def basic_feature_engineering(train, test, gpu=False):
"""
reads in a train and test set of data and processes as per the basic
feature engineering example
Args:
train (dataframe): the training dataframe (should include TARGET)
test (dataframe): the testing dataframe
gpu (boolean): whether to use cudf or not
Returns:
train (dataframe): the processed train frame
test (dataframe): the processed test frame
train_target (dataframe): The training target column
"""
if gpu:
import cudf as dd
else:
import pandas as dd
app_train_mis_values = see_percent_missing_values(train)
df_app_train_miss_values= dd.DataFrame({'columns': app_train_mis_values.index,
'missing percent': app_train_mis_values.values})
if type(df_app_train_miss_values) == cudf.core.dataframe.DataFrame:
drop_columns = df_app_train_miss_values[df_app_train_miss_values['missing percent'] \
>= 40]['columns'].to_arrow().to_pylist()
else:
drop_columns = df_app_train_miss_values[df_app_train_miss_values['missing percent'] \
>= 40]['columns'].tolist()
train = train.drop(drop_columns, axis=1)
test = test.drop(drop_columns, axis=1)
train_target = train['TARGET']
train = train.drop('TARGET', axis=1)
# here we will use a basic dummy treatment
# we merge the dataframes first because when we dummify
# we could end up with some columns only in train or only in test; merging first prevents this
unified = dd.concat([train, test])
dummy_cols = unified.select_dtypes(['bool', 'O', 'category']).columns.tolist()
unified = | dd.get_dummies(unified, columns=dummy_cols, dtype='int64') | pandas.get_dummies |
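# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): concatenating train
# and test before dummifying keeps the one-hot columns aligned. A minimal
# pandas-only version of that pattern, with hypothetical frame arguments:
import pandas as pd

def dummify_together(train_df, test_df):
    """One-hot encode train and test jointly, then split them back (sketch)."""
    unified = pd.concat([train_df, test_df], keys=['train', 'test'])
    dummy_cols = unified.select_dtypes(['bool', 'object', 'category']).columns.tolist()
    unified = pd.get_dummies(unified, columns=dummy_cols, dtype='int64')
    return unified.loc['train'], unified.loc['test']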
# -*- coding: utf-8 -*-
"""
The data module contains tools for preprocessing data. It allows users to merge timeseries, compute
daily and monthly summary statistics, and get seasonal periods of a time series.
"""
from __future__ import division
import pandas as pd
from numpy import inf, nan
__all__ = ['julian_to_gregorian', 'merge_data', 'daily_average', 'daily_std_error', 'daily_std_dev', 'monthly_average',
'monthly_std_error', 'monthly_std_dev', 'remove_nan_df', 'seasonal_period']
def julian_to_gregorian(dataframe, frequency=None, inplace=False):
"""
Converts the index of the merged dataframe from julian float values to gregorian datetime
values.
Parameters
----------
dataframe: Pandas DataFrame
A DataFrame with an index of type float
frequency: string
Optional. Sometimes when converting from julian to gregorian there will be rounding errors
due to the inability of computers to store floats as perfect decimals. Providing the
frequency will automatically attempt to round the dates. A list of all the frequencies pandas provides is found
`here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases/>`_. Common frequencies
include daily ("D") and hourly ("H").
inplace: bool
Default False. If True, will modify the index of the dataframe in place rather than
creating a copy and returning the copy. Use when the time series are very long and making
a copy would take a large amount of memory
Returns
-------
Pandas DataFrame
A pandas DataFrame with gregorian index.
Examples
--------
>>> import pandas as pd
>>> import hydrostats.data as hd
>>> import numpy as np
>>> # The julian dates in an array
>>> julian_dates = np.array([2444239.5, 2444239.5416666665, 2444239.5833333335, 2444239.625,
>>> 2444239.6666666665, 2444239.7083333335, 2444239.75,
>>> 2444239.7916666665, 2444239.8333333335, 2444239.875])
>>> # Creating a test dataframe
>>> test_df = pd.DataFrame(data=np.random.rand(10, 2), # Random data in the columns
>>> columns=("Simulated Data", "Observed Data"),
>>> index=julian_dates)
>>> test_df
Simulated Data Observed Data
2.444240e+06 0.764719 0.126610
2.444240e+06 0.372736 0.141392
2.444240e+06 0.008645 0.686477
2.444240e+06 0.656825 0.480444
2.444240e+06 0.555247 0.869409
2.444240e+06 0.643896 0.549590
2.444240e+06 0.242720 0.799617
2.444240e+06 0.432421 0.185760
2.444240e+06 0.694631 0.136986
2.444240e+06 0.700422 0.390415
>>> # Making a new df with gregorian index
>>> test_df_gregorian = hd.julian_to_gregorian(test_df)
>>> test_df_gregorian
Simulated Data Observed Data
1980-01-01 00:00:00.000000 0.585454 0.457238
1980-01-01 01:00:00.028800 0.524764 0.083464
1980-01-01 01:59:59.971200 0.516821 0.416683
1980-01-01 03:00:00.000000 0.948483 0.553874
1980-01-01 04:00:00.028800 0.492280 0.232901
1980-01-01 04:59:59.971200 0.527967 0.296395
1980-01-01 06:00:00.000000 0.650018 0.212802
1980-01-01 07:00:00.028800 0.585592 0.802971
1980-01-01 07:59:59.971200 0.448243 0.665814
1980-01-01 09:00:00.000000 0.137395 0.201721
>>> # Rounding can be applied due to floating point inaccuracy
>>> test_df_gregorian_rounded = julian_to_gregorian(test_df, frequency="H") # Hourly Rounding Frequency
>>> test_df_gregorian_rounded
Simulated Data Observed Data
1980-01-01 00:00:00 0.309527 0.938991
1980-01-01 01:00:00 0.872284 0.497708
1980-01-01 02:00:00 0.168046 0.225845
1980-01-01 03:00:00 0.954494 0.275607
1980-01-01 04:00:00 0.875885 0.194380
1980-01-01 05:00:00 0.236849 0.992770
1980-01-01 06:00:00 0.639346 0.029808
1980-01-01 07:00:00 0.855828 0.903927
1980-01-01 08:00:00 0.638805 0.916124
1980-01-01 09:00:00 0.273430 0.443980
>>> # The DataFrame can also be modified in place, increasing efficiency with large time series
>>> julian_to_gregorian(test_df, inplace=True, frequency="H")
>>> test_df
Simulated Data Observed Data
1980-01-01 00:00:00 0.309527 0.938991
1980-01-01 01:00:00 0.872284 0.497708
1980-01-01 02:00:00 0.168046 0.225845
1980-01-01 03:00:00 0.954494 0.275607
1980-01-01 04:00:00 0.875885 0.194380
1980-01-01 05:00:00 0.236849 0.992770
1980-01-01 06:00:00 0.639346 0.029808
1980-01-01 07:00:00 0.855828 0.903927
1980-01-01 08:00:00 0.638805 0.916124
1980-01-01 09:00:00 0.273430 0.443980
"""
if inplace:
dataframe.index = pd.to_datetime(dataframe.index, origin="julian", unit="D")
if frequency is not None:
dataframe.index = dataframe.index.round(frequency)
else:
# Copying to avoid modifying the original dataframe
return_df = dataframe.copy()
# Converting the dataframe index from julian to gregorian
return_df.index = pd.to_datetime(return_df.index, origin="julian", unit="D")
if frequency is not None:
return_df.index = return_df.index.round(frequency)
return return_df
def merge_data(sim_fpath=None, obs_fpath=None, sim_df=None, obs_df=None, interpolate=None,
column_names=('Simulated', 'Observed'), simulated_tz=None, observed_tz=None, interp_type='pchip',
return_tz="Etc/UTC", julian=False, julian_freq=None):
"""Merges two dataframes or csv files, depending on the input.
Parameters
----------
sim_fpath: str
The filepath to the simulated csv of data. Can be a url if the page is formatted correctly.
The csv must be formatted with the dates in the left column and the data in the right
column.
obs_fpath: str
The filepath to the observed csv. Can be a url if the page is formatted correctly.
The csv must be formatted with the dates in the left column and the data in the right
column.
sim_df: DataFrame
A pandas DataFrame containing the simulated data. Must be formatted with a datetime index
and the simulated data values in column 0.
obs_df: DataFrame
A pandas DataFrame containing the simulated data. Must be formatted with a datetime index
and the simulated data values in column 0.
interpolate: str
Must be either 'observed' or 'simulated'. Specifies which data set you would like to
interpolate if interpolation is needed to properly merge the data.
column_names: tuple of str
Tuple of length two containing the column names that the user would like to set for the
DataFrame that is returned. Note that the simulated data will be in the left column and the
observed data will be in the right column
simulated_tz: str
The timezone of the simulated data. A full list of timezones can be found in the
:ref:`timezones`.
observed_tz: str
The timezone of the simulated data. A full list of timezones can be found in the
:ref:`timezones`.
interp_type: str
Which interpolation method to use. Uses the default pandas interpolator.
Available types are found at
http://pandas.pydata.org/pandas-docs/version/0.16.2/generated/pandas.DataFrame.interpolate.html
return_tz: str
What timezone the merged dataframe's index should be returned as. Default is 'Etc/UTC', which is recommended
for simplicity.
julian: bool
If True, will parse the first column of the file to a datetime index from julian floating point time
representation, this is only valid when supplying the sim_fpath and obs_fpath parameters. Users supplying two
DataFrame objects must convert the index from Julian to Gregorian using the julian_to_gregorian function in this
module
julian_freq: str
A string representing the frequency of the julian dates so that they can be rounded. See examples for usage.
Notes
-----
The only acceptable time frequencies in the data are 15min, 30min, 45min, and any number of hours or
days in between.
There are three scenarios to consider when merging your data:
1. The first scenario is that the timezones and the spacing of the time series matches
(eg. 1 Day). In this case, you will want to leave the simulated_tz, observed_tz, and
interpolate arguments empty, and the function will simply join the two csv's into a dataframe.
2. The second scenario is that you have two time series with matching time zones but not
matching spacing. In this case you will want to leave the simulated_tz and observed_tz empty,
and use the interpolate argument to tell the function which time series you would like to
interpolate to match the other time series.
3. The third scenario is that you have two time series with different time zones and possibly
different spacings. In this case you will want to fill in the simulated_tz, observed_tz, and
interpolate arguments. This will then take timezones into account when interpolating
the selected time series.
Examples
--------
>>> import hydrostats.data as hd
>>> import pandas as pd
>>> pd.options.display.max_rows = 15
The data URLs contain streamflow data from two different models, and are provided from the Hydrostats Github page
>>> sfpt_url = r'https://github.com/waderoberts123/Hydrostats/raw/master/Sample_data/sfpt_data/magdalena-calamar_interim_data.csv'
>>> glofas_url = r'https://github.com/waderoberts123/Hydrostats/raw/master/Sample_data/GLOFAS_Data/magdalena-calamar_ECMWF_data.csv'
>>> merged_df = hd.merge_data(sfpt_url, glofas_url, column_names=('Streamflow Prediction Tool', 'GLOFAS'))
"""
# Reading the data into dataframes if from file
if sim_fpath is not None and obs_fpath is not None:
# Importing data into a data-frame
sim_df_copy = pd.read_csv(sim_fpath, delimiter=",", header=None, names=[column_names[0]],
index_col=0, infer_datetime_format=True, skiprows=1)
obs_df_copy = pd.read_csv(obs_fpath, delimiter=",", header=None, names=[column_names[1]],
index_col=0, infer_datetime_format=True, skiprows=1)
# Converting the index to datetime type
if julian:
julian_to_gregorian(sim_df_copy, frequency=julian_freq, inplace=True)
julian_to_gregorian(obs_df_copy, frequency=julian_freq, inplace=True)
else:
sim_df_copy.index = pd.to_datetime(sim_df_copy.index, infer_datetime_format=True, errors='coerce')
obs_df_copy.index = pd.to_datetime(obs_df_copy.index, infer_datetime_format=True, errors='coerce')
elif sim_df is not None and obs_df is not None:
# Checking to make sure that both dataframes have datetime indices if they are not read from file.
if not isinstance(sim_df.index, pd.DatetimeIndex) and not isinstance(obs_df.index, pd.DatetimeIndex):
raise RuntimeError("Both the obs_df and the sim_df need to have a datetime index.")
# Copying the user supplied DataFrame objects
sim_df_copy = sim_df.copy()
obs_df_copy = obs_df.copy()
else:
raise RuntimeError('either sim_fpath and obs_fpath or sim_df and obs_df are required inputs.')
# Checking to see if the necessary arguments in the function are fulfilled
if simulated_tz is None and observed_tz is not None:
raise RuntimeError('Either Both Timezones are required or neither')
elif simulated_tz is not None and observed_tz is None:
raise RuntimeError('Either Both Timezones are required or neither')
elif simulated_tz is not None and observed_tz is not None and interpolate is None:
raise RuntimeError("You must specify with the interpolate parameter whether to interpolate the 'simulated' "
"or 'observed' data.")
elif simulated_tz is None and observed_tz is None and interpolate is None:
# Scenario 1
# Merging and joining the two DataFrames
merged_df = | pd.DataFrame.join(sim_df_copy, obs_df_copy) | pandas.DataFrame.join |
###############################################################################
##
## Copyright (C) 2020-2022, New York University.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of BugDoc.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import ast
import copy
import logging
import pandas as pd
import os
import random
import time
import zmq
from bugdoc.utils.utils import load_runs, numtests, load_combinatorial
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class AutoDebug(object):
def generate_data_interventions(self,bad_dataframe,good_dataframes):
def compute_score(row,b,g):
return min(row['score'],abs(row[b]-row[g]))
columns = good_dataframes[0].columns
max_diff_columns = {}
column_dataframes = {}
for c in columns:
df = pd.DataFrame()
df['bad'] = bad_dataframe[c]
first = True
for g in range(len(good_dataframes)):
good = good_dataframes[g]
df['good_' + str(g)] = good[c]
if first:
col_diff = bad_dataframe[c].sub(good[c], axis=0).abs()
df['score'] = col_diff
first = False
else:
df['score'] = df.apply(lambda row: compute_score(row, 'bad', 'good_' + str(g)), axis=1)
max_diff_columns[c] = max(df['score'])
df.sort_values(by=['score'], inplace=True, ascending=False)
column_dataframes[c] = df
column_order = [pair[0] for pair in sorted(max_diff_columns.items(), key=lambda item: item[1], reverse=True)]
return column_dataframes, column_order
def execute_intervention(self, dataframe, input_dict):
result = False
temp_dataset_file = os.path.join(os.path.dirname(input_dict['dataset']),"temp.csv")
input_dict['dataset'] = temp_dataset_file
dataframe.to_csv(temp_dataset_file, index=False)
requests = set()
exp = []
for param in self.my_inputs:
value = input_dict[param]
exp.append(value)
self.workflow(exp)
if self.is_poller_not_sync:
time.sleep(1)
self.is_poller_not_sync = False
requests.add(str(exp))
while len(requests) > 0:
socks = dict(self.poller.poll(10000))
if socks:
if socks.get(self.receiver) == zmq.POLLIN:
msg = self.receiver.recv_string(zmq.NOBLOCK)
exp = ast.literal_eval(msg)
requests.discard(str(exp[:-1]))
x = copy.deepcopy(exp)
x[-1] = eval(x[-1])
result = x[-1]
else:
for tup in requests:
exp = list(tup)
# TODO check if we need to resend experiments
# self.workflow(exp)
if self.is_poller_not_sync:
time.sleep(1)
self.is_poller_not_sync = False
return result
def replace_column(self,column, column_dataframes, bad_dataframe, input_dict):
bad_col = bad_dataframe[column]
bad_dataframe[column] = column_dataframes[column]['good_0']
# Test
result = self.execute_intervention(bad_dataframe, input_dict)
#roll back
bad_dataframe[column] = bad_col
return result
def replace_keys(self, column, column_dataframes, bad_dataframe, keys, input_dict):
bad_col = bad_dataframe[column]
bad_dataframe[column][keys] = column_dataframes[column]['good_0'][keys]
# Test
result = self.execute_intervention(bad_dataframe, input_dict)
# roll back
bad_dataframe[column] = bad_col
return result
def run(self, filename, input_dict, bad_dataset, good_datasets, outputs):
bad_dataframe = | pd.read_csv(bad_dataset) | pandas.read_csv |
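# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original BugDoc module): the column
# ordering above ranks columns by how far the bad run's values are from the
# closest good run. A condensed pandas version of that scoring for numeric
# columns; function and argument names are assumptions.
import pandas as pd

def rank_columns_by_deviation(bad_df, good_dfs):
    """Sort columns by max over rows of the min |bad - good| distance (sketch)."""
    max_scores = {}
    for col in good_dfs[0].columns:
        diffs = pd.concat(
            [bad_df[col].sub(good[col]).abs() for good in good_dfs], axis=1
        )
        max_scores[col] = diffs.min(axis=1).max()
    return sorted(max_scores, key=max_scores.get, reverse=True)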
"""Combine demand, hydro, wind, and solar traces into a single DataFrame"""
import os
import time
import pandas as pd
import matplotlib.pyplot as plt
def _pad_column(col, direction):
"""Pad values forwards or backwards to a specified date"""
# Drop missing values
df = col.dropna()
# Convert to DataFrame
df = df.to_frame()
# Options that must change depending on direction in which to pad
if direction == 'forward':
keep = 'last'
new_index = pd.date_range(start=df.index[0], end='2051-01-01 00:00:00', freq='1H')
elif direction == 'backward':
keep = 'first'
new_index = pd.date_range(start='2016-01-01 01:00:00', end=df.index[-1], freq='1H')
else:
raise Exception(f'Unexpected direction: {direction}')
# Update index
df = df.reindex(new_index)
def _get_hour_of_year(row):
"""Get hour of year"""
# Get day of year - adjust by 1 minute so last timestamp (2051-01-01 00:00:00)
# is assigned to 2050. Note this timestamp actually corresponds to the interval
# 2050-12-31 23:00:00 to 2051-01-01 00:00:00
day_timestamp = row.name - pd.Timedelta(minutes=1)
# Day of year
day = day_timestamp.dayofyear
# Hour of year
hour = ((day - 1) * 24) + day_timestamp.hour + 1
return hour
# Hour of year
df['hour_of_year'] = df.apply(_get_hour_of_year, axis=1).to_frame('hour_of_year')
# Last year with complete data
fill_year = df.dropna(subset=[col.name]).drop_duplicates(subset=['hour_of_year'], keep=keep)
# DataFrame that will have values padded forward
padded = df.reset_index().set_index('hour_of_year')
# Pad using values from last year with complete data
padded.update(fill_year.set_index('hour_of_year'), overwrite=False)
# Set timestamp as index
padded = padded.set_index('index')
# Return series
padded = padded[col.name]
return padded
def pad_dataframe(col):
"""Apply padding - forwards and backwards for each column in DataFrame"""
# Pad values forwards
padded = _pad_column(col, direction='forward')
# Pad values backwards
padded = _pad_column(padded, direction='backward')
return padded
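# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the padding above
# keys every timestamp to its hour of the year so that one complete year can
# fill the missing ones. A standalone version of that hour-of-year mapping:
import pandas as pd

def hour_of_year(timestamp):
    """Hour of year in 1..8760 (8784 in leap years), interval-ending convention."""
    shifted = timestamp - pd.Timedelta(minutes=1)
    return (shifted.dayofyear - 1) * 24 + shifted.hour + 1

# Example: hour_of_year(pd.Timestamp('2016-01-01 01:00:00')) -> 1
#          hour_of_year(pd.Timestamp('2017-01-01 00:00:00')) -> 8784 (2016 is a leap year)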
def format_wind_traces(data_dir):
"""Format wind traces"""
# Load wind traces
df = pd.read_hdf(os.path.join(data_dir, 'wind_traces.h5'))
# Reset index and pivot
df = df.reset_index().pivot(index='timestamp', columns='bubble', values='capacity_factor')
# Pad data forward
df = df.apply(pad_dataframe)
# Add level to column index
df.columns = | pd.MultiIndex.from_product([['WIND'], df.columns]) | pandas.MultiIndex.from_product |
"""
CMO-PMO Dashbaord report generation.
Reads daily metric data from blob storage and uploads
"""
import sys, time
import os
from datetime import datetime, date, timedelta
from pathlib import Path
import argparse
import pandas as pd
util_path = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'util'))
sys.path.append(util_path)
from utils import post_data_to_blob, create_json, get_tenant_info, get_data_from_blob, push_metric_event
def data_wrangling(result_loc_, date_):
"""
Extract last 30 days' daily metrics data from date of run.
:param result_loc_: Path object for file path
:param date_: datetime object for date sorting
:return: None
"""
df = | pd.read_csv(result_loc_) | pandas.read_csv |
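# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original report script): one way the
# "last 30 days from date of run" window could be taken before sorting and
# plotting; the helper name, date column and run_date argument are assumptions.
import pandas as pd

def last_30_days(df, date_col, run_date):
    """Keep only rows from the 30 days preceding run_date (sketch)."""
    out = df.copy()
    out[date_col] = pd.to_datetime(out[date_col])
    cutoff = pd.Timestamp(run_date) - pd.Timedelta(days=30)
    return out[out[date_col] > cutoff].sort_values(by=date_col)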
import os
from datetime import datetime
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from numpy import expand_dims
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.layers import Activation, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
# loading in the data
df = pd.read_csv('33k_stripes_and_florals.csv')
df = df.set_index('id')
# labelling based on if it is floral or stripes
df['floral'] = np.where(df.keywords.str.contains('floral'), 1, 0)
df = df.drop(columns=['categories', 'type', 'contributer', 'description', 'keywords', 'image_urls'])
df = df.sample(frac=1) # shuffle
# add filenames
df['filenames'] = [str(x)+'.jpg' for x in df.index]
# split between train and test data
X_train, X_test, y_train, y_test = train_test_split(df.filenames, df.floral, test_size=0.25, random_state=24)
# Make one df for train and one for test since that is what the generators need
Xy_train = | pd.concat([X_train, y_train], axis=1) | pandas.concat |
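# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the filename/label
# frames built above are typically fed to Keras via flow_from_dataframe.
# The helper name, image directory and parameter values are assumptions;
# class_mode='raw' simply passes the 0/1 labels through.
from tensorflow.keras.preprocessing.image import ImageDataGenerator

def make_generator(frame, image_dir, target_size=(150, 150), batch_size=32):
    """Build an image generator from a filenames/floral frame (sketch)."""
    datagen = ImageDataGenerator(rescale=1.0 / 255)
    return datagen.flow_from_dataframe(
        dataframe=frame,
        directory=image_dir,   # assumed folder that contains the <id>.jpg files
        x_col='filenames',
        y_col='floral',
        target_size=target_size,
        class_mode='raw',
        batch_size=batch_size,
    )

# Example (assumed path): train_gen = make_generator(Xy_train, 'images/')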
'''
This file contains the ML-algorithms used to
operate on the data provided by the user
'''
import pandas as pd
import numpy as np
from flask import current_app
import os
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.naive_bayes import GaussianNB as GNB
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier as KNC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from mlgo.models import ResultSet
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import quantile_transform
from sklearn.preprocessing import Normalizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from mlgo.datatraining.feature_selection import select_k_best
from mlgo.datatraining.feature_selection import variance_based
'''
Clean the data
if the target is of string type convert into classes
convert any string features into classes or continuous values
'''
class ML_Models():
data = ""
num_cols = 0
num_rows = 0
dataset_name = ""
def __init__(self, data_file):
filepath = os.path.join(current_app.root_path, 'static/data', data_file)
data = pd.read_csv(filepath, header=0)
if data.shape[1] == 1:
data = pd.read_csv(filepath, header=0, delimiter='\t')
data.reset_index()
self.data = data
columns = data.columns
for col in columns[:-1]:
try:
data[col] = data[col].astype('float64')
except:
data[col] = pd.factorize(data[col])[0]
try:
data[columns[-1]] = data[columns[-1]].astype('int64')
except:
data[columns[-1]] = pd.factorize(data[columns[-1]])[0]
self.data = data
self.dataset_name = data_file
self.num_rows = data.shape[0]
self.num_cols = data.shape[1]
def get_labels(self, data):
df = data
column_names = list(df)
df.columns = list(range(0, len(df.columns)))
features = df.drop(columns=[len(df.columns) -1])
labels = df.get(len(df.columns) -1)
features.columns = column_names[:-1]
labels.columns = column_names[-1]
return features, labels
def clean_data(self):
data = self.data
data.fillna(data.mean(), inplace=True)
data.fillna(data.median(), inplace=True)
data.fillna(data.mode(), inplace=True)
self.data = data
def scale_data(self, data, scaler='MinMaxScaler'):
if scaler is None:
return data
if scaler not in ['MinMaxScaler', 'Normalizer', 'Quantile_Transform']:
scaler = 'MinMaxScaler'
mmc = MinMaxScaler()
nm = Normalizer()
if scaler == 'MinMaxScaler':
print('In MinMaxScaler')
mmc.fit(data)
scaled_data = mmc.transform(data)
print(type(scaled_data))
return scaled_data
elif scaler == 'Normalizer':
scaled_data_temp = nm.fit(data)
scaled_data = scaled_data_temp.transform(data)
return scaled_data
elif scaler == 'Quantile_Transform':
return quantile_transform(data, n_quantiles=100, random_state=0)
def select_features(self, features, labels, params, algo="All"):
print("Algo : ",algo)
print("run1")
if algo == 'All' or algo not in ['Variance Based', 'K Best']:
return features, features.shape[1]
print('run2')
if algo == 'Variance Based':
print("In variance based")
try:
params = float(params)
except:
params = 0.0
if params < 0:
params = 0.0
new_features = variance_based(features, labels, params)
#new_test_f = variance_based(test_f, test_l, params)
return new_features, new_features.shape[1]
elif algo == 'K Best':
print("In k best")
try:
params = int(params)
except:
params = 10
new_features = select_k_best(features, labels, params)
#new_test_f = select_k_best(features, labels, params)
no_features = new_features.shape[1]
return new_features, no_features
print("End of function")
def decision_tree(self, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, scaler=None, feature_selection='All', p=0.0, test_train_split=0.3):
data = self.data
try:
test_train_split = float(test_train_split)
if test_train_split > 0.6:
test_train_split = 0.6
except:
print("in except")
test_train_split = 0.3
train, test = train_test_split(data, test_size=test_train_split)
print("\n\n", train.shape, "\n\n", test.shape, "\n\n")
train_features, train_labels = self.get_labels(train)
test_features, test_labels = self.get_labels(test)
features_list = []
#train_features, test_features, features_list = self.select_features(train_features, train_labels, test_features, test_labels, p, feature_selection)
train_features = self.scale_data(train_features, scaler=scaler)
test_features = self.scale_data(test_features, scaler=scaler)
train_labels = train_labels.values.reshape(train_features.shape[0], 1)
test_labels = test_labels.values.reshape(test_features.shape[0], 1)
print(train_features.shape)
print(train_labels.shape)
recon_train = np.hstack((train_features, train_labels))
recon_test = np.hstack((test_features, test_labels))
reconstructed_data = np.concatenate((recon_train, recon_test))
reconstructed_data_df = pd.DataFrame(reconstructed_data, index=None)
re_features = reconstructed_data_df.drop(columns=[len(reconstructed_data_df.columns) - 1])
re_labels = reconstructed_data_df.get(len(reconstructed_data_df.columns) - 1)
selected_feat, num_feat = self.select_features(re_features, re_labels, params=p, algo=feature_selection)
selected_feat = pd.DataFrame(selected_feat, index=None)
data = pd.concat([selected_feat, re_labels], axis=1)
train, test = train_test_split(data, test_size=test_train_split)
train_features, train_labels = self.get_labels(train)
test_features, test_labels = self.get_labels(test)
        if criterion not in ('gini', 'entropy'):
            criterion = 'gini'
if max_depth == '' or max_depth is None:
max_depth = None
else:
max_depth = int(max_depth)
clf = DTC(criterion=criterion,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
clf.fit(train_features, train_labels)
predictions = clf.predict(test_features)
accuracy = accuracy_score(test_labels, predictions)
rs = ResultSet()
rs.algo_name = 'Decision Tree'
rs.dataset_name = self.dataset_name
rs.accuracy = round(accuracy, 4)
rs.train_test_split = test_train_split
rs.normalization = scaler
rs.no_of_features = num_feat
return rs
def svm(self, c=1.0, kernel='rbf', gamma='auto', max_iter=-1, scaler=None, feature_selection='All',p=0.0, test_train_split=0.3):
data = self.data
try:
test_train_split = float(test_train_split)
if test_train_split > 0.6:
test_train_split = 0.6
except:
test_train_split = 0.3
train, test = train_test_split(data, test_size=test_train_split)
train_features, train_labels = self.get_labels(train)
test_features, test_labels = self.get_labels(test)
features_list = []
# train_features, test_features, features_list = self.select_features(train_features, train_labels, test_features,
# test_labels, p, feature_selection)
train_features = self.scale_data(train_features, scaler=scaler)
test_features = self.scale_data(test_features, scaler=scaler)
train_labels = train_labels.values.reshape(train_features.shape[0], 1)
test_labels = test_labels.values.reshape(test_features.shape[0], 1)
print(train_features.shape)
print(train_labels.shape)
recon_train = np.hstack((train_features, train_labels))
recon_test = np.hstack((test_features, test_labels))
reconstructed_data = np.concatenate((recon_train, recon_test))
        reconstructed_data_df = pd.DataFrame(reconstructed_data, index=None)
import pandas as pd
def unfold(df,s):
df=df[s].values
lst=[]
for i in df:
dic={}
for j in range(len(i)):
dic[j]=i[j]
lst.append(dic)
return pd.DataFrame(lst)
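# Illustrative example (not part of the original file): `unfold` expands a column whose
# cells hold fixed-length sequences into one column per position. The two-row frame
# below is made up for demonstration only.
def _demo_unfold():
    demo = pd.DataFrame({'vec': [[1, 2], [3, 4]]})
    return unfold(demo, 'vec')  # columns 0 and 1 with rows [1, 2] and [3, 4]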
def load_raw_data(file_path,vectorizer,dataset_index):
if dataset_index==2:
df = pd.read_pickle(file_path)
title=pd.DataFrame(vectorizer.get_text_feature( df['title'].values))
body=pd.DataFrame(vectorizer.get_text_feature( df['body'].values))
comment_text=pd.DataFrame(vectorizer.get_text_feature( df['comment'].values))
issue_num=df[["LengthOfTitle","LengthOfDescription","NumOfUrls","NumOfPics","NumOfCode","PositiveWords","NegativeWords",
"coleman_liau_index","flesch_reading_ease","flesch_kincaid_grade","automated_readability_index"]]
rpt=df[["rptallpr","rptpr","rptcmt","rptallcmt","rptpronum","rptstar",
"rptfoll","rptalliss",'rpthascomment', 'rptisnew', 'rpthasevent',"rptnpratio","rptissnum"]]
comnum=df[["commentnum","labels"]]
lc=unfold(df,"labelcategory")
le=unfold(df,"labelevent")
event=unfold(df,"event").iloc[:,11:36]
event_experience=unfold(df,"event").iloc[:,:11]
T=pd.concat([title,body,comment_text,issue_num,rpt,comnum,lc,le,event,event_experience],axis=1)
c=[]
for i in range(title.shape[1]):
c.append("title"+str(i))
for i in range(body.shape[1]):
c.append("description"+str(i))
for i in range(comment_text.shape[1]):
c.append("com"+str(i))
c+=list(issue_num.columns)
c+=list(rpt.columns)
c+=list(comnum.columns)
for i in range(lc.shape[1]):
c.append("lc"+str(i))
for i in range(le.shape[1]):
c.append("le"+str(i))
for i in range(event.shape[1]):
c.append("event"+str(i))
for i in range(event_experience.shape[1]):
c.append("event_ex"+str(i))
T.columns=c
elif dataset_index==1:
df = pd.read_pickle(file_path)
title=pd.DataFrame(vectorizer.get_text_feature( df['title'].values))
body=pd.DataFrame(vectorizer.get_text_feature( df['body'].values))
issue_num=df[["LengthOfTitle","LengthOfDescription","NumOfUrls","NumOfPics","NumOfCode","PositiveWords","NegativeWords",
"coleman_liau_index","flesch_reading_ease","flesch_kincaid_grade","automated_readability_index"]]
rpt=df[["rptallpr","rptpr","rptcmt","rptallcmt","rptpronum","rptstar","rptfoll","rptalliss",'rptisnew',
"rptnpratio","rptissnum","labels"]]
lc=unfold(df,"labelcategory")
T=pd.concat([title,body,issue_num,rpt,lc],axis=1)
c=[]
for i in range(title.shape[1]):
c.append("title"+str(i))
for i in range(body.shape[1]):
c.append("description"+str(i))
c+=list(issue_num.columns)
c+=list(rpt.columns)
for i in range(lc.shape[1]):
c.append("lc"+str(i))
T.columns=c
return T
def load_train_test_data(file_path1,vectorizer,dataset_index,fold,sorted,crosspro):
T= load_raw_data(file_path1,vectorizer,dataset_index)
T.dropna(inplace=True)
p_train_split1=int((fold/10)*T.shape[0])
p_train_split2=int((fold/10+0.1)*T.shape[0])
train_data1=T.iloc[:p_train_split1]
train_data2=T.iloc[p_train_split2:]
train_data=pd.concat([train_data1,train_data2],axis=0)
test_data=T.iloc[p_train_split1:p_train_split2]
p_train = train_data[train_data.labels == 1]
p_train = p_train.sample(frac=15000/p_train.shape[0],replace=True,random_state=0)
n_train = train_data[train_data.labels == 0]
n_train=n_train.sample(frac=15000/n_train.shape[0],replace=True,random_state=0)
    train_data = pd.concat([p_train, n_train], ignore_index=True)
from flask import Flask, render_template, request, flash, redirect, url_for, send_file
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from matplotlib import pyplot
from matplotlib.patches import Rectangle
from werkzeug.utils import secure_filename
import os
import pandas as pd
import time
df=pd.DataFrame()
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
UPLOAD_FOLDER = 'static/uploads/'
application = Flask(__name__, static_folder = UPLOAD_FOLDER)
application.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
aisles = []
spaces = []
class TestConfig(Config):
NAME = "void_cfg"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 1
rcnn = MaskRCNN(mode='inference', model_dir='./', config=TestConfig())
rcnn.load_weights('mask_rcnn_void_cfg_00052.h5', by_name=True)
rcnn.keras_model._make_predict_function()
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def draw_image_with_boxes(filename, boxes_list):
fig = pyplot.figure(figsize=(15, 15))
fig.add_subplot(1, 2, 1)
data = pyplot.imread(filename)
pyplot.imshow(data)
fig.add_subplot(1, 2, 2)
pyplot.imshow(data)
ax = pyplot.gca()
for box in boxes_list:
y1, x1, y2, x2 = box
width, height = x2 - x1, y2 - y1
rect = Rectangle((x1, y1), width, height, fill=False, color='red')
ax.add_patch(rect)
aisles.append(filename)
spaces.append(len(boxes_list))
    results = {'aisle name': aisles, 'void number': spaces}
    df = pd.DataFrame(results)
'''
Train and evaluate binary classifier
Produce a human-readable HTML report with performance plots and metrics
Usage
-----
```
python eval_classifier.py {classifier_name} --output_dir /path/ \
{clf_specific_kwargs} {data_kwargs} {protocol_kwargs}
```
For detailed help message:
```
python eval_classifier.py {classifier_name} --help
```
Examples
--------
TODO
----
* Save classifiers in reproducible format on disk (ONNX??)
* Add reporting for calibration (and perhaps adjustment to improve calibration)
'''
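# Example invocation (illustrative only; the classifier name must match one of the JSON
# files in src/default_hyperparameters, and the CSV/JSON paths below are placeholders):
#
#   python eval_classifier.py logistic \
#       --train_csv_files train_features.csv,train_outcomes.csv \
#       --test_csv_files test_features.csv,test_outcomes.csv \
#       --data_dict_files features_dict.json,outcomes_dict.json \
#       --output_dir ./html/ \
#       --scoring roc_auc_score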
import argparse
import ast
import json
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
import glob
import warnings
from yattag import Doc
import sklearn.linear_model
import sklearn.tree
import sklearn.ensemble
from custom_classifiers import ThresholdClassifier
from sklearn.metrics import (accuracy_score, balanced_accuracy_score, f1_score,
average_precision_score, confusion_matrix, log_loss,
roc_auc_score, roc_curve, precision_recall_curve)
from sklearn.model_selection import GridSearchCV, ShuffleSplit
from split_dataset import Splitter
from utils_scoring import (THRESHOLD_SCORING_OPTIONS, calc_score_for_binary_predictions)
from utils_calibration import plot_binary_clf_calibration_curve_and_histograms
def get_sorted_list_of_kwargs_specific_to_group_parser(group_parser):
keys = [a.option_strings[0].replace('--', '') for a in group_parser._group_actions]
return [k for k in sorted(keys)]
DEFAULT_PROJECT_REPO = os.path.sep.join(__file__.split(os.path.sep)[:-2])
PROJECT_REPO_DIR = os.path.abspath(
os.environ.get('PROJECT_REPO_DIR', DEFAULT_PROJECT_REPO))
default_json_dir = os.path.join(PROJECT_REPO_DIR, 'src', 'default_hyperparameters')
if not os.path.exists(default_json_dir):
raise ValueError("Could not read default hyperparameters from file")
DEFAULT_SETTINGS_JSON_FILES = glob.glob(os.path.join(default_json_dir, '*.json'))
if len(DEFAULT_SETTINGS_JSON_FILES) == 0:
raise ValueError("Could not read default hyperparameters from file")
try:
TEMPLATE_HTML_PATH = os.path.join(PROJECT_REPO_DIR, 'src', 'template.html')
except KeyError:
TEMPLATE_HTML_PATH = None
FIG_KWARGS = dict(
figsize=(4, 4),
tight_layout=True)
if __name__ == '__main__':
# Parse pre-specified command line arguments
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title="clf_name", dest="clf_name")
subparsers_by_name = dict()
for json_file in DEFAULT_SETTINGS_JSON_FILES:
with open(json_file, 'r') as f:
defaults = json.load(f)
clf_name = os.path.basename(json_file).split('.')[0]
clf_parser = subparsers.add_parser(clf_name)
default_group = clf_parser.add_argument_group('fixed_clf_settings')
filter_group = clf_parser.add_argument_group('filter_hypers')
hyperparam_group = clf_parser.add_argument_group('hyperparameters')
for key, val in defaults.items():
if key.count('constructor'):
assert val.count(' ') == 0
assert val.startswith('sklearn')
for ii, name in enumerate(val.split('.')):
if ii == 0:
mod = globals().get(name)
else:
mod = getattr(mod, name)
clf_parser.add_argument('--clf_constructor', default=mod)
elif isinstance(val, list):
if key.startswith('grid_'):
hyperparam_group.add_argument("--%s" % key, default=val, type=type(val[0]), nargs='*')
else:
default_group.add_argument("--%s" % key, default=val, type=type(val[0]), nargs='*')
else:
has_simple_type = isinstance(val, str) or isinstance(val, int) or isinstance(val, float)
assert has_simple_type
if key.startswith('grid_'):
hyperparam_group.add_argument("--%s" % key, default=val, type=type(val))
elif key.startswith('filter__'):
filter_group.add_argument("--%s" % key, default=val, type=type(val))
else:
default_group.add_argument("--%s" % key, default=val, type=type(val))
subparsers_by_name[clf_name] = clf_parser
'''
logistic_parser = subparsers.add_parser('logistic')
logistic_parser.set_defaults(clf=LogisticRegression,
default_clf_args={'solver': 'lbfgs',
'multi_class': 'auto'})
logistic_parser.add_argument('--grid_C', type=float, nargs='*', default=[1])
dtree_parser = subparsers.add_parser('dtree')
dtree_parser.set_defaults(clf=DecisionTreeClassifier, default_clf_args={})
dtree_parser.add_argument('--grid_max_depth', type=int, nargs='*',
default=[None])
rforest_parser = subparsers.add_parser('rforest')
rforest_parser.set_defaults(clf=RandomForestClassifier, default_clf_args={})
rforest_parser.add_argument('--grid_n_estimators', type=int, nargs='*',
default=[10])
rforest_parser.add_argument('--grid_max_depth', type=int, nargs='*',
default=[None])
mlp_parser = subparsers.add_parser('mlp')
mlp_parser.set_defaults(clf=MLPClassifier, default_clf_args={})
# ast.literal_eval evaluates strings, converting to a tuple in this case
# (may need to put tuples in quotes for command line)
mlp_parser.add_argument('--grid_hidden_layer_sizes', type=ast.literal_eval,
nargs='*', default=[(100,)])
mlp_parser.add_argument('--grid_alpha', type=float, nargs='*', default=[0.0001])
'''
for p in subparsers_by_name.values():
data_group = p.add_argument_group('data')
data_group.add_argument('--train_csv_files', type=str, required=True)
data_group.add_argument('--test_csv_files', type=str, required=True)
data_group.add_argument('--data_dict_files', type=str, required=True)
data_group.add_argument('--output_dir', default='./html/', type=str, required=False)
protocol_group = p.add_argument_group('protocol')
protocol_group.add_argument('--outcome_col_name', type=str, required=False)
protocol_group.add_argument('--validation_size', type=float, default=0.1)
protocol_group.add_argument('--key_cols_to_group_when_splitting', type=str,
default=None, nargs='*')
protocol_group.add_argument('--scoring', type=str, default='roc_auc_score')
protocol_group.add_argument('--random_seed', type=int, default=8675309)
protocol_group.add_argument('--n_splits', type=int, default=1)
protocol_group.add_argument('--threshold_scoring', type=str,
default=None, choices=[None, 'None'] + THRESHOLD_SCORING_OPTIONS)
#p.add_argument('-a-ts_dir', required=True)
#p.add_argument('--data_dict', required=True)
#p.add_argument('--static_files', nargs='*')
args, unknown_args = parser.parse_known_args()
fig_dir = os.path.abspath(args.output_dir)
# key[5:] strips the 'grid_' prefix from the argument
argdict = vars(args)
raw_param_grid = {
key[5:]: argdict[key] for key in argdict if key.startswith('grid_')}
# Parse unspecified arguments to be passed through to the classifier
def auto_convert_str(x):
try:
x_float = float(x)
x_int = int(x_float)
if x_int == x_float:
return x_int
else:
return x_float
except ValueError:
return x
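    # Quick sanity notes (illustrative additions, not in the original script):
    # auto_convert_str keeps integers as ints, decimals as floats, and leaves other
    # strings untouched.
    assert auto_convert_str('3') == 3
    assert auto_convert_str('0.5') == 0.5
    assert auto_convert_str('adam') == 'adam'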
# Import data
feature_cols = []
outcome_cols = []
df_by_split = dict()
for split_name, csv_files in [('train', args.train_csv_files.split(',')), ('test', args.test_csv_files.split(','))]:
cur_df = None
for csv_file, data_dict_file in zip(csv_files, args.data_dict_files.split(',')):
with open(data_dict_file, 'r') as f:
data_fields = json.load(f)['schema']['fields']
key_cols = [c['name'] for c in data_fields if c['role'] in ('key', 'id')]
feature_cols.extend([
c['name'] for c in data_fields if (
c['role'].lower() in ('feature', 'covariate', 'measurement')
and c['name'] not in feature_cols)])
            outcome_cols.extend([
                c['name'] for c in data_fields if (
                    c['role'].lower() in ('output', 'outcome')
                    and c['name'] not in outcome_cols)])
# TODO use json data dict to load specific columns as desired types
more_df = pd.read_csv(csv_file)
if cur_df is None:
cur_df = more_df
else:
cur_df = cur_df.merge(more_df, on=key_cols)
df_by_split[split_name] = cur_df
'''
data_dict = json.load(open(args.data_dict))
train = pd.read_csv(args.ts_dir + '/train.csv')
test = pd.read_csv(args.ts_dir + '/test.csv')
if args.static_files:
for f in args.static_files:
static = pd.read_csv(f)
join_cols = [c['name'] for c in data_dict['fields']
if c['role'] == 'id' and c['name'] in static.columns
and c['name'] in train.columns]
train = train.merge(static, on=join_cols)
test = test.merge(static, on=join_cols)
feature_cols = [c['name'] for c in data_dict['fields']
if c['role'] == 'feature' and c['name'] in train]
outcome_col = [c['name'] for c in data_dict['fields']
if c['role'] == 'outcome' and c['name'] in train]
'''
outcome_col_name = args.outcome_col_name
if outcome_col_name is None:
if len(outcome_cols) > 1:
raise Exception('Data has multiple outcome column, need to select one via --outcome_col_name')
elif len(outcome_cols) == 0:
raise Exception("Data has no outcome columns. Need to label at least one with role='outcome'")
outcome_col_name = outcome_cols[0]
if outcome_col_name not in outcome_cols:
raise Exception("Selected --outcome_col_name=%s not labeled in data_dict with role='outcome'" % (
outcome_col_name))
# Prepare data for classification
x_train = df_by_split['train'][feature_cols].values
y_train = np.ravel(df_by_split['train'][outcome_col_name])
x_test = df_by_split['test'][feature_cols].values
y_test = np.ravel(df_by_split['test'][outcome_col_name])
fixed_args = {}
fixed_group = None
for g in subparsers_by_name[args.clf_name]._action_groups:
if g.title.count('fixed'):
fixed_group = g
break
for key in get_sorted_list_of_kwargs_specific_to_group_parser(fixed_group):
val = vars(args)[key]
if isinstance(val, str):
if val.lower() == 'none':
val = None
fixed_args[key] = val
passthrough_args = {}
for i in range(len(unknown_args)):
arg = unknown_args[i]
if arg.startswith('--'):
val = unknown_args[i+1]
passthrough_args[arg[2:]] = auto_convert_str(val)
# Perform hyper_searcher search
n_examples = int(np.ceil(x_train.shape[0] * (1 - args.validation_size)))
n_features = x_train.shape[1]
param_grid = dict()
for key, grid in raw_param_grid.items():
fkey = 'filter__' + key
if fkey in argdict:
filter_func = eval(argdict[fkey])
filtered_grid = np.unique([filter_func(g, n_examples, n_features) for g in grid]).tolist()
else:
filtered_grid = np.unique(grid).tolist()
        if len(filtered_grid) == 0:
            raise ValueError("Bad grid for parameter: %s" % key)
        elif len(filtered_grid) == 1:
            fixed_args[key] = filtered_grid[0]
            warnings.warn("Skipping parameter %s with only one grid value" % key)
else:
param_grid[key] = filtered_grid
# Create classifier object
clf = args.clf_constructor(
random_state=int(args.random_seed), **fixed_args, **passthrough_args)
splitter = Splitter(
size=args.validation_size, random_state=args.random_seed,
n_splits=args.n_splits, cols_to_group=args.key_cols_to_group_when_splitting)
hyper_searcher = GridSearchCV(
clf, param_grid,
scoring=args.scoring, cv=splitter, refit=True, return_train_score=True)
key_train = splitter.make_groups_from_df(df_by_split['train'][key_cols])
hyper_searcher.fit(x_train, y_train, groups=key_train)
# Pretty tables for results of hyper_searcher search
cv_perf_df = pd.DataFrame(hyper_searcher.cv_results_)
tr_split_keys = ['mean_train_score'] + ['split%d_train_score' % a for a in range(args.n_splits)]
te_split_keys = ['mean_test_score'] + ['split%d_test_score' % a for a in range(args.n_splits)]
cv_tr_perf_df = cv_perf_df[['params'] + tr_split_keys].copy()
cv_te_perf_df = cv_perf_df[['params'] + te_split_keys].copy()
# Threshold search
# TODO make cast wider net at nearby settings to the best estimator??
if str(args.threshold_scoring) != 'None':
# hyper_searcher search on validation over possible threshold values
# Make sure all candidates at least provide
# one instance of each class (positive and negative)
yproba_class1_vals = list()
for tr_inds, va_inds in splitter.split(x_train, groups=key_train):
x_valid = x_train[va_inds]
yproba_valid = hyper_searcher.best_estimator_.predict_proba(x_valid)[:,1]
yproba_class1_vals.extend(yproba_valid)
unique_yproba_vals = np.unique(yproba_class1_vals)
if unique_yproba_vals.shape[0] == 1:
nontrivial_thr_vals = unique_yproba_vals
else:
# Try all thr values that would give at least one pos and one neg decision
nontrivial_thr_vals = np.unique(unique_yproba_vals)[1:-1]
if nontrivial_thr_vals.size > 100:
# Too many for possible thr values for typical compute power
# Cover the space of typical computed values well
# But also include some extreme values
dense_thr_grid = np.linspace(
np.percentile(nontrivial_thr_vals, 5),
np.percentile(nontrivial_thr_vals, 95),
90)
extreme_thr_grid = np.linspace(
nontrivial_thr_vals[0],
nontrivial_thr_vals[-1],
10)
thr_grid = np.unique(np.hstack([extreme_thr_grid, dense_thr_grid]))
else:
# Seems feasible to just look at all possible thresholds
# that give distinct operating points.
thr_grid = nontrivial_thr_vals
print("Searching thresholds...")
if thr_grid.shape[0] > 3:
print("thr_grid = %.4f, %.4f, %.4f ... %.4f, %.4f" % (
thr_grid[0], thr_grid[1], thr_grid[2], thr_grid[-2], thr_grid[-1]))
## TODO find better way to do this fast
# So we dont need to call fit at each thr value
score_grid_SG = np.zeros((splitter.n_splits, thr_grid.size))
for ss, (tr_inds, va_inds) in enumerate(
splitter.split(x_train, y_train, groups=key_train)):
x_tr = x_train[tr_inds].copy()
y_tr = y_train[tr_inds].copy()
x_va = x_train[va_inds]
y_va = y_train[va_inds]
tmp_clf = ThresholdClassifier(hyper_searcher.best_estimator_)
tmp_clf.fit(x_tr, y_tr)
for gg, thr in enumerate(thr_grid):
tmp_clf = tmp_clf.set_params(threshold=thr)
yhat = tmp_clf.predict(x_va)
score_grid_SG[ss, gg] = calc_score_for_binary_predictions(y_va, yhat, scoring=args.threshold_scoring)
avg_score_G = np.mean(score_grid_SG, axis=0)
# Do a 2nd order quadratic fit to the scores
# Focusing weight on points near the maximizer
# This gives us a "smoothed" function mapping thresholds to scores
# avoids issues if scores are "wiggly" we don't want to rely too much on max
# OR will do right thing when there are many thresholds that work
# Using smoothed scores guarantees we get maximizer in the middle of that range
weights_G = np.exp(-10.0 * np.abs(avg_score_G - np.max(avg_score_G)))
poly_coefs = np.polyfit(thr_grid, avg_score_G, 2, w=weights_G)
smoothed_score_G = np.polyval(poly_coefs, thr_grid)
gg = np.argmax(smoothed_score_G)
# Keep best scoring estimator
best_thr = thr_grid[gg]
print("Chosen Threshold: %.4f" % best_thr)
best_clf = ThresholdClassifier(hyper_searcher.best_estimator_, threshold=best_thr)
else:
best_clf = hyper_searcher.best_estimator_
# Evaluation
row_dict_list = list()
extra_list = list()
for split_name, x, y in [
('train', x_train, y_train),
('test', x_test, y_test)]:
row_dict = dict(split_name=split_name, n_examples=x.shape[0], n_labels_positive=np.sum(y))
row_dict['frac_labels_positive'] = np.sum(y) / x.shape[0]
y_pred = best_clf.predict(x)
y_pred_proba = best_clf.predict_proba(x)[:, 1]
confusion_arr = confusion_matrix(y, y_pred)
cm_df = pd.DataFrame(confusion_arr, columns=[0,1], index=[0,1])
cm_df.columns.name = 'Predicted label'
cm_df.index.name = 'True label'
accuracy = accuracy_score(y, y_pred)
balanced_accuracy = balanced_accuracy_score(y, y_pred)
log2_loss = log_loss(y, y_pred_proba, normalize=True) / np.log(2)
row_dict.update(dict(confusion_html=cm_df.to_html(), cross_entropy_base2=log2_loss, accuracy=accuracy, balanced_accuracy=balanced_accuracy))
f1 = f1_score(y, y_pred)
avg_precision = average_precision_score(y, y_pred_proba)
roc_auc = roc_auc_score(y, y_pred_proba)
row_dict.update(dict(f1_score=f1, average_precision=avg_precision, AUROC=roc_auc))
npv, ppv = np.diag(cm_df.values) / cm_df.sum(axis=0)
tnr, tpr = np.diag(cm_df.values) / cm_df.sum(axis=1)
row_dict.update(dict(TPR=tpr, TNR=tnr, PPV=ppv, NPV=npv))
row_dict.update(dict(tn=cm_df.values[0,0], fp=cm_df.values[0,1], fn=cm_df.values[1,0], tp=cm_df.values[1,1]))
# Plots
B = 0.03
plot_binary_clf_calibration_curve_and_histograms(y, y_pred_proba, bins=11, B=B)
plt.savefig(os.path.join(fig_dir, '{split_name}_calibration_curve.png'.format(split_name=split_name)))
plt.close()
# ROC curve
roc_fpr, roc_tpr, _ = roc_curve(y, y_pred_proba)
fig_h = plt.figure(**FIG_KWARGS)
ax = plt.gca()
ax.plot(roc_fpr, roc_tpr, 'b.-')
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_xlim([-B, 1.0 + B])
ax.set_ylim([-B, 1.0 + B])
plt.savefig(os.path.join(fig_dir, '{split_name}_roc_curve.png'.format(split_name=split_name)))
plt.close()
# PR curve
# ordered with *decreasing* recall
precision, recall, _ = precision_recall_curve(y, y_pred_proba)
# To compute area under curve, make sure we integrate with *increasing* recall
row_dict['AUPRC'] = np.trapz(precision[::-1], recall[::-1])
fig_h = plt.figure(**FIG_KWARGS)
ax = plt.gca()
ax.plot(recall, precision, 'b.-')
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.set_xlim([-B, 1.0 + B])
ax.set_ylim([-B, 1.0 + B])
plt.savefig(os.path.join(fig_dir, '{split_name}_pr_curve.png'.format(split_name=split_name)))
plt.close()
row_dict_list.append(row_dict)
perf_df = pd.DataFrame(row_dict_list)
perf_df = perf_df.set_index('split_name')
# Set up HTML report
try:
os.mkdir(fig_dir)
except OSError:
pass
os.chdir(fig_dir)
doc, tag, text = Doc().tagtext()
    pd.set_option('display.precision', 4)
with tag('html'):
        if TEMPLATE_HTML_PATH is not None and os.path.exists(TEMPLATE_HTML_PATH):
with open(TEMPLATE_HTML_PATH, 'r') as f:
for line in f.readlines():
doc.asis(line)
with tag('div', klass="container-fluid text-center"):
with tag('div', klass='row content'):
with tag('div', klass="col-sm-1 sidenav"):
text("")
with tag('div', klass="col-sm-10 text-left"):
with tag('h3'):
text('Hyperparameters of best model')
with tag('pre'):
text(str(best_clf))
with tag('h3'):
with tag('a', name='results-data-summary'):
text('Input Data Summary')
doc.asis(perf_df[['n_examples', 'n_labels_positive', 'frac_labels_positive']].to_html())
with tag('h3'):
with tag('a', name='results-performance-plots'):
text('Performance Plots')
with tag('table'):
with tag('tr'):
with tag('th', **{'text-align':'center'}):
text('Train')
with tag('th', **{'text-align':'center'}):
text('Test')
with tag('tr'):
with tag('td', align='center'):
doc.stag('img', src=os.path.join(fig_dir, 'train_roc_curve.png'), width=400)
with tag('td', align='center'):
doc.stag('img', src=os.path.join(fig_dir, 'test_roc_curve.png'), width=400)
with tag('tr'):
with tag('td', align='center'):
doc.stag('img', src=os.path.join(fig_dir, 'train_pr_curve.png'), width=400)
with tag('td', align='center'):
doc.stag('img', src=os.path.join(fig_dir, 'test_pr_curve.png'), width=400)
with tag('tr'):
with tag('td', align='center'):
doc.stag('img', src=os.path.join(fig_dir, 'train_calibration_curve.png'), width=400)
with tag('td', align='center'):
doc.stag('img', src=os.path.join(fig_dir, 'test_calibration_curve.png'), width=400)
with tag('tr'):
with tag('td', align='center', **{'text-align':'center'}):
                                doc.asis(str(perf_df.iloc[0][['confusion_html']].values[0]).replace('&lt;', '<').replace('&gt;', '>').replace('\\n', ''))
with tag('td', align='center', **{'text-align':'center'}):
                                doc.asis(str(perf_df.iloc[1][['confusion_html']].values[0]).replace('&lt;', '<').replace('&gt;', '>').replace('\\n', ''))
with tag('h3'):
with tag('a', name='results-performance-metrics-proba'):
text('Performance Metrics using Probabilities')
doc.asis(perf_df[['AUROC', 'AUPRC', 'average_precision', 'cross_entropy_base2']].to_html())
with tag('h3'):
with tag('a', name='results-performance-metrics-thresh'):
text('Performance Metrics using Thresholded Decisions')
doc.asis(perf_df[['balanced_accuracy', 'accuracy', 'f1_score', 'TPR', 'TNR', 'PPV', 'NPV']].to_html())
with tag('h3'):
with tag('a', name='settings-hyperparameter'):
text('Settings: Hyperparameters to Tune')
with tag('pre'):
hyper_group = None
for g in subparsers_by_name[args.clf_name]._action_groups:
if g.title.count('hyperparameters'):
hyper_group = g
break
for x in get_sorted_list_of_kwargs_specific_to_group_parser(hyper_group):
text(x, ': ', str(vars(args)[x]), '\n')
with tag('h3'):
with tag('a', name='settings-protocol'):
text('Settings: Protocol')
with tag('pre'):
for x in get_sorted_list_of_kwargs_specific_to_group_parser(protocol_group):
text(x, ': ', str(vars(args)[x]), '\n')
with tag('h3'):
with tag('a', name='settings-data'):
text('Settings: Data')
with tag('pre'):
for x in get_sorted_list_of_kwargs_specific_to_group_parser(data_group):
text(x, ': ', str(vars(args)[x]), '\n')
with tag('h3'):
with tag('a', name='results-hyper-search'):
text('Hyperparameter Search results')
with tag('h4'):
text('Train Scores across splits')
doc.asis(pd.DataFrame(cv_tr_perf_df).to_html())
with tag('h4'):
text('Heldout Scores across splits')
                    doc.asis(pd.DataFrame(cv_te_perf_df).to_html())
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.window import ExponentialMovingWindow
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.ewm(com=0.5).mean()
def test_constructor(frame_or_series):
c = frame_or_series(range(5)).ewm
# valid
c(com=0.5)
c(span=1.5)
c(alpha=0.5)
c(halflife=0.75)
c(com=0.5, span=None)
c(alpha=0.5, com=None)
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
c(com=0.5, alpha=0.5)
with pytest.raises(ValueError, match=msg):
c(span=1.5, halflife=0.75)
with pytest.raises(ValueError, match=msg):
c(alpha=0.5, span=1.5)
# not valid: com < 0
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
c(com=-0.5)
# not valid: span < 1
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
c(span=0.5)
# not valid: halflife <= 0
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
msg = "alpha must satisfy: 0 < alpha <= 1"
for alpha in (-0.5, 1.5):
with pytest.raises(ValueError, match=msg):
c(alpha=alpha)
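# Illustrative addition (not part of the upstream pandas test suite): com, span and
# alpha parameterize the same exponential decay, with alpha = 1 / (1 + com)
# = 2 / (span + 1), so equivalent settings produce the same result.
def test_com_span_alpha_equivalence_example():
    ser = Series([1.0, 2.0, 3.0, 4.0])
    expected = ser.ewm(alpha=0.5).mean()
    tm.assert_series_equal(ser.ewm(com=1.0).mean(), expected)
    tm.assert_series_equal(ser.ewm(span=3.0).mean(), expected)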
@pytest.mark.parametrize("method", ["std", "mean", "var"])
def test_numpy_compat(method):
# see gh-12811
e = ExponentialMovingWindow(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
def test_ewma_times_not_datetime_type():
msg = r"times must be datetime64\[ns\] dtype."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(5))
def test_ewma_times_not_same_length():
msg = "times must be the same length as the object."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(4).astype("datetime64[ns]"))
def test_ewma_halflife_not_correct_type():
msg = "halflife must be a timedelta convertible object"
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]"))
def test_ewma_halflife_without_times(halflife_with_times):
msg = "halflife can only be a timedelta convertible argument if times is not None."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=halflife_with_times)
@pytest.mark.parametrize(
"times",
[
np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"),
date_range("2000", freq="D", periods=10),
        date_range("2000", freq="D", periods=10),
''' Description: Solution to desafio-05 - osprogramadores.com '''
import json
import pandas as pd
def main():
    '''Reads the JSON file'''
with open('funcionarios.json') as file:
data = json.load(file)
    df_funcionario = pd.DataFrame.from_dict(data['funcionarios'])
import cv2
import os
import copy
import numpy as np
import pandas as pd
from classix import CLASSIX
import matplotlib.pyplot as plt
from collections import OrderedDict
def order_pics(figs):
images = list()
labels = list()
for i in range(40):
num = i + 1
for img in figs:
try:
if int(img.split('_')[1].replace('.jpg','')) == num:
images.append(img)
labels.append(num)
except:
pass
return images, labels
def load_images(folder, shape=(100, 100)):
images = list()
figs = os.listdir(folder)
figs, targets= order_pics(figs)
for filename in figs:
img = cv2.imread(os.path.join(folder,filename))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # transform to grayscale
        img = cv2.resize(img, shape, interpolation = cv2.INTER_AREA) # resize to the target shape (default 100x100)
if img is not None:
images.append(img)
images, targets = np.array(images), np.array(targets) - 1
images = images.reshape(images.shape[0], images.shape[1]*images.shape[2])
return images, targets
def reassign_labels(labels, fix=None):
if fix != None:
value_count = pd.Series(labels[labels != fix]).value_counts()
else:
        value_count = pd.Series(labels).value_counts()
import os
from numpy import mean, std, sqrt
from algorithms.common.stopping_criterion import MaxGenerationsCriterion, ErrorDeviationVariationCriterion, TrainingImprovementEffectivenessCriterion
from data.io_plm import _get_path_to_data_dir
import numpy as np
import pandas as pd
def _metric_in_dict(metric, d):
return metric in d[0].keys()
def _get_dictionaries_by_metric(results, metric):
return {k: results[k] for k in results.keys() if _metric_in_dict(metric, results[k])}
def _get_values_from_dictionary(dictionary, metric):
return [d[metric] for d in dictionary if d is not None and metric in d]
def _summarize_metric(metric, summarizer=mean):
return [summarizer([m[i] for m in metric]) for i in range(len(metric[0]))]
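# Illustrative example (not part of the original module): _summarize_metric aggregates
# position-wise across runs, e.g. the per-generation mean over several repetitions:
# _summarize_metric([[1, 2, 3], [3, 4, 5]]) -> [2.0, 3.0, 4.0]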
def _format_static_table(results, metric):
dictionaries = _get_dictionaries_by_metric(results, metric)
values = {k: _get_values_from_dictionary(dictionaries[k], metric) for k in dictionaries.keys()}
return pd.DataFrame.from_dict(values)
def _format_configuration_table(results, value_to_get):
"""formats number generations, number of layers, learning step value, subset ratio"""
dictionaries = _get_dictionaries_by_metric(results, 'best_configuration')
values = {k: _get_values_from_dictionary(dictionaries[k], 'best_configuration') for k in dictionaries.keys()}
values_to_get = {k: _get_values_from_dictionary(values[k], value_to_get) for k in dictionaries.keys()}
values_saved = {}
if value_to_get == 'stopping_criterion':
values_gen = {k: _get_values_from_dictionary(results[k], 'topology') for k in dictionaries.keys()} # using topology because it has one value for each gen
for key, value in values_gen.items():
nr_generations = [len(item) - 1 for item in value]
values_saved[key] = nr_generations
elif value_to_get == 'layers':
return pd.DataFrame.from_dict(values_to_get)
elif value_to_get == 'learning_step':
for key, value in values_to_get.items():
if type(value[0]) != str:
values_saved[key] = value
elif value_to_get == 'subset_ratio':
for key, value in values_to_get.items():
if value:
subset_ratio = [item for item in value]
values_saved[key] = subset_ratio
df = pd.DataFrame.from_dict(values_saved, orient='index')
df = df.fillna(0)
return df.T
else:
print('\n\t\t\t[_format_configuration_table] Should not happen!')
return pd.DataFrame.from_dict(values_saved)
def _format_rst_rwt_frequency(results):
dictionaries = _get_dictionaries_by_metric(results, 'best_configuration')
best_configurations = {k: _get_values_from_dictionary(dictionaries[k], 'best_configuration') for k in dictionaries.keys()}
values_saved = {}
for key, value in best_configurations.items():
if (key == 'slm_bls_group' or key == 'slm_ols_group'):
nr_no_RST_RWT = 0
nr_RST = 0
nr_RWT = 0
for run in value:
if (run['random_sampling_technique'] == False and run['random_weighting_technique'] == False):
nr_no_RST_RWT += 1
elif (run['random_sampling_technique'] == True and run['random_weighting_technique'] == False):
nr_RST += 1
elif (run['random_sampling_technique'] == False and run['random_weighting_technique'] == True):
nr_RWT += 1
else:
print('\n\t\t\t[_format_rst_rwt_frequency] Should not happen!')
values = [nr_no_RST_RWT, nr_RST, nr_RWT]
values_saved[key] = values
return pd.DataFrame(values_saved, index=['No RST and No RWT Frequency', 'RST Frequency', 'RWT Frequency'])
def _format_tie_edv_frequency(results):
dictionaries = _get_dictionaries_by_metric(results, 'best_configuration')
best_configurations = {k: _get_values_from_dictionary(dictionaries[k], 'best_configuration') for k in dictionaries.keys()}
values_saved = {}
for key, value in best_configurations.items():
if (key == 'slm_bls_tie_edv_group'):
nr_TIE = 0
nr_EDV = 0
for run in value:
if type(run['stopping_criterion']) == ErrorDeviationVariationCriterion:
nr_EDV += 1
elif type(run['stopping_criterion']) == TrainingImprovementEffectivenessCriterion:
nr_TIE += 1
else:
print('\n\t\t\t[_format_tie_edv_frequency] Should not happen!')
values = [nr_EDV, nr_TIE]
values_saved[key] = values
return pd.DataFrame(values_saved, index=['EDV Frequency', 'TIE Frequency'])
def _format_slm_best_overall_configuration_frequency(best_result):
slm_bls_group_frequency = 0
slm_ols_group_frequency = 0
slm_bls_tie_edv_group_frequency = 0
slm_ols_edv_frequency = 0
values = {}
for run in best_result:
if run['best_overall_key'] == 'slm_bls_group':
slm_bls_group_frequency += 1
elif run['best_overall_key'] == 'slm_ols_group':
slm_ols_group_frequency += 1
elif run['best_overall_key'] == 'slm_bls_tie_edv_group':
slm_bls_tie_edv_group_frequency += 1
elif run['best_overall_key'] == 'slm_ols_edv':
slm_ols_edv_frequency += 1
else:
print('\n\t\t\t[_format_slm_best_overall_configuration_frequency] Should not happen!')
values['slm_bls_group'] = slm_bls_group_frequency
values['slm_ols_group'] = slm_ols_group_frequency
values['slm_bls_tie_edv_group'] = slm_bls_tie_edv_group_frequency
values['slm_ols_edv'] = slm_ols_edv_frequency
df = pd.DataFrame.from_dict(values, orient='index') # check this
df = df.T
return df
def _format_mlp_configuration_table(results, value_to_get, metric=None):
dictionaries = _get_dictionaries_by_metric(results, 'best_configuration')
values = {k: _get_values_from_dictionary(dictionaries[k], 'best_configuration') for k in dictionaries.keys()}
values_to_get = {k: _get_values_from_dictionary(values[k], value_to_get) for k in dictionaries.keys()}
values_saved = {}
if value_to_get == 'learning_rate_init':
for key, value in values_to_get.items():
if value:
learning_rate_values = [item for item in value]
values_saved[key] = learning_rate_values
return pd.DataFrame(values_saved)
elif metric == 'number_layers':
for key, value in values_to_get.items():
nr_layers = [len(item) for item in value]
values_saved[key] = nr_layers
return pd.DataFrame(values_saved)
elif metric == 'number_neurons':
for key, value in values_to_get.items():
nr_neurons = [sum(item) for item in value]
values_saved[key] = nr_neurons
return pd.DataFrame(values_saved)
# else:
# print('\n\t\t\t[_format_mlp_configuration_table] Should not happen!')
return pd.DataFrame.from_dict(values_to_get)
def _format_mlp_activation_function_frequency(results):
dictionaries = _get_dictionaries_by_metric(results, 'best_configuration')
best_configurations = {k: _get_values_from_dictionary(dictionaries[k], 'best_configuration') for k in dictionaries.keys()}
values_saved = {}
for key, value in best_configurations.items():
nr_logistic = 0
nr_relu = 0
nr_tanh = 0
for run in value:
if run['activation'] == 'logistic':
nr_logistic += 1
elif run['activation'] == 'relu':
nr_relu += 1
elif run['activation'] == 'tanh':
nr_tanh += 1
else:
print('\n\t\t\t[_format_mlp_activation_function_frequency] Should not happen!')
values = [nr_logistic, nr_relu, nr_tanh]
values_saved[key] = values
return pd.DataFrame(values_saved, index=['Logistic Frequency', 'Relu Frequency', 'Tanh Frequency'])
def _format_mlp_penalty_frequency(results):
dictionaries = _get_dictionaries_by_metric(results, 'best_configuration')
best_configurations = {k: _get_values_from_dictionary(dictionaries[k], 'best_configuration') for k in dictionaries.keys()}
values_saved = {}
for key, value in best_configurations.items():
nr_penalty = 0
nr_no_penalty = 0
for run in value:
if run['alpha'] == 0:
nr_no_penalty += 1
elif run['alpha'] != 0:
nr_penalty += 1
else:
print('\n\t\t\t[_format_mlp_penalty_frequency] Should not happen!')
values = [nr_no_penalty, nr_penalty]
values_saved[key] = values
return pd.DataFrame(values_saved, index=['No Penalty Frequency', 'Penalty Frequency'])
def _format_mlp_best_overall_configuration_frequency(best_result, classification):
lbfgs_frequency = 0
adam_frequency = 0
sgd_frequency = 0
values = {}
for run in best_result:
if run['best_overall_configuration']['solver'] == 'lbfgs':
lbfgs_frequency += 1
elif run['best_overall_configuration']['solver'] == 'adam':
adam_frequency += 1
elif run['best_overall_configuration']['solver'] == 'sgd':
sgd_frequency += 1
else:
print('\n\t\t\t[_format_mlp_best_overall_configuration_frequency] Should not happen!')
if classification:
# values['mlpc_lbfgs'] = lbfgs_frequency
values['mlpc_adam'] = adam_frequency
values['mlpc_sgd'] = sgd_frequency
else:
# values['mlpr_lbfgs'] = lbfgs_frequency
values['mlpr_adam'] = adam_frequency
values['mlpr_sgd'] = sgd_frequency
df = pd.DataFrame.from_dict(values, orient='index') # check this
df = df.T
return df
#===============================================================================
# # with lbfgs
# def _format_mlp_best_overall_configuration_frequency(best_result, classification):
# lbfgs_frequency = 0
# adam_frequency = 0
# sgd_frequency = 0
# values = {}
# for run in best_result:
# if run['best_overall_configuration']['solver'] == 'lbfgs':
# lbfgs_frequency += 1
# elif run['best_overall_configuration']['solver'] == 'adam':
# adam_frequency += 1
# elif run['best_overall_configuration']['solver'] == 'sgd':
# sgd_frequency += 1
# else:
# print('\n\t\t\t[_format_mlp_best_overall_configuration_frequency] Should not happen!')
# if classification:
# values['mlpc_lbfgs'] = lbfgs_frequency
# values['mlpc_adam'] = adam_frequency
# values['mlpc_sgd'] = sgd_frequency
# else:
# values['mlpr_lbfgs'] = lbfgs_frequency
# values['mlpr_adam'] = adam_frequency
# values['mlpr_sgd'] = sgd_frequency
# df = pd.DataFrame.from_dict(values, orient='index') # check this
# df = df.T
# return df
#===============================================================================
def _format_mlp_sgd_adam_table(results, value_to_get):
dictionaries = _get_dictionaries_by_metric(results, 'best_configuration')
values = {k: _get_values_from_dictionary(dictionaries[k], 'best_configuration') for k in dictionaries.keys()}
values_to_get = {k: _get_values_from_dictionary(values[k], value_to_get) for k in dictionaries.keys()}
values_saved = {}
for key, value in values_to_get.items():
if value:
values_saved[key] = value
return pd.DataFrame.from_dict(values_saved)
def _format_processing_time_table(results):
dictionaries = _get_dictionaries_by_metric(results, 'processing_time')
values = {k: _get_values_from_dictionary(
dictionaries[k], 'processing_time') for k in dictionaries.keys()}
for key, value in values.items():
values[key] = [sum(item) for item in value]
return pd.DataFrame.from_dict(values)
def _format_topology_table(results, component):
dictionaries = _get_dictionaries_by_metric(results, 'topology')
values = {k: _get_values_from_dictionary(dictionaries[k], 'topology') for k in dictionaries.keys()}
values = {key: [item[-1] for item in value] for key, value in values.items()}
values = {key: [item[component] for item in value] for key, value in values.items()}
return pd.DataFrame.from_dict(values)
def _format_evo_table(results, metric):
dictionaries = _get_dictionaries_by_metric(results, metric)
values = {k: _get_values_from_dictionary(dictionaries[k], metric) for k in dictionaries.keys()}
values = {key: [[item[i] for item in value if i < len(item)]
for i in range(max([len(item) for item in value]))] for key, value in values.items()}
max_len = max(len(value) for key, value in values.items())
mean_dict = {key: [mean(item) for item in value] for key, value in values.items()}
se_dict = {key: [std(item) / sqrt(len(item)) for item in value]
for key, value in values.items()}
for key, value in mean_dict.items():
delta_len = max_len - len(value)
mean_dict[key].extend([np.nan for i in range(delta_len)])
for key, value in se_dict.items():
delta_len = max_len - len(value)
se_dict[key].extend([np.nan for i in range(delta_len)])
    return pd.DataFrame.from_dict(mean_dict), pd.DataFrame.from_dict(se_dict)
import numpy as np
import pandas as pd
import gpflow
from gpflow.utilities import print_summary
def make_subset_simplified(cmip6, start, end, column_name, mean_center=False):
Xmake_all = []
Ymake_all = []
dataset_names = cmip6.name.unique()
for n in dataset_names:
p = cmip6[cmip6.name == n]
X = np.expand_dims(p.x,1)
if mean_center:
# mean center based on 1850-2020 data
temp_mean_centered = p[column_name] - np.mean(p[column_name][0:2061])
Y = np.expand_dims(temp_mean_centered,1)
else:
Y = np.expand_dims(p[column_name],1)
keep_tf = np.logical_and(X[:,0] >= start, X[:,0] < end)
Xmake_all.append(X[keep_tf,:])
Ymake_all.append(Y[keep_tf,:])
return Xmake_all, Ymake_all, dataset_names
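# Illustrative usage sketch (the frame below is synthetic; the real script loads CMIP6
# output with columns 'name', 'x' and the chosen value column):
def _demo_make_subset_simplified():
    cmip6 = pd.DataFrame({
        'name': ['modelA'] * 4 + ['modelB'] * 4,
        'x': [1850, 1900, 1950, 2000] * 2,
        'tas': [0.1, 0.2, 0.3, 0.4, 0.0, 0.1, 0.2, 0.3],
    })
    X_all, Y_all, names = make_subset_simplified(cmip6, 1900, 2001, 'tas')
    return names, [x.shape for x in X_all]  # two models, three rows kept for each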
def make_subset(cmip6, start, end, column_name):
Xmake_all = []
Ymake_all = []
dataset_names = cmip6.name.unique()
for n in dataset_names:
p = cmip6[cmip6.name == n]
X = np.expand_dims(p.time_hrs_since01,1)
# globally mean centered, but maybe not for w/e subset we have here. This bias might accidentally help gp?
temp_mean_centered = p[column_name] - np.mean(p[column_name])
Y = np.expand_dims(temp_mean_centered,1)
keep_tf = np.logical_and(X[:,0] >= start, X[:,0] < end)
Xmake_all.append(X[keep_tf,:])
Ymake_all.append(Y[keep_tf,:])
return Xmake_all, Ymake_all, dataset_names
def fit_ml_single(Y,X,kern_maker):
opt = gpflow.optimizers.Scipy()
max_iter = 1000
kern = kern_maker()
m = gpflow.models.GPR(data=(X, Y), kernel=kern, mean_function=None)
opt_logs = opt.minimize(m.training_loss, m.trainable_variables, options=dict(maxiter=max_iter))
return {'model':m,
'converged':opt_logs['success']}
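# Illustrative usage sketch (kernel choice and data are placeholders, not from the
# original script): kern_maker must be a zero-argument callable returning a fresh kernel.
def _demo_fit_ml_single():
    X = np.linspace(0.0, 1.0, 20).reshape(-1, 1)
    Y = np.sin(6.0 * X) + 0.1 * np.random.randn(20, 1)
    out = fit_ml_single(Y, X, lambda: gpflow.kernels.Matern32())
    return out['model'], out['converged']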
def fit_ml(Y_all, X_all, dataset_names, kern_maker, param_colnames, param_extractor, filename):
eb_results = pd.DataFrame([], columns=['dataset','convergence','lik_var'] + param_colnames)
opt = gpflow.optimizers.Scipy()
max_iter = 1000
if len(Y_all) != len(dataset_names):
print('Size mismatch. Y:', len(Y_all), '. Names:', len(dataset_names))
return 0
for i in range(len(Y_all)):
kern = kern_maker()
X = X_all[i]
Y = Y_all[i]
m = gpflow.models.GPR(data=(X, Y), kernel=kern, mean_function=None)
opt_logs = opt.minimize(m.training_loss, m.trainable_variables, options=dict(maxiter=max_iter))
#print_summary(m)
results = {'dataset': dataset_names[i],
'convergence': opt_logs['success'],
'lik_var': np.array(m.likelihood.variance)}
param_values = param_extractor(m)
for j in range(len(param_values)):
results[param_colnames[j]] = param_values[j]
eb_results = eb_results.append(results, ignore_index=True)
eb_results.to_csv(filename, index=False)
return eb_results
def compare_loo_gp(param_results, kernel_maker, Xtr_all, Ytr_all, Xte_all, Yte_all, pred_dir=None):
M = param_results.shape[0]
mse_group = np.zeros((M))
mse_single = np.zeros((M))
single_set = []
# b/c we dropped observed
if M != len(Xtr_all):
print('Size mismatch: M', M, ', data', len(Xtr_all))
return 0
for m in range(M):
dataset = param_results.dataset[m]
X_tr = Xtr_all[m]
Y_tr = Ytr_all[m]
X_te = Xte_all[m]
Y_te = Yte_all[m]
group_ests = param_results.drop(m).mean(numeric_only=True)
kern_group = kernel_maker(group_ests)
kern_single = kernel_maker(param_results.loc[m])
m_group = gpflow.models.GPR(data=(X_tr, Y_tr), kernel=kern_group, mean_function=None)
m_group.likelihood.variance = np.double(group_ests.lik_var)
mod = gpflow.models.GPR(data=(X_tr, Y_tr), kernel=kern_single, mean_function=None)
mod.likelihood.variance = np.double(param_results.lik_var[m])
pred_m_group, pred_var_group = m_group.predict_f(X_te)
pred_m, pred_var = mod.predict_f(X_te)
mse_group[m] = np.mean((Y_te[:,0] - pred_m_group[:,0])**2)
mse_single[m] = np.mean((Y_te[:,0]- pred_m[:,0])**2)
single_set.append(dataset)
if pred_dir is not None:
fn = pred_dir + 'test_group_' + param_results.dataset[m] + '.csv'
d = np.array([pred_m_group[:,0], pred_var_group[:,0], Y_te[:,0]]).T
dat = pd.DataFrame(d, columns=['mean', 'var', 'obs'])
dat.to_csv(fn, index=False)
fn = pred_dir + 'test_single_' + param_results.dataset[m] + '.csv'
d = np.array([pred_m[:,0], pred_var[:,0], Y_te[:,0]]).T
            dat = pd.DataFrame(d, columns=['mean', 'var', 'obs'])
from pathlib import Path
import os
import re
import pandas as pd
import numpy as np
import random
from math import ceil
import cv2
import glob
import shutil
import experiment_code.constants as consts
from experiment_code.targetfile_utils import Utils
# import experiment_code.targetfile_utils as utils
# create instances of directories
class VisualSearch(Utils):
"""
This class makes target files for Visual Search using parameters set in __init__
Args:
task_name (str): 'visual_search'
orientations (int): orientations of target/distractor stims
balance_blocks (dict): keys are 'condition_name', 'trial_type'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
        iti_dur (int): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'visual_search'
self.orientations = list([90, 180, 270, 360]) # DO NOT CHANGE ORDER
self.balance_blocks = {'condition_name': {'easy': '4', 'hard': '8'}, 'trial_type': [True, False]}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 2
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'right'
self.replace = False
self.display_trial_feedback = True
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def _create_columns(self):
def _get_condition(x):
for key in self.balance_blocks['condition_name'].keys():
cond = self.balance_blocks['condition_name'][key]
if x==cond:
value = key
return value
dataframe = pd.DataFrame()
# make `condition_name` column
conds = [self.balance_blocks['condition_name'][key] for key in self.balance_blocks['condition_name'].keys()]
# conds = [self.balance_blocks['condition_name']['easy'], self.balance_blocks['condition_name']['hard']]
dataframe['stim'] = self.num_trials*conds
dataframe['condition_name'] = dataframe['stim'].apply(lambda x: _get_condition(x))
dataframe['stim'] = dataframe['stim'].astype(int)
# make `trial_type` column
dataframe['trial_type'] = self.num_trials*self.balance_blocks['trial_type']
dataframe['trial_type'] = dataframe['trial_type'].sort_values().reset_index(drop=True)
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=True)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
return dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
def _save_visual_display(self, dataframe):
# add visual display cols
display_pos, orientations_correct = zip(*[self._make_search_display(cond, self.orientations, trial_type) for (cond, trial_type) in zip(dataframe["stim"], dataframe["trial_type"])])
data_dicts = []
for trial_idx, trial_conditions in enumerate(display_pos):
for condition, point in trial_conditions.items():
data_dicts.append({'trial': trial_idx, 'stim': condition, 'xpos': point[0], 'ypos': point[1], 'orientation': orientations_correct[trial_idx][condition]})
# save out to dataframe
df_display = pd.DataFrame.from_records(data_dicts)
# save out visual display
visual_display_name = self._get_visual_display_name()
df_display.to_csv(os.path.join(self.target_dir, visual_display_name))
def _get_visual_display_name(self):
tf_name = f"{self.task_name}_{self.block_dur_secs}sec"
tf_name = self._get_target_file_name(tf_name)
str_part = tf_name.partition(self.task_name)
visual_display_name = 'display_pos' + str_part[2]
return visual_display_name
def _make_search_display(self, display_size, orientations, trial_type):
# make location and orientations lists (for target and distractor items)
# STIM POSITIONS
grid_h_dva = 8.4
grid_v_dva = 11.7
n_h_items = 6
n_v_items = 8
item_h_pos = np.linspace(-grid_h_dva / 2.0, +grid_h_dva/ 2.0, n_h_items)
item_v_pos = np.linspace(-grid_v_dva / 2.0, +grid_v_dva / 2.0, n_v_items)
grid_pos = []
for curr_h_pos in item_h_pos:
for curr_v_pos in item_v_pos:
grid_pos.append([curr_h_pos, curr_v_pos])
locations = random.sample(grid_pos, display_size)
## STIM ORIENTATIONS
orientations_list = orientations*int(display_size/4)
# if trial type is false - randomly replace target stim (90)
# with a distractor
if not trial_type:
orientations_list = [random.sample(orientations[1:],1)[0] if x==90 else x for x in orientations_list]
# if trial is true and larger than 4, leave one target stim (90) in list
# and randomly replace the others with distractor stims
if display_size >4 and trial_type:
indices = [i for i, x in enumerate(orientations_list) if x == 90]
indices.pop(0)
new_num = random.sample(orientations[1:],2) # always assumes that orientations_list is as follows: [90,180,270,360]
for i, n in zip(*(indices, new_num)):
orientations_list[i] = n
return dict(enumerate(locations)), dict(enumerate(orientations_list))
def make_targetfile(self, **kwargs):
"""
makes target file(s) for visual search given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
seeds = np.arange(self.num_blocks)+1
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (2Back- and 2Back+) are equally represented
self.random_state = seeds[self.block]
# create the dataframe
df_target = self._create_columns()
# balance the dataframe
df_target = self._balance_design(dataframe = df_target)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
# save visual display dataframe
self._save_visual_display(dataframe = df_target)
# save target file
self._save_target_files(df_target)
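# Illustrative usage sketch (arbitrary values; target files are written under
# consts.target_dir as in make_targetfile above):
# VisualSearch().make_targetfile(block_dur_secs=30, num_blocks=5, tile_block=1)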
class NBack(Utils):
"""
This class makes target files for N Back using parameters set in __init__
Args:
task_name (str): 'n_back'
n_back (int): default is 2
balance_blocks (dict): keys are 'condition_name'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
        iti_dur (int): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'n_back'
self.n_back = 2
self.balance_blocks = {'condition_name': {'easy': '2_back-', 'hard': '2_back+'}}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 1.5
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'left'
self.replace = False
self.display_trial_feedback = True
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def _create_columns(self):
def _get_condition(x):
for key in self.balance_blocks['condition_name'].keys():
cond = self.balance_blocks['condition_name'][key]
if x==cond:
value = key
return value
# make trial_type column
dataframe = pd.DataFrame()
dataframe['trial_type'] = self.num_stims*(True, False)
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
dataframe['trial_type'][:self.n_back] = False # first n+cond_type trials (depending on cond_type) have to be False
# make `n_back` and `condition_name` cols
conds = [self.balance_blocks['condition_name'][key] for key in self.balance_blocks['condition_name'].keys()]
dataframe['n_back'] = np.where(dataframe["trial_type"]==False, conds[0], conds[1])
dataframe['condition_name'] = dataframe['n_back'].apply(lambda x: _get_condition(x))
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# load in stimuli
stim_files = [f for f in os.listdir(str(consts.stim_dir / self.task_name)) if f.endswith('g')] # image files (e.g. .jpg/.png)
# first two images are always random (and false)
# all other images are either match or not a match
random.seed(self.random_state)
stim_list = random.sample(stim_files, k=self.n_back)
for t in dataframe['trial_type'][self.n_back:]: # loop over the trials after the first n_back
match_img = stim_list[-self.n_back]
no_match_imgs = [stim for stim in stim_files if stim != match_img] # was match_img[0]
if t == False: # not a match
random.seed(self.random_state)
stim_list.append(random.sample(no_match_imgs, k=self.n_back-1))
else: # match
stim_list.append(match_img)
dataframe["stim"] = [''.join(x) for x in stim_list]
return dataframe
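# Illustrative 2-back sequence: stim_list starts with two random images, e.g. [img_A, img_B]
# (hypothetical names); a trial with trial_type True repeats the image shown n_back (2)
# positions earlier, while a False trial appends a different image from the pool.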
def make_targetfile(self, **kwargs):
"""
makes target file(s) for n back given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
seeds = np.arange(self.num_blocks)+1
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (2Back- and 2Back+) are equally represented
self.random_state = seeds[self.block]
# create the dataframe
df_target = self._create_columns()
# balance the dataframe
df_target = self._balance_design(dataframe = df_target)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class SocialPrediction(Utils):
"""
This class makes target files for Social Prediction using parameters set in __init__
Args:
task_name (str): 'social_prediction'
dataset_name (str): 'homevideos' is the default
logging_file (str): csv file containing info about stimuli
video_name (list of str): name of video(s) to include
resized (bool): resize frames of video
balance_blocks (dict): keys are 'actors', 'condition_name', 'label'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (int): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'social_prediction'
self.dataset_name = 'homevideos'
self.logging_file = 'homevideos_annotations_logging.csv'
self.video_name = ['dynamic_0ms', 'dynamic_100ms']
self.resized = True
self.balance_blocks = {'actors': ['SB', 'MK'],
'condition_name': {'dynamic_0ms': 'easy', 'dynamic_100ms': 'hard'},
'label': ['hug', 'handShake']}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 2.5
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'right'
self.replace = False
self.display_trial_feedback = True
def _filter_dataframe(self, dataframe):
# remove all filenames where any of the videos have not been extracted
stims_to_remove = dataframe.query('extracted==False')["video_name"].to_list()
df_filtered = dataframe[~dataframe["video_name"].isin(stims_to_remove)]
# query rows with relevant videos and relevant labels
label = self.balance_blocks['label']
actors = self.balance_blocks['actors']
df_filtered = df_filtered.query(f'condition_name=={self.video_name} and label=={label} and actors=={actors}')
return df_filtered
def _create_new_columns(self, dataframe):
# make new `stim`
if self.resized:
dataframe['stim'] = dataframe['video_name'] + '_' + dataframe['condition_name'] + '_resized' + '.mp4'
else:
dataframe['stim'] = dataframe['video_name'] + '_' + dataframe['condition_name'] + '.mp4'
# set `condition name`
dataframe['condition_name'] = dataframe['condition_name'].apply(lambda x: self.balance_blocks['condition_name'][x])
# assign dataset name
dataframe['dataset'] = self.dataset_name
# assign trial type (only works currently for two labels)
labels = self.balance_blocks['label']
if len(labels)==2:
dataframe['trial_type'] = dataframe['label'].apply(lambda x: True if x==labels[0] else False)
else:
print('there is an incorrect number of labels; there should be exactly two')
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
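# Worked example, assuming the defaults from __init__ and no kwargs:
# num_trials = int(15 / (2.5 + 0.5)) = 5; balance_blocks defines 2 actors x 2 conditions x 2 labels
# = 8 cells, so num_stims = ceil(5 / 8) = 1 clip sampled per cell in _balance_design
# (up to 8 rows) before trimming down to num_trials.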
def make_targetfile(self, **kwargs):
"""
makes target file(s) for social prediction given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`trial_type` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe according to `balance_blocks`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class SemanticPrediction(Utils):
"""
This class makes target files for Semantic Prediction using parameters set in __init__
Args:
task_name (str): 'semantic_prediction'
logging_file (str): csv file containing info about stimuli
stem_word_dur (int): length of stem word (sec)
last_word_dur (int): length of last word (sec)
frac (float): proportion of meaningless trials. default is .3.
balance_blocks (dict): keys are 'CoRT_descript', 'condition_name'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (int): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'semantic_prediction'
self.logging_file = 'sentence_validation.csv'
self.stem_word_dur = 0.5
self.last_word_dur = 1.5
self.frac = .3
self.balance_blocks = {'CoRT_descript': ['strong non-CoRT', 'strong CoRT'],
'condition_name': {'high cloze': 'easy', 'low cloze': 'hard'}}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 7
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'right'
self.replace = False
self.display_trial_feedback = True
def _filter_dataframe(self, dataframe):
# conds = [self.balance_blocks['condition_name'][key] for key in self.balance_blocks['condition_name'].keys()]
conds = list(self.balance_blocks['condition_name'].keys())
dataframe = dataframe.query(f'CoRT_descript=={self.balance_blocks["CoRT_descript"]} and cloze_descript=={conds}')
# strip erroneous characters from sentences
dataframe['stim'] = dataframe['full_sentence'].str.replace('|', ' ', regex=False) # literal '|' replacement
return dataframe
def _create_new_columns(self, dataframe):
# add condition column
dataframe['condition_name'] = dataframe['cloze_descript'].apply(lambda x: self.balance_blocks['condition_name'][x])
dataframe['stem_word_dur'] = self.stem_word_dur
dataframe['last_word_dur'] = self.last_word_dur
dataframe['trial_dur_correct'] = (dataframe['word_count'] * dataframe['stem_word_dur']) + self.iti_dur + dataframe['last_word_dur']
dataframe['display_trial_feedback'] = self.display_trial_feedback
dataframe.drop({'full_sentence'}, inplace=True, axis=1)
return dataframe
def _add_random_word(self, dataframe, columns):
""" sample `frac_random` and add to `full_sentence`
Args:
dataframe (pandas dataframe): dataframe
Returns:
dataframe with modified `full_sentence` col
"""
idx = dataframe.groupby(columns).apply(lambda x: x.sample(frac=self.frac, replace=False, random_state=self.random_state)).index
sampidx = idx.get_level_values(len(columns)) # last level holds the original row index of the sampled rows
dataframe["trial_type"] = ~dataframe.index.isin(sampidx)
dataframe["last_word"] = dataframe.apply(lambda x: x["random_word"] if not x["trial_type"] else x["target_word"], axis=1)
return dataframe
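# Illustrative example: with frac = .3 and, say, 40 sentences in a condition group, ~12 rows
# are sampled; those get trial_type False and their `random_word` as `last_word`, while the
# remaining rows keep their `target_word` (trial_type True).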
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for semantic prediction given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`condition_name` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe according to `balance_blocks`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
# add random word based on `self.frac`
df_target = self._add_random_word(dataframe=df_target, columns=['condition_name']) # 'CoRT_descript'
# save out target files
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class ActionObservation(Utils):
"""
This class makes target files for Action Observation using parameters set in __init__
Args:
task_name (str): 'action_observation'
logging_file (str): csv file containing info about stimuli
video_name (list of str): name of video(s) to include
manipulation (str): 'left_right' or 'miss_goal'
resized (bool): resize frames of video
balance_blocks (dict): keys are 'player_name', 'condition_name', 'trial_type'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (int): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = "action_observation"
self.logging_file = 'all_clips_annotation_logging.csv'
self.video_name = ['dynamic_120ms']
self.manipulation = 'left_right'
self.resized = True
self.balance_blocks = {'player_name': ['DC', 'EW'], 'condition_name': ['easy', 'hard'], 'trial_type': ['left', 'right']}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 2
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'left'
self.replace = True # sample with or without replacement
self.display_trial_feedback = True
def _filter_dataframe(self, dataframe):
def _get_player(x):
    # map a video filename to the player's initials
    if x.find('DC') >= 0:
        return 'DC'
    elif x.find('FI') >= 0:
        return 'FI'
    elif x.find('EW') >= 0:
        return 'EW'
    print('player does not exist')
    return None
# remove all filenames where any of the videos have not been extracted
# and where the player did not accurately hit the target (success=F)
stims_to_remove = dataframe.query('extracted==False or player_success=="?"')["video_name"].to_list()
df_filtered = dataframe[~dataframe["video_name"].isin(stims_to_remove)]
# remove rows without video info
df_filtered = df_filtered.query(f'condition_name=={self.video_name}')
# create `player_name`
df_filtered['player_name'] = df_filtered['video_name'].apply(lambda x: _get_player(x))
# filter `player_name`
cond = self.balance_blocks['player_name']
df_filtered = df_filtered.query(f'player_name=={cond}')
# figure out the actual hits. certain trials (~14%) were misses. enter the actual hit.
df_filtered.loc[df_filtered['hit_target'].isnull(), 'hit_target'] = df_filtered['instructed_target']
return df_filtered
def _create_new_columns(self, dataframe):
def _get_condition(x):
if self.manipulation=="miss_goal":
easy = [1,2,7,8,9,10,15,16]
hard = [3,4,5,6,11,12,13,14]
elif self.manipulation=="left_right":
easy = [1,2,3,4,13,14,15,16]
hard = [5,6,7,8,9,10,11,12]
else:
print('manipulation does not exist')
if x in easy:
condition = "easy"
elif x in hard:
condition = "hard"
else:
condition = float("NaN")
print(f'{x} not in list')
return condition
def _get_trial_type(x):
if self.manipulation=="miss_goal":
list1= [5,6,7,8,9,10,11,12]
list2 = [1,2,3,4,13,14,15,16]
value1 = "goal"
value2 = "miss"
elif self.manipulation=="left_right":
list1 = [1,2,3,4,5,6,7,8]
list2 = [9,10,11,12,13,14,15,16]
value1 = True # 'right'
value2 = False # 'left'
else:
print('manipulation does not exist')
if x in list1:
trial = value1
elif x in list2:
trial = value2
else:
trial = float("NaN")
print(f'{x} not in list')
return trial
# make new image column
if self.resized:
dataframe['stim'] = dataframe['video_name'] + '_' + dataframe['condition_name'] + '_resized' + '.mp4'
else:
dataframe['stim'] = dataframe['video_name'] + '_' + dataframe['condition_name'] + '.mp4'
# divide targets between easy and hard
dataframe['condition_name'] = dataframe['hit_target'].apply(lambda x: _get_condition(x))
# either miss_goal or left_right based on manipulation
dataframe['trial_type'] = dataframe['hit_target'].apply(lambda x: _get_trial_type(x))
# get time of extraction for video (round to two decimals)
dataframe['video_start_time'] = np.round(dataframe['interact_start'] - dataframe['secs_before_interact'], 2)
dataframe['video_end_time'] = np.round(dataframe['interact_start'] + dataframe['secs_after_interact'], 2)
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for action observation given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`trial_type` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe according to `balance_blocks`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class TheoryOfMind(Utils):
"""
This class makes target files for Theory of Mind using parameters set in __init__
Args:
task_name (str): 'theory_of_mind'
logging_file (str): csv file containing info about stimuli
story_dur (int): length of story (sec)
question_dur (int): length of question (sec)
frac (float): proportion of meaningless trials. default is .3.
balance_blocks (dict): keys are 'condition_name'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (int): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'theory_of_mind'
self.logging_file = 'theory_of_mind.csv'
self.story_dur = 10
self.question_dur = 4
self.frac = .3
self.balance_blocks = {'condition_name': ['belief','photo'],'trial_type': [True, False]}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 14
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'left'
self.replace = False
self.display_trial_feedback = True
def _filter_dataframe(self, dataframe):
dataframe = dataframe.query(f'condition=={self.balance_blocks["condition_name"]} and response=={self.balance_blocks["trial_type"]}')
return dataframe
def _create_new_columns(self, dataframe):
# add condition column
# dataframe['condition_name'] = dataframe['condition'].apply(lambda x: self.balance_blocks['condition_name'][x])
dataframe['condition_name'] = dataframe['condition']
dataframe['story_dur'] = self.story_dur
dataframe['question_dur'] = self.question_dur
dataframe['trial_dur_correct'] = dataframe['story_dur'] + self.iti_dur + dataframe['question_dur']
dataframe['display_trial_feedback'] = self.display_trial_feedback
responses = self.balance_blocks['trial_type']
dataframe['trial_type'] = dataframe['response'].apply(lambda x: True if x==responses[0] else False)
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for theory of mind given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
import datetime as dt
import os.path
import re
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pint.errors
import pytest
import scmdata.processing
from scmdata import ScmRun
from scmdata.errors import MissingRequiredColumnError, NonUniqueMetadataError
from scmdata.testing import _check_pandas_less_120
@pytest.fixture(scope="function")
def test_processing_scm_df():
data = np.array(
[
[1, 1.1, 1.2, 1.1],
[1.1, 1.2, 1.3, 1.41],
[1.3, 1.4, 1.5, 1.6],
[1.3, 1.5, 1.6, 1.2],
[1.48, 1.51, 1.72, 1.56],
]
).T
yield ScmRun(
data=data,
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario"],
"region": ["World"],
"variable": ["Surface Air Temperature Change"],
"unit": ["K"],
"ensemble_member": range(data.shape[1]),
},
index=[2005, 2006, 2007, 2100],
)
@pytest.fixture()
def test_processing_scm_df_multi_climate_model(test_processing_scm_df):
other = test_processing_scm_df + 0.1
other["climate_model"] = "z_model"
return test_processing_scm_df.append(other)
crossing_times_year_conversions = pytest.mark.parametrize(
"return_year,conv_to_year", ((None, True), (True, True), (False, False),)
)
def _get_calculate_crossing_times_call_kwargs(return_year):
call_kwargs = {}
if return_year is not None:
call_kwargs["return_year"] = return_year
return call_kwargs
def _get_expected_crossing_times(exp_vals, conv_to_year):
if conv_to_year:
exp_vals = [v if pd.isnull(v) else v.year for v in exp_vals]
else:
exp_vals = [pd.NaT if pd.isnull(v) else v for v in exp_vals]
return exp_vals
@pytest.mark.parametrize(
"threshold,exp_vals",
(
(
1.0,
[
dt.datetime(2006, 1, 1), # doesn't cross 1.0 until 2006
dt.datetime(2005, 1, 1),
dt.datetime(2005, 1, 1),
dt.datetime(2005, 1, 1),
dt.datetime(2005, 1, 1),
],
),
(
1.5,
[
np.nan, # never crosses
np.nan, # never crosses
dt.datetime(2100, 1, 1), # doesn't cross 1.5 until 2100
dt.datetime(2007, 1, 1), # 2007 is first year to actually exceed 1.5
dt.datetime(2006, 1, 1),
],
),
(2.0, [np.nan, np.nan, np.nan, np.nan, np.nan]),
),
)
@crossing_times_year_conversions
def test_crossing_times(
threshold, exp_vals, return_year, conv_to_year, test_processing_scm_df
):
call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)
res = scmdata.processing.calculate_crossing_times(
test_processing_scm_df, threshold=threshold, **call_kwargs,
)
exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)
exp = pd.Series(exp_vals, pd.MultiIndex.from_frame(test_processing_scm_df.meta))
pdt.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"end_year",
(
5000,
pytest.param(
10 ** 3, marks=pytest.mark.xfail(reason="ScmRun fails to initialise #179")
),
pytest.param(
10 ** 4, marks=pytest.mark.xfail(reason="ScmRun fails to initialise #179")
),
),
)
@crossing_times_year_conversions
def test_crossing_times_long_runs(
end_year, return_year, conv_to_year, test_processing_scm_df
):
test_processing_scm_df = test_processing_scm_df.timeseries(time_axis="year").rename(
{2100: end_year}, axis="columns"
)
test_processing_scm_df = scmdata.ScmRun(test_processing_scm_df)
call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)
res = scmdata.processing.calculate_crossing_times(
test_processing_scm_df, threshold=1.5, **call_kwargs,
)
exp_vals = [
np.nan,
np.nan,
dt.datetime(end_year, 1, 1),
dt.datetime(2007, 1, 1),
dt.datetime(2006, 1, 1),
]
exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)
exp = pd.Series(exp_vals, pd.MultiIndex.from_frame(test_processing_scm_df.meta))
pdt.assert_series_equal(res, exp)
@crossing_times_year_conversions
def test_crossing_times_multi_climate_model(
return_year, conv_to_year, test_processing_scm_df_multi_climate_model
):
call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)
threshold = 1.5
exp_vals = [
# a_model
np.nan,
np.nan,
dt.datetime(2100, 1, 1),
dt.datetime(2007, 1, 1),
dt.datetime(2006, 1, 1),
# z_model
np.nan,
dt.datetime(2100, 1, 1),
dt.datetime(2007, 1, 1),
dt.datetime(2006, 1, 1),
dt.datetime(2005, 1, 1),
]
res = scmdata.processing.calculate_crossing_times(
test_processing_scm_df_multi_climate_model, threshold=threshold, **call_kwargs,
)
exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)
exp = pd.Series(
exp_vals,
pd.MultiIndex.from_frame(test_processing_scm_df_multi_climate_model.meta),
)
pdt.assert_series_equal(res, exp)
def _get_expected_crossing_time_quantiles(
cts, groups, exp_quantiles, interpolation, nan_fill_value, nan_fill_threshold
):
cts = cts.fillna(nan_fill_value)
cts_qs = cts.groupby(groups).quantile(q=exp_quantiles, interpolation=interpolation)
out = cts_qs.where(cts_qs < nan_fill_threshold)
out.index = out.index.set_names("quantile", level=-1)
return out
@pytest.mark.parametrize(
"groups", (["model", "scenario"], ["climate_model", "model", "scenario"])
)
@pytest.mark.parametrize(
"quantiles,exp_quantiles",
(
(None, [0.05, 0.5, 0.95]),
([0.05, 0.17, 0.5, 0.83, 0.95], [0.05, 0.17, 0.5, 0.83, 0.95]),
),
)
@pytest.mark.parametrize(
"interpolation,exp_interpolation",
((None, "linear"), ("linear", "linear"), ("nearest", "nearest"),),
)
def test_crossing_times_quantiles(
groups,
quantiles,
exp_quantiles,
interpolation,
exp_interpolation,
test_processing_scm_df_multi_climate_model,
):
threshold = 1.5
crossing_times = scmdata.processing.calculate_crossing_times(
test_processing_scm_df_multi_climate_model,
threshold=threshold,
# return_year False handled in
# test_crossing_times_quantiles_datetime_error
return_year=True,
)
exp = _get_expected_crossing_time_quantiles(
crossing_times,
groups,
exp_quantiles,
exp_interpolation,
nan_fill_value=10 ** 6,
nan_fill_threshold=10 ** 5,
)
call_kwargs = {"groupby": groups}
if quantiles is not None:
call_kwargs["quantiles"] = quantiles
if interpolation is not None:
call_kwargs["interpolation"] = interpolation
res = scmdata.processing.calculate_crossing_times_quantiles(
crossing_times, **call_kwargs
)
if _check_pandas_less_120():
check_dtype = False
else:
check_dtype = True
pdt.assert_series_equal(res, exp, check_dtype=check_dtype)
def test_crossing_times_quantiles_datetime_error(
test_processing_scm_df_multi_climate_model,
):
crossing_times = scmdata.processing.calculate_crossing_times(
test_processing_scm_df_multi_climate_model, threshold=1.5, return_year=False,
)
with pytest.raises(NotImplementedError):
scmdata.processing.calculate_crossing_times_quantiles(
crossing_times, ["model", "scenario"]
)
@pytest.mark.parametrize(
"nan_fill_value,out_nan_threshold,exp_vals",
(
(None, None, [2025.4, 2027.0, np.nan]),
(None, 10 ** 4, [2025.4, 2027.0, np.nan]),
(10 ** 5, 10 ** 4, [2025.4, 2027.0, np.nan]),
(10 ** 6, 10 ** 5, [2025.4, 2027.0, np.nan]),
(
# fill value less than threshold means calculated quantiles are used
3000,
10 ** 5,
[2025.4, 2027.0, 2805.4],
),
(3000, 2806, [2025.4, 2027.0, 2805.4]),
(3000, 2805, [2025.4, 2027.0, np.nan]),
),
)
def test_crossing_times_quantiles_nan_fill_values(
nan_fill_value, out_nan_threshold, exp_vals
):
data = np.array(
[
[1.3, 1.35, 1.5, 1.52],
[1.37, 1.43, 1.54, 1.58],
[1.48, 1.51, 1.72, 2.02],
[1.55, 1.65, 1.85, 2.1],
[1.42, 1.46, 1.55, 1.62],
]
).T
ensemble = scmdata.ScmRun(
data=data,
index=[2025, 2026, 2027, 2100],
columns={
"model": ["a_iam"],
"climate_model": ["a_model"],
"scenario": ["a_scenario"],
"region": ["World"],
"variable": ["Surface Air Temperature Change"],
"unit": ["K"],
"ensemble_member": range(data.shape[1]),
},
)
call_kwargs = {}
if nan_fill_value is not None:
call_kwargs["nan_fill_value"] = nan_fill_value
if out_nan_threshold is not None:
call_kwargs["out_nan_threshold"] = out_nan_threshold
crossing_times = scmdata.processing.calculate_crossing_times(
ensemble, threshold=1.53, return_year=True,
)
res = scmdata.processing.calculate_crossing_times_quantiles(
crossing_times,
["climate_model", "scenario"],
quantiles=(0.05, 0.5, 0.95),
**call_kwargs,
)
exp = pd.Series(
exp_vals,
pd.MultiIndex.from_product(
[["a_model"], ["a_scenario"], [0.05, 0.5, 0.95]],
names=["climate_model", "scenario", "quantile"],
),
)
if _check_pandas_less_120():
check_dtype = False
else:
check_dtype = True
pdt.assert_series_equal(res, exp, check_dtype=check_dtype)
output_name_options = pytest.mark.parametrize(
"output_name", (None, "test", "test other")
)
def _get_calculate_exceedance_probs_call_kwargs(output_name):
call_kwargs = {}
if output_name is not None:
call_kwargs["output_name"] = output_name
return call_kwargs
def _get_calculate_exeedance_probs_expected_name(output_name, threshold):
if output_name is not None:
return output_name
return "{} exceedance probability".format(threshold)
@pytest.mark.parametrize(
"threshold,exp_vals",
(
(1.0, [0.8, 1.0, 1.0, 1.0]),
(1.5, [0.0, 0.2, 0.4, 0.4]),
(2.0, [0.0, 0.0, 0.0, 0.0]),
),
)
@output_name_options
def test_exceedance_probabilities_over_time(
output_name, threshold, exp_vals, test_processing_scm_df
):
call_kwargs = _get_calculate_exceedance_probs_call_kwargs(output_name)
res = scmdata.processing.calculate_exceedance_probabilities_over_time(
test_processing_scm_df,
process_over_cols="ensemble_member",
threshold=threshold,
**call_kwargs,
)
exp_idx = pd.MultiIndex.from_frame(
test_processing_scm_df.meta.drop(
"ensemble_member", axis="columns"
).drop_duplicates()
)
exp = pd.DataFrame(
np.array(exp_vals)[np.newaxis, :],
index=exp_idx,
columns=test_processing_scm_df.time_points.to_index(),
)
exp.index = exp.index.set_levels(
[_get_calculate_exeedance_probs_expected_name(output_name, threshold)],
level="variable",
).set_levels(["dimensionless"], level="unit",)
pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)
def test_exceedance_probabilities_over_time_multiple_res(
test_processing_scm_df_multi_climate_model,
):
start = test_processing_scm_df_multi_climate_model.copy()
threshold = 1.5
exp_vals = np.array([[0, 1, 2, 2], [1, 2, 3, 3]]) / 5
res = scmdata.processing.calculate_exceedance_probabilities_over_time(
start, process_over_cols=["ensemble_member"], threshold=threshold,
)
exp_idx = pd.MultiIndex.from_frame(
start.meta.drop(["ensemble_member"], axis="columns").drop_duplicates()
)
exp = pd.DataFrame(exp_vals, index=exp_idx, columns=start.time_points.to_index())
exp.index = exp.index.set_levels(
[_get_calculate_exeedance_probs_expected_name(None, threshold)],
level="variable",
).set_levels(["dimensionless"], level="unit",)
pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)
def test_exceedance_probabilities_over_time_multiple_grouping(
test_processing_scm_df_multi_climate_model,
):
start = test_processing_scm_df_multi_climate_model.copy()
threshold = 1.5
exp_vals = np.array([1, 3, 5, 5]) / 10
res = scmdata.processing.calculate_exceedance_probabilities_over_time(
start,
process_over_cols=["climate_model", "ensemble_member"],
threshold=threshold,
)
exp_idx = pd.MultiIndex.from_frame(
start.meta.drop(
["climate_model", "ensemble_member"], axis="columns"
).drop_duplicates()
)
exp = pd.DataFrame(
exp_vals[np.newaxis, :], index=exp_idx, columns=start.time_points.to_index(),
)
exp.index = exp.index.set_levels(
[_get_calculate_exeedance_probs_expected_name(None, threshold)],
level="variable",
).set_levels(["dimensionless"], level="unit",)
pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)
@pytest.mark.parametrize(
"threshold,exp_val", ((1.0, 1.0), (1.5, 0.6), (2.0, 0.0),),
)
@output_name_options
def test_exceedance_probabilities(
output_name, threshold, exp_val, test_processing_scm_df
):
call_kwargs = _get_calculate_exceedance_probs_call_kwargs(output_name)
res = scmdata.processing.calculate_exceedance_probabilities(
test_processing_scm_df,
process_over_cols="ensemble_member",
threshold=threshold,
**call_kwargs,
)
exp_idx = pd.MultiIndex.from_frame(
test_processing_scm_df.meta.drop(
"ensemble_member", axis="columns"
).drop_duplicates()
)
exp = pd.Series(exp_val, index=exp_idx)
exp.name = _get_calculate_exeedance_probs_expected_name(output_name, threshold)
exp.index = exp.index.set_levels(["dimensionless"], level="unit",)
pdt.assert_series_equal(res, exp)
def test_exceedance_probabilities_multiple_res(
test_processing_scm_df_multi_climate_model,
):
start = test_processing_scm_df_multi_climate_model.copy()
threshold = 1.5
exp_vals = [0.6, 0.8]
res = scmdata.processing.calculate_exceedance_probabilities(
start, process_over_cols=["ensemble_member"], threshold=threshold,
)
exp_idx = pd.MultiIndex.from_frame(
start.meta.drop("ensemble_member", axis="columns").drop_duplicates()
)
exp = pd.Series(exp_vals, index=exp_idx)
exp.name = _get_calculate_exeedance_probs_expected_name(None, threshold)
exp.index = exp.index.set_levels(["dimensionless"], level="unit",)
pdt.assert_series_equal(res, exp)
def test_exceedance_probabilities_multiple_grouping(
test_processing_scm_df_multi_climate_model,
):
start = test_processing_scm_df_multi_climate_model.copy()
threshold = 1.5
exp_vals = [0.7]
res = scmdata.processing.calculate_exceedance_probabilities(
start,
process_over_cols=["ensemble_member", "climate_model"],
threshold=threshold,
)
exp_idx = pd.MultiIndex.from_frame(
start.meta.drop(
["ensemble_member", "climate_model"], axis="columns"
).drop_duplicates()
)
exp = pd.Series(exp_vals, index=exp_idx)
exp.name = _get_calculate_exeedance_probs_expected_name(None, threshold)
exp.index = exp.index.set_levels(["dimensionless"], level="unit",)
pdt.assert_series_equal(res, exp)
import os
import pandas as pd
import csv
from sklearn.model_selection import train_test_split
import numpy as np
import random
import tensorflow as tf
import torch
#directory of tasks dataset
os.chdir("original_data")
# destination path to create tsv files, depends on the data cutting
path_0 = "mttransformer/data/0"
path_100_no_gan = "mttransformer/data/100/no_gan"
path_200_no_gan = "mttransformer/data/200/no_gan"
path_500_no_gan = "mttransformer/data/500/no_gan"
path_100_gan = "mttransformer/data/100/gan"
path_200_gan = "mttransformer/data/200/gan"
path_500_gan = "mttransformer/data/500/gan"
# if you use a model with GAN, set the flag "apply_gan" to True; otherwise False
apply_gan=False
#data cutting
number_labeled_examples=0 #0-100-200-500
# set to True to activate balancing, which is only used in the multi-task models MT-DNN and MT-GANBERT
balancing=False
#path train and test dataset of the task
tsv_haspeede_train = 'haspeede_TW-train.tsv'
tsv_haspeede_test = 'haspeede_TW-reference.tsv'
tsv_AMI2018_train = 'AMI2018_it_training.tsv'
tsv_AMI2018_test = 'AMI2018_it_testing.tsv'
tsv_AMI2018_train = 'AMI2018_it_training.tsv'
tsv_AMI2018_test = 'AMI2018_it_testing.tsv'
tsv_DANKMEMES2020_train = 'dankmemes_task2_train.csv'
tsv_DANKMEMES2020_test = 'hate_test.csv'
tsv_SENTIPOLC2016_train = 'training_set_sentipolc16.csv'
tsv_SENTIPOLC2016_test = 'test_set_sentipolc16_gold2000.csv'
tsv_SENTIPOLC2016_train = 'training_set_sentipolc16.csv'
tsv_SENTIPOLC2016_test = 'test_set_sentipolc16_gold2000.csv'
#Upload the dataset of all task as dataframes
#haspeede_TW
df_train = pd.read_csv(tsv_haspeede_train, delimiter='\t', names=('id','sentence','label'))
df_train = df_train[['id']+['label']+['sentence']]
df_test = pd.read_csv(tsv_haspeede_test, delimiter='\t', names=('id','sentence','label'))
df_test = df_test[['id']+['label']+['sentence']]
#AMI2018A
df_train2 = pd.read_csv(tsv_AMI2018_train, delimiter='\t')
df_train2 = df_train2[['id']+['misogynous']+['text']]
df_test2 = pd.read_csv(tsv_AMI2018_test, delimiter='\t')
df_test2 = df_test2[['id']+['misogynous']+['text']]
#AMI2018B
df_train3 = pd.read_csv(tsv_AMI2018_train, delimiter='\t')
df = pd.DataFrame(columns=['id', 'misogyny_category', 'text'])
for ind in df_train3.index:
if df_train3.misogynous[ind]==1:
if df_train3.misogyny_category[ind] == 'stereotype':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 0, 'text' : df_train3['text'][ind] }, ignore_index=True)
#elif df_train3.misogyny_category[ind] == 'dominance':
#df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 1, 'text' : df_train3['text'][ind] }, ignore_index=True)
#elif df_train3.misogyny_category[ind] == 'derailing':
#df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 2, 'text' : df_train3['text'][ind] }, ignore_index=True)
elif df_train3.misogyny_category[ind] == 'sexual_harassment':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 1, 'text' : df_train3['text'][ind] }, ignore_index=True)
elif df_train3.misogyny_category[ind] == 'discredit':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 2, 'text' : df_train3['text'][ind] }, ignore_index=True)
df_train3 = df
df_test3 = pd.read_csv(tsv_AMI2018_test, delimiter='\t')
df = pd.DataFrame(columns=['id', 'misogyny_category', 'text'])
for ind in df_test3.index:
if df_test3.misogynous[ind]==1:
if df_test3.misogyny_category[ind] == 'stereotype':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 0, 'text' : df_test3['text'][ind] }, ignore_index=True)
#elif df_test3.misogyny_category[ind] == 'dominance':
#df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 1, 'text' : df_test3['text'][ind] }, ignore_index=True)
#elif df_test3.misogyny_category[ind] == 'derailing':
#df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 2, 'text' : df_test3['text'][ind] }, ignore_index=True)
elif df_test3.misogyny_category[ind] == 'sexual_harassment':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 1, 'text' : df_test3['text'][ind] }, ignore_index=True)
elif df_test3.misogyny_category[ind] == 'discredit':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 2, 'text' : df_test3['text'][ind] }, ignore_index=True)
df_test3 = df
#DANKMEMES2020
df_train4 = pd.read_csv(tsv_DANKMEMES2020_train, delimiter=',')
df_train4 = df_train4[['File']+['Hate Speech']+['Text']]
df_test4 = pd.read_csv(tsv_DANKMEMES2020_test, delimiter=',')
df_test4 = df_test4[['File']+['Hate Speech']+['Text']]
#SENTIPOLC20161
df_train5 = pd.read_csv(tsv_SENTIPOLC2016_train, delimiter=',')
df_train5 = df_train5[['idtwitter']+['subj']+['text']]
df_test5 = pd.read_csv(tsv_SENTIPOLC2016_test, delimiter=',')
df_test5 = df_test5[['idtwitter']+['subj']+['text']]
for ind in df_train5.index:
if "\t" in df_train5.text[ind]:
df_train5 = df_train5.replace(to_replace='\t', value='', regex=True)
#SENTIPOLC20162
df_train6 = pd.read_csv(tsv_SENTIPOLC2016_train, delimiter=',')
df = pd.DataFrame(columns=['idtwitter', 'polarity', 'text'])
for ind in df_train6.index:
if df_train6['subj'][ind] == 1:
if df_train6['opos'][ind] == 1 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 0, 'text' : df_train6['text'][ind] }, ignore_index=True)
elif df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 1:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 1, 'text' : df_train6['text'][ind] }, ignore_index=True)
elif df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 2, 'text' : df_train6['text'][ind] }, ignore_index=True)
else:
if df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 2, 'text' : df_train6['text'][ind] }, ignore_index=True)
df_train6 = df
for ind in df_train6.index:
if "\t" in df_train6.text[ind]:
df_train6 = df_train6.replace(to_replace='\t', value='', regex=True)
df_test6 = pd.read_csv(tsv_SENTIPOLC2016_test, delimiter=',')
df = pd.DataFrame(columns=['idtwitter', 'polarity', 'text'])
for ind in df_test6.index:
if df_test6['subj'][ind] == 1:
if df_test6['opos'][ind] == 1 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 0, 'text' : df_test6['text'][ind] }, ignore_index=True)
elif df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 1:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 1, 'text' : df_test6['text'][ind] }, ignore_index=True)
elif df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 2, 'text' : df_test6['text'][ind] }, ignore_index=True)
else:
if df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 2, 'text' : df_test6['text'][ind] }, ignore_index=True)
df_test6 = df
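# Polarity coding used above: 0 = positive (opos=1, oneg=0), 1 = negative (opos=0, oneg=1),
# 2 = neutral (opos=0, oneg=0); tweets tagged as both positive and negative are dropped.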
#split train dev, in all tasks
train_dataset, dev_dataset = train_test_split(df_train, test_size=0.2, shuffle = True)
train_dataset2, dev_dataset2 = train_test_split(df_train2, test_size=0.2, shuffle = True)
train_dataset3, dev_dataset3 = train_test_split(df_train3, test_size=0.2, shuffle = True)
train_dataset4, dev_dataset4 = train_test_split(df_train4, test_size=0.2, shuffle = True)
train_dataset5, dev_dataset5 = train_test_split(df_train5, test_size=0.2, shuffle = True)
train_dataset6, dev_dataset6 = train_test_split(df_train6, test_size=0.2, shuffle = True)
#reduction of datasets in case of data cutting 100, 200, 500
if number_labeled_examples!=0:
if number_labeled_examples==100:
labeled = train_dataset.sample(n=100)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=100)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=100)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=100)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=100)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=100)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
elif number_labeled_examples==200:
labeled = train_dataset.sample(n=200)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=200)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=200)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=200)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=200)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=200)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
elif number_labeled_examples==500:
labeled = train_dataset.sample(n=500)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=500)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=500)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=500)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=500)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=500)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
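# The three branches above differ only in the sample size; a generic helper could express the
# same labeled/unlabeled split (illustrative sketch only -- defined here but not used below):
def split_labeled_unlabeled(train_df, id_col, n, random_state=None):
    """Return (labeled sample of n rows, remaining rows as unlabeled) -- sketch."""
    labeled_df = train_df.sample(n=n, random_state=random_state)
    unlabeled_df = train_df[~train_df[id_col].isin(labeled_df[id_col])]
    return labeled_df, unlabeled_df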
#model with or without gan
if apply_gan == True:
print("MT-GANBERT")
#dataset unlabeled with label -1
unlabeled['label'] = unlabeled['label'].replace(0,-1)
unlabeled['label'] = unlabeled['label'].replace(1,-1)
unlabeled2['misogynous'] = unlabeled2['misogynous'].replace(0,-1)
unlabeled2['misogynous'] = unlabeled2['misogynous'].replace(1,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(0,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(1,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(2,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(3,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(4,-1)
unlabeled4['Hate Speech'] = unlabeled4['Hate Speech'].replace(0,-1)
unlabeled4['Hate Speech'] = unlabeled4['Hate Speech'].replace(1,-1)
unlabeled5['subj'] = unlabeled5['subj'].replace(0,-1)
unlabeled5['subj'] = unlabeled5['subj'].replace(1,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(0,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(1,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(2,-1)
train = pd.concat([labeled, unlabeled])
train2 = pd.concat([labeled2, unlabeled2])
train3 = pd.concat([labeled3, unlabeled3])
train4 = pd.concat([labeled4, unlabeled4])
train5 = pd.concat([labeled5, unlabeled5])
train6 = pd.concat([labeled6, unlabeled6])
dev = dev_dataset
dev2 = dev_dataset2
dev3 = dev_dataset3
dev4 = dev_dataset4
dev5 = dev_dataset5
dev6 = dev_dataset6
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train),len(labeled), len(unlabeled)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train2),len(labeled2), len(unlabeled2)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train3),len(labeled3), len(unlabeled3)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train4),len(labeled4), len(unlabeled4)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train5),len(labeled5), len(unlabeled5)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train6),len(labeled6), len(unlabeled6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
else:
print("MT-DNN, with reduction dataset")
train = labeled
train2 = labeled2
train3 = labeled3
train4 = labeled4
train5 = labeled5
train6 = labeled6
dev = dev_dataset
dev2 = dev_dataset2
dev3 = dev_dataset3
dev4 = dev_dataset4
dev5 = dev_dataset5
dev6 = dev_dataset6
print("Size of Train dataset is {} ".format(len(labeled)))
print("Size of Train dataset is {} ".format(len(labeled2)))
print("Size of Train dataset is {} ".format(len(labeled3)))
print("Size of Train dataset is {} ".format(len(labeled4)))
print("Size of Train dataset is {} ".format(len(labeled5)))
print("Size of Train dataset is {} ".format(len(labeled6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
else:
print("MT-DNN")
train = train_dataset
train2 = train_dataset2
train3 = train_dataset3
train4 = train_dataset4
train5 = train_dataset5
train6 = train_dataset6
dev = dev_dataset
dev2 = dev_dataset2
dev3=dev_dataset3
dev4=dev_dataset4
dev5=dev_dataset5
dev6=dev_dataset6
print("Size of Train dataset is {} ".format(len(train)))
print("Size of Train dataset is {} ".format(len(train2)))
print("Size of Train dataset is {} ".format(len(train3)))
print("Size of Train dataset is {} ".format(len(train4)))
print("Size of Train dataset is {} ".format(len(train5)))
print("Size of Train dataset is {} ".format(len(train6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
#Balancing for:
#- MT-DNN, trained on the total dataset of each task
#- MT-GAN, trained on the chosen data cutting of each task
if balancing==True:
if apply_gan== True:
print("MT-GAN")
max_train_un = max(len(unlabeled), len(unlabeled2), len(unlabeled3), len(unlabeled4), len(unlabeled5), len(unlabeled6))
print(max_train_un)
else:
print("MT-DNN")
unlabeled=train
unlabeled2=train2
unlabeled3=train3
unlabeled4=train4
unlabeled5=train5
unlabeled6=train6
max_train_un = max(len(unlabeled), len(unlabeled2), len(unlabeled3), len(unlabeled4), len(unlabeled5), len(unlabeled6))
print(max_train_un)
# pad the smaller dataset by repeating its rows until it reaches max_train_un
df = pd.DataFrame(columns=['id', 'label', 'sentence'])
count=0
if len(unlabeled)<max_train_un:
for i in range(max_train_un):
if i < len(unlabeled):
df = df.append({'id' : unlabeled.iloc[i, 0], 'label' : unlabeled.iloc[i, 1], 'sentence' : unlabeled.iloc[i, 2] }, ignore_index=True)
else:
if count < len(unlabeled):
df = df.append({'id' : unlabeled.iloc[count, 0], 'label' : unlabeled.iloc[count, 1], 'sentence' : unlabeled.iloc[count, 2] }, ignore_index=True)
count = count+1
else:
count = 0
df = df.append({'id' : unlabeled.iloc[count, 0], 'label' : unlabeled.iloc[count, 1], 'sentence' : unlabeled.iloc[count, 2] }, ignore_index=True)
count = count+1
unlabeled = df
if apply_gan== True:
train = pd.concat([labeled, unlabeled])
else:
train=unlabeled
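# The padding loop above cycles through `unlabeled` row by row until it reaches max_train_un
# rows. An equivalent vectorised sketch (same idea, shown for reference only):
#   reps = int(np.ceil(max_train_un / len(unlabeled)))
#   unlabeled = pd.concat([unlabeled] * reps, ignore_index=True).iloc[:max_train_un]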
df = pd.DataFrame(columns=['id', 'misogynous', 'text'])
count=0
if len(unlabeled2)<max_train_un:
for i in range(max_train_un):
if i < len(unlabeled2):
df = df.append({'id' : unlabeled2.iloc[i, 0], 'misogynous' : unlabeled2.iloc[i, 1], 'text' : unlabeled2.iloc[i, 2] }, ignore_index=True)
else:
if count < len(unlabeled2):
df = df.append({'id' : unlabeled2.iloc[count, 0], 'misogynous' : unlabeled2.iloc[count, 1], 'text' : unlabeled2.iloc[count, 2] }, ignore_index=True)
count = count+1
else:
count = 0
df = df.append({'id' : unlabeled2.iloc[count, 0], 'misogynous' : unlabeled2.iloc[count, 1], 'text' : unlabeled2.iloc[count, 2] }, ignore_index=True)
count = count+1
unlabeled2 = df
if apply_gan==True:
train2 = pd.concat([labeled2, unlabeled2])
# CCI (Commodity Channel Index)
# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
# Measures the difference between a security's price change and its average
# price change. High positive readings indicate that prices are well above
# their average, which is a show of strength. Low negative readings indicate
# that prices are well below their average, which is a sign of weakness.
# Args:
#   high (pandas.Series): dataset 'High' column.
#   low (pandas.Series): dataset 'Low' column.
#   close (pandas.Series): dataset 'Close' column.
#   window (int): n period.
#   constant (float): constant.
#   fillna (bool): if True, fill nan values.
import numpy as np
import pandas as pd
from _utilities import IndicatorMixin
class CCIIndicator(IndicatorMixin):
def __init__(
self,
high: pd.Series,
low: pd.Series,
close: pd.Series,
window: int = 20,
constant: float = 0.015,
fillna: bool = False,
):
self._high = high
self._low = low
self._close = close
self._window = window
self._constant = constant
self._fillna = fillna
self._run()
def _run(self):
def _mad(x):
return np.mean(np.abs(x - np.mean(x)))
min_periods = 0 if self._fillna else self._window
typical_price = (self._high + self._low + self._close) / 3.0
self._cci = (
typical_price
- typical_price.rolling(self._window, min_periods=min_periods).mean()
) / (
self._constant
* typical_price.rolling(self._window, min_periods=min_periods).apply(
_mad, True
)
)
def cci(self) -> pd.Series:
cci_series = self._check_fillna(self._cci, value=0)
return pd.Series(cci_series, name="cci")
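# Minimal usage sketch (illustrative only): CCI compares the typical price (high+low+close)/3
# with its rolling mean, scaled by the mean absolute deviation. The OHLC values below are
# made up for demonstration.
if __name__ == "__main__":
    _df = pd.DataFrame(
        {
            "high": [10.2, 10.4, 10.6, 10.3, 10.8],
            "low": [9.8, 10.0, 10.1, 9.9, 10.2],
            "close": [10.0, 10.3, 10.4, 10.1, 10.6],
        }
    )
    _cci = CCIIndicator(high=_df["high"], low=_df["low"], close=_df["close"], window=3).cci()
    print(_cci)  # first window-1 values are NaN with the default fillna=False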
'''
This script tests the function from shapiro_wilk.py
Parameters
----------
None
Returns
-------
Assertion errors if tests fail
'''
# dependencies
import pytest
import numpy as np
import pandas as pd
from normtestPY.shapiro_wilk import shapiro_wilk
# Sample data
data_df = pd.DataFrame({'data' : [41.5,38.7,44.5,43.8,46.0,39.4, 40.6, 42.7],
'data2' : [65,63,86,70,74,35,68,45]})
data_list1 = [41.5,38.7,44.5,43.8,46.0,39.4, 40.6, 42.7]
data_list2 = [1, 2, 3, 4, 5,6,7,8]
data_list3 = [data_list1, data_list2]
data_list4 = [1, 2, 3]
data_ndarray = np.array([[41.5,38.7,44.5,43.8,46.0,39.4, 40.6, 42.7],
[65,63,86,70,74,35,68,45]]).T
data_series1 = pd.Series([41.5,38.7,44.5,43.8,46.0])
import numpy as np
import pytest
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
DatetimeIndex,
Series,
Timestamp,
date_range,
isna,
notna,
offsets,
)
import pandas._testing as tm
class TestSeriesAsof:
def test_asof_nanosecond_index_access(self):
ts = Timestamp("20130101").value
dti = DatetimeIndex([ts + 50 + i for i in range(100)])
ser = Series(np.random.randn(100), index=dti)
first_value = ser.asof(ser.index[0])
# GH#46903 previously incorrectly was "day"
assert dti.resolution == "nanosecond"
# this used to not work bc parsing was done by dateutil that didn't
# handle nanoseconds
assert first_value == ser["2013-01-01 00:00:00.000000050"]
expected_ts = np.datetime64("2013-01-01 00:00:00.000000050", "ns")
assert first_value == ser[Timestamp(expected_ts)]
def test_basic(self):
# array or list or dates
N = 50
rng = date_range("1/1/1990", periods=N, freq="53s")
ts = Series(np.random.randn(N), index=rng)
ts.iloc[15:30] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
val = result[result.index[result.index >= ub][0]]
assert ts[ub] == val
def test_scalar(self):
N = 30
rng = date_range("1/1/1990", periods=N, freq="53s")
ts = Series(np.arange(N), index=rng)
ts.iloc[5:10] = np.NaN
ts.iloc[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
result = ts.asof(ts.index[3])
assert result == ts[3]
# no as of value
d = ts.index[0] - offsets.BDay()
assert np.isnan(ts.asof(d))
def test_with_nan(self):
# basic asof test
rng = date_range("1/1/2000", "1/2/2000", freq="4h")
s = Series(np.arange(len(rng)), index=rng)
r = s.resample("2h").mean()
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
r.iloc[3:5] = np.nan
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
r.iloc[-3:] = np.nan
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
def test_periodindex(self):
from pandas import (
PeriodIndex,
period_range,
)
# array or list or dates
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
ts = Series(np.random.randn(N), index=rng)
ts.iloc[15:30] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="37min")
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq="H")
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
ts.iloc[5:10] = np.nan
ts.iloc[15:20] = np.nan
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
assert ts.asof(ts.index[3]) == ts[3]
# no as of value
d = ts.index[0].to_timestamp() - offsets.BDay()
assert isna(ts.asof(d))
# Mismatched freq
msg = "Input has different freq"
with pytest.raises(IncompatibleFrequency, match=msg):
ts.asof(rng.asfreq("D"))
def test_errors(self):
s = Series(
[1, 2, 3],
index=[Timestamp("20130101"), Timestamp("20130103"), Timestamp("20130102")],
)
# non-monotonic
assert not s.index.is_monotonic_increasing
with pytest.raises(ValueError, match="requires a sorted index"):
s.asof(s.index[0])
# subset with Series
N = 10
rng = date_range("1/1/1990", periods=N, freq="53s")
s = Series(np.random.randn(N), index=rng)
with pytest.raises(ValueError, match="not valid for Series"):
s.asof(s.index[0], subset="foo")
def test_all_nans(self):
# GH 15713
# series is all nans
# testing non-default indexes
N = 50
rng = date_range("1/1/1990", periods=N, freq="53s")
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = | Series(np.nan, index=rng) | pandas.Series |
# -*- coding: utf-8 -*-
import json
import base64
import datetime
import requests
import pathlib
import math
import pandas as pd
import flask
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
from plotly import tools
app = dash.Dash(
__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
server = app.server
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
# Loading historical tick data
currency_pair_data = {
"EURUSD": pd.read_csv(
DATA_PATH.joinpath("EURUSD.csv"), index_col=1, parse_dates=["Date"]
),
"USDJPY": pd.read_csv(
DATA_PATH.joinpath("USDJPY.csv"), index_col=1, parse_dates=["Date"]
),
"GBPUSD": pd.read_csv(
DATA_PATH.joinpath("GBPUSD.csv"), index_col=1, parse_dates=["Date"]
),
"USDCHF": pd.read_csv(
DATA_PATH.joinpath("USDCHF.csv"), index_col=1, parse_dates=["Date"]
),
}
# Currency pairs
currencies = ["EURUSD", "USDCHF", "USDJPY", "GBPUSD"]
# API Requests for news div
news_requests = requests.get(
"https://newsapi.org/v2/top-headlines?sources=bbc-news&apiKey=da8e2e705b914f9f86ed2e9692e66012"
)
# API Call to update news
def update_news():
json_data = news_requests.json()["articles"]
df = pd.DataFrame(json_data)
df = pd.DataFrame(df[["title", "url"]])
max_rows = 10
return html.Div(
children=[
html.P(className="p-news", children="Headlines"),
html.P(
className="p-news float-right",
children="Last update : "
+ datetime.datetime.now().strftime("%H:%M:%S"),
),
html.Table(
className="table-news",
children=[
html.Tr(
children=[
html.Td(
children=[
html.A(
className="td-link",
children=df.iloc[i]["title"],
href=df.iloc[i]["url"],
target="_blank",
)
]
)
]
)
for i in range(min(len(df), max_rows))
],
),
]
)
# Returns dataset for currency pair with nearest datetime to current time
def first_ask_bid(currency_pair, t):
t = t.replace(year=2016, month=1, day=5)
items = currency_pair_data[currency_pair]
dates = items.index.to_pydatetime()
index = min(dates, key=lambda x: abs(x - t))
df_row = items.loc[index]
int_index = items.index.get_loc(index)
return [df_row, int_index] # returns dataset row and index of row
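# Small illustration (added for clarity, not called by the app) of the
# nearest-timestamp lookup used in first_ask_bid: pick the index entry with
# the smallest absolute time difference to a target datetime. Toy data only.
def _demo_nearest_lookup():
    toy = pd.DataFrame(
        {"Bid": [1.0, 1.1, 1.2]},
        index=pd.to_datetime(
            ["2016-01-05 00:00", "2016-01-05 00:05", "2016-01-05 00:10"]
        ),
    )
    target = datetime.datetime(2016, 1, 5, 0, 4)
    nearest = min(toy.index.to_pydatetime(), key=lambda x: abs(x - target))
    return toy.loc[nearest], toy.index.get_loc(nearest)  # row at 00:05, position 1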
# Creates HTML Bid and Ask (Buy/Sell buttons)
def get_row(data):
index = data[1]
current_row = data[0]
return html.Div(
children=[
# Summary
html.Div(
id=current_row[0] + "summary",
className="row summary",
n_clicks=0,
children=[
html.Div(
id=current_row[0] + "row",
className="row",
children=[
html.P(
current_row[0], # currency pair name
id=current_row[0],
className="three-col",
),
html.P(
current_row[1].round(5), # Bid value
id=current_row[0] + "bid",
className="three-col",
),
html.P(
current_row[2].round(5), # Ask value
id=current_row[0] + "ask",
className="three-col",
),
html.Div(
index,
id=current_row[0]
+ "index", # we save index of row in hidden div
style={"display": "none"},
),
],
)
],
),
# Contents
html.Div(
id=current_row[0] + "contents",
className="row details",
children=[
# Button for buy/sell modal
html.Div(
className="button-buy-sell-chart",
children=[
html.Button(
id=current_row[0] + "Buy",
children="Buy/Sell",
n_clicks=0,
)
],
),
# Button to display currency pair chart
html.Div(
className="button-buy-sell-chart-right",
children=[
html.Button(
id=current_row[0] + "Button_chart",
children="Chart",
n_clicks=1
if current_row[0] in ["EURUSD", "USDCHF"]
else 0,
)
],
),
],
),
]
)
# color of Bid & Ask rates
def get_color(a, b):
if a == b:
return "white"
elif a > b:
return "#45df7e"
else:
return "#da5657"
# Replace ask_bid row for currency pair with colored values
def replace_row(currency_pair, index, bid, ask):
index = index + 1 # index of new data row
new_row = (
currency_pair_data[currency_pair].iloc[index]
if index != len(currency_pair_data[currency_pair])
else first_ask_bid(currency_pair, datetime.datetime.now())
) # if not the end of the dataset we retrieve next dataset row
return [
html.P(
currency_pair, id=currency_pair, className="three-col" # currency pair name
),
html.P(
new_row[1].round(5), # Bid value
id=new_row[0] + "bid",
className="three-col",
style={"color": get_color(new_row[1], bid)},
),
html.P(
new_row[2].round(5), # Ask value
className="three-col",
id=new_row[0] + "ask",
style={"color": get_color(new_row[2], ask)},
),
html.Div(
index, id=currency_pair + "index", style={"display": "none"}
), # save index in hidden div
]
# Display big numbers in readable format
def human_format(num):
try:
num = float(num)
# If value is 0
if num == 0:
return 0
# Else value is a number
if num < 1000000:
return num
magnitude = int(math.log(num, 1000))
mantissa = str(int(num / (1000 ** magnitude)))
return mantissa + ["", "K", "M", "G", "T", "P"][magnitude]
except:
return num
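# Illustrative calls (added for clarity, not executed by the app): values
# below one million are returned unchanged, larger values are compressed to a
# mantissa plus a suffix from the K/M/G/T/P list.
#   human_format(0)           -> 0
#   human_format(512.5)       -> 512.5
#   human_format(1234567)     -> "1M"
#   human_format(2500000000)  -> "2G"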
# Returns Top cell bar for header area
def get_top_bar_cell(cellTitle, cellValue):
return html.Div(
className="two-col",
children=[
html.P(className="p-top-bar", children=cellTitle),
html.P(id=cellTitle, className="display-none", children=cellValue),
html.P(children=human_format(cellValue)),
],
)
# Returns HTML Top Bar for app layout
def get_top_bar(
balance=50000, equity=50000, margin=0, fm=50000, m_level="%", open_pl=0
):
return [
get_top_bar_cell("Balance", balance),
get_top_bar_cell("Equity", equity),
get_top_bar_cell("Margin", margin),
get_top_bar_cell("Free Margin", fm),
get_top_bar_cell("Margin Level", m_level),
get_top_bar_cell("Open P/L", open_pl),
]
####### STUDIES TRACES ######
# Moving average
def moving_average_trace(df, fig):
df2 = df.rolling(window=5).mean()
trace = go.Scatter(
x=df2.index, y=df2["close"], mode="lines", showlegend=False, name="MA"
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Exponential moving average
def e_moving_average_trace(df, fig):
df2 = df.rolling(window=20).mean()
trace = go.Scatter(
x=df2.index, y=df2["close"], mode="lines", showlegend=False, name="EMA"
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Bollinger Bands
def bollinger_trace(df, fig, window_size=10, num_of_std=5):
price = df["close"]
rolling_mean = price.rolling(window=window_size).mean()
rolling_std = price.rolling(window=window_size).std()
upper_band = rolling_mean + (rolling_std * num_of_std)
lower_band = rolling_mean - (rolling_std * num_of_std)
trace = go.Scatter(
x=df.index, y=upper_band, mode="lines", showlegend=False, name="BB_upper"
)
trace2 = go.Scatter(
x=df.index, y=rolling_mean, mode="lines", showlegend=False, name="BB_mean"
)
trace3 = go.Scatter(
x=df.index, y=lower_band, mode="lines", showlegend=False, name="BB_lower"
)
fig.append_trace(trace, 1, 1) # plot in first row
fig.append_trace(trace2, 1, 1) # plot in first row
fig.append_trace(trace3, 1, 1) # plot in first row
return fig
# Accumulation Distribution
def accumulation_trace(df):
df["volume"] = ((df["close"] - df["low"]) - (df["high"] - df["close"])) / (
df["high"] - df["low"]
)
trace = go.Scatter(
x=df.index, y=df["volume"], mode="lines", showlegend=False, name="Accumulation"
)
return trace
# Commodity Channel Index
def cci_trace(df, ndays=5):
TP = (df["high"] + df["low"] + df["close"]) / 3
CCI = pd.Series(
(TP - TP.rolling(window=10, center=False).mean())
/ (0.015 * TP.rolling(window=10, center=False).std()),
name="cci",
)
trace = go.Scatter(x=df.index, y=CCI, mode="lines", showlegend=False, name="CCI")
return trace
# Price Rate of Change
def roc_trace(df, ndays=5):
N = df["close"].diff(ndays)
D = df["close"].shift(ndays)
ROC = pd.Series(N / D, name="roc")
trace = go.Scatter(x=df.index, y=ROC, mode="lines", showlegend=False, name="ROC")
return trace
# Stochastic oscillator %K
def stoc_trace(df):
SOk = pd.Series((df["close"] - df["low"]) / (df["high"] - df["low"]), name="SO%k")
trace = go.Scatter(x=df.index, y=SOk, mode="lines", showlegend=False, name="SO%k")
return trace
# Momentum
def mom_trace(df, n=5):
M = pd.Series(df["close"].diff(n), name="Momentum_" + str(n))
trace = go.Scatter(x=df.index, y=M, mode="lines", showlegend=False, name="MOM")
return trace
# Pivot points
def pp_trace(df, fig):
PP = pd.Series((df["high"] + df["low"] + df["close"]) / 3)
R1 = pd.Series(2 * PP - df["low"])
S1 = pd.Series(2 * PP - df["high"])
R2 = pd.Series(PP + df["high"] - df["low"])
S2 = | pd.Series(PP - df["high"] + df["low"]) | pandas.Series |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` functions and operators implementations in SDC
| Also, it contains Numba internal operators which are required for Series type handling
"""
import numba
import numpy
import operator
import pandas
import math
import sys
from numba.errors import TypingError
from numba.extending import overload, overload_method, overload_attribute
from numba.typing import signature
from numba.extending import intrinsic
from numba import (types, numpy_support, cgutils)
from numba.typed import Dict
from numba import prange
import sdc
import sdc.datatypes.common_functions as common_functions
from sdc.datatypes.common_functions import (TypeChecker, check_index_is_numeric, find_common_dtype_from_numpy_dtypes,
sdc_join_series_indexes)
from sdc.datatypes.hpat_pandas_series_rolling_types import _hpat_pandas_series_rolling_init
from sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType
from sdc.datatypes.hpat_pandas_getitem_types import SeriesGetitemAccessorType
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (StringArrayType, string_array_type, str_arr_is_na, str_arr_set_na,
num_total_chars, pre_alloc_string_array, cp_str_list_to_array)
from sdc.utils import to_array, sdc_overload, sdc_overload_method, sdc_overload_attribute
from sdc.datatypes import hpat_pandas_series_autogenerated
@sdc_overload(operator.getitem)
def hpat_pandas_series_accessor_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesGetitemAccessorType):
return None
accessor = self.accessor.literal_value
if accessor == 'iloc':
if isinstance(idx, (types.List, types.Array, types.SliceType)):
def hpat_pandas_series_iloc_list_slice_impl(self, idx):
result_data = self._series._data[idx]
result_index = self._series.index[idx]
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_iloc_list_slice_impl
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iloc_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iloc_impl
def hpat_pandas_series_iloc_callable_impl(self, idx):
index = numpy.asarray(list(map(idx, self._series._data)))
return pandas.Series(self._series._data[index], self._series.index[index], self._series._name)
return hpat_pandas_series_iloc_callable_impl
raise TypingError('{} The index must be an Integer, Slice or List of Integer or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'iat':
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iat_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iat_impl
raise TypingError('{} The index must be a Integer. Given: {}'.format(_func_name, idx))
if accessor == 'loc':
# Note: Loc return Series
# Note: Index 0 in slice not supported
# Note: Loc slice and callable with String not implement
index_is_none = (self.series.index is None or
isinstance(self.series.index, numba.types.misc.NoneType))
if isinstance(idx, types.SliceType) and index_is_none:
def hpat_pandas_series_loc_slice_noidx_impl(self, idx):
max_slice = sys.maxsize
start = idx.start
stop = idx.stop
if idx.stop == max_slice:
stop = max_slice - 1
result_data = self._series._data[start:stop+1]
result_index = numpy.arange(start, stop + 1)
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_loc_slice_noidx_impl
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_loc_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._series._data[mask], index[mask], self._series._name)
return hpat_pandas_series_loc_impl
raise TypingError('{} The index must be an Number, Slice, String, List, Array or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'at':
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_at_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return self._series._data[mask]
return hpat_pandas_series_at_impl
raise TypingError('{} The index must be a Number or String. Given: {}'.format(_func_name, idx))
raise TypingError('{} Unknown accessor. Only "loc", "iloc", "at", "iat" are supported.\
Given: {}'.format(_func_name, accessor))
@sdc_overload(operator.getitem)
def hpat_pandas_series_getitem(self, idx):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.get
Limitations
-----------
Supported ``key`` can be one of the following:
- Integer scalar, e.g. :obj:`series[0]`
- A slice, e.g. :obj:`series[2:5]`
- Another series
Examples
--------
.. literalinclude:: ../../../examples/series_getitem.py
:language: python
:lines: 27-
:caption: Getting Pandas Series elements
:name: ex_series_getitem
.. command-output:: python ./series_getitem.py
:cwd: ../../../examples
.. todo:: Fix SDC behavior and add the expected output of the > python ./series_getitem.py to the docstring
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesType):
return None
# Note: Getitem return Series
index_is_none = isinstance(self.index, numba.types.misc.NoneType)
index_is_none_or_numeric = index_is_none or (self.index and isinstance(self.index.dtype, types.Number))
index_is_string = not index_is_none and isinstance(self.index.dtype, (types.UnicodeType, types.StringLiteral))
if (
isinstance(idx, types.Number) and index_is_none_or_numeric or
(isinstance(idx, (types.UnicodeType, types.StringLiteral)) and index_is_string)
):
def hpat_pandas_series_getitem_index_impl(self, idx):
index = self.index
mask = numpy.empty(len(self._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._data[mask], index[mask], self._name)
return hpat_pandas_series_getitem_index_impl
if (isinstance(idx, types.Integer) and index_is_string):
def hpat_pandas_series_idx_impl(self, idx):
return self._data[idx]
return hpat_pandas_series_idx_impl
if isinstance(idx, types.SliceType):
# Return slice for str values not implement
def hpat_pandas_series_getitem_idx_slice_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_slice_impl
if (
isinstance(idx, (types.List, types.Array)) and
isinstance(idx.dtype, (types.Boolean, bool))
):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (index_is_none and isinstance(idx, SeriesType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
index = numpy.arange(len(self._data))
if (index != idx.index).sum() == 0:
return pandas.Series(self._data[idx._data], index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
res = numpy.copy(self._data[:len(idx._data)])
index = numpy.arange(len(self._data))
for i in numba.prange(len(res)):
for j in numba.prange(len(index)):
if j == idx._data[i]:
res[i] = self._data[j]
return pandas.Series(res, index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (isinstance(idx, SeriesType) and not isinstance(self.index, types.NoneType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
# Series with str index not implement
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
if (self._index != idx._index).sum() == 0:
return pandas.Series(self._data[idx._data], self._index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_series_impl
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
index = self.index
data = self._data
size = len(index)
data_res = []
index_res = []
for value in idx._data:
mask = numpy.zeros(shape=size, dtype=numpy.bool_)
for i in numba.prange(size):
mask[i] = index[i] == value
data_res.extend(data[mask])
index_res.extend(index[mask])
return pandas.Series(data=data_res, index=index_res, name=self._name)
return hpat_pandas_series_getitem_idx_series_impl
raise TypingError('{} The index must be an Number, Slice, String, Boolean Array or a Series.\
Given: {}'.format(_func_name, idx))
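# Illustrative sketch (added for clarity, not part of SDC): plain interpreted
# pandas behavior for two of the getitem forms handled above. Note that the
# compiled overload returns a Series even for scalar label lookups, as stated
# in the "Getitem return Series" note. The helper name below is arbitrary.
def _getitem_reference_example():
    s = pandas.Series([5, 6, 7, 8])
    sliced = s[1:3]                                         # positions 1 and 2
    masked = s[pandas.Series([True, False, True, False])]   # values 5 and 7
    return sliced, masked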
@sdc_overload(operator.setitem)
def hpat_pandas_series_setitem(self, idx, value):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.__setitem__
Examples
--------
.. literalinclude:: ../../../examples/series_setitem_int.py
:language: python
:lines: 27-
:caption: Setting Pandas Series elements
:name: ex_series_setitem
.. code-block:: console
> python ./series_setitem_int.py
0 0
1 4
2 3
3 2
4 1
dtype: int64
> python ./series_setitem_slice.py
0 5
1 4
2 0
3 0
4 0
dtype: int64
> python ./series_setitem_series.py
0 5
1 0
2 3
3 0
4 1
dtype: int64
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.set` implementation
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_setitem*
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
value: :object
input value
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
ty_checker = TypeChecker('Operator setitem.')
ty_checker.check(self, SeriesType)
if not (isinstance(idx, (types.Integer, types.SliceType, SeriesType))):
ty_checker.raise_exc(idx, 'int, Slice, Series', 'idx')
if not((isinstance(value, SeriesType) and isinstance(value.dtype, self.dtype)) or \
isinstance(value, type(self.dtype))):
ty_checker.raise_exc(value, self.dtype, 'value')
if isinstance(idx, types.Integer) or isinstance(idx, types.SliceType):
def hpat_pandas_series_setitem_idx_integer_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_value
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_slice
"""
self._data[idx] = value
return self
return hpat_pandas_series_setitem_idx_integer_impl
if isinstance(idx, SeriesType):
def hpat_pandas_series_setitem_idx_series_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_series
"""
super_index = idx._data
self._data[super_index] = value
return self
return hpat_pandas_series_setitem_idx_series_impl
@sdc_overload_attribute(SeriesType, 'iloc')
def hpat_pandas_series_iloc(self):
"""
Pandas Series method :meth:`pandas.Series.iloc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iloc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iloc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iloc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iloc')
return hpat_pandas_series_iloc_impl
@sdc_overload_attribute(SeriesType, 'loc')
def hpat_pandas_series_loc(self):
"""
Pandas Series method :meth:`pandas.Series.loc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_loc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute loc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_loc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'loc')
return hpat_pandas_series_loc_impl
@sdc_overload_attribute(SeriesType, 'iat')
def hpat_pandas_series_iat(self):
"""
Pandas Series method :meth:`pandas.Series.iat` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iat*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iat().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iat_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iat')
return hpat_pandas_series_iat_impl
@sdc_overload_attribute(SeriesType, 'at')
def hpat_pandas_series_at(self):
"""
Pandas Series method :meth:`pandas.Series.at` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_at*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute at().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_at_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'at')
return hpat_pandas_series_at_impl
@sdc_overload_method(SeriesType, 'nsmallest')
def hpat_pandas_series_nsmallest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nsmallest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nsmallest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nsmallest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nsmallest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nsmallest(). Unsupported parameter. Given 'keep' != 'first'")
# mergesort is used for stable sorting of repeated values
indices = self._data.argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nsmallest_impl
@sdc_overload_method(SeriesType, 'nlargest')
def hpat_pandas_series_nlargest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nlargest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nlargest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nlargest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nlargest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nlargest(). Unsupported parameter. Given 'keep' != 'first'")
# data: [0, 1, -1, 1, 0] -> [1, 1, 0, 0, -1]
# index: [0, 1, 2, 3, 4] -> [1, 3, 0, 4, 2] (not [3, 1, 4, 0, 2])
# subtract 1 to ensure reverse ordering at boundaries
indices = (-self._data - 1).argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nlargest_impl
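# Illustrative sketch (added for clarity, not part of SDC): plain interpreted
# pandas behavior for the two methods overloaded above; keep='first' is the
# only mode the SDC implementations accept.
def _nsmallest_nlargest_reference_example():
    s = pandas.Series([1, 3, 3, 2, 5])
    smallest = s.nsmallest(2)   # values 1 and 2
    largest = s.nlargest(2)     # values 5 and 3 (first occurrence)
    return smallest, largest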
@sdc_overload_attribute(SeriesType, 'shape')
def hpat_pandas_series_shape(self):
"""
Pandas Series attribute :attr:`pandas.Series.shape` implementation
**Algorithm**: result = series.shape
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shape1
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:obj:`tuple`
a tuple of the shape of the underlying data
"""
_func_name = 'Attribute shape.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl
@sdc_overload_method(SeriesType, 'std')
def hpat_pandas_series_std(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.std` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method std().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_std_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
var = self.var(axis=axis, skipna=skipna, level=level, ddof=ddof, numeric_only=numeric_only)
return var ** 0.5
return hpat_pandas_series_std_impl
@sdc_overload_attribute(SeriesType, 'values')
def hpat_pandas_series_values(self):
"""
Pandas Series attribute 'values' implementation.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.values.html#pandas.Series.values
Algorithm: result = series.values
Where:
series: pandas.series
result: pandas.series as ndarray
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_values
"""
_func_name = 'Attribute values.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_values_impl(self):
return self._data
return hpat_pandas_series_values_impl
@sdc_overload_method(SeriesType, 'value_counts')
def hpat_pandas_series_value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.value_counts
Examples
--------
.. literalinclude:: ../../../examples/series/series_value_counts.py
:language: python
:lines: 27-
:caption: Getting the number of values excluding NaNs
:name: ex_series_value_counts
.. command-output:: python ./series/series_value_counts.py
:cwd: ../../../examples
.. note::
Parameter bins and dropna for Strings are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.count <pandas.Series.count>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.value_counts` implementation.
Note: Elements with the same count might appear in result in a different order than in Pandas
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_value_counts*
Parameters
-----------
self: :obj:`pandas.Series`
input series
normalize: :obj:`boolean`, default False
If True then the object returned will contain the relative frequencies of the unique values
sort: :obj: `boolean`, default True
Sort by frequencies
ascending: :obj:`boolean`, default False
Sort in ascending order
bins: :obj:`integer`, default None
*unsupported*
dropna: :obj:`boolean`, default True
Skip counts of NaN
Returns
-------
:returns :obj:`pandas.Series`
"""
_func_name = 'Method value_counts().'
ty_checker = TypeChecker('Method value_counts().')
ty_checker.check(self, SeriesType)
if not isinstance(normalize, (types.Omitted, types.Boolean, bool)) and normalize is True:
ty_checker.raise_exc(normalize, 'boolean', 'normalize')
if not isinstance(sort, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(sort, 'boolean', 'sort')
if not isinstance(ascending, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(ascending, 'boolean', 'ascending')
if not isinstance(bins, (types.Omitted, types.NoneType)) and bins is not None:
ty_checker.raise_exc(bins, 'boolean', 'bins')
if not isinstance(dropna, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(dropna, 'boolean', 'dropna')
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_value_counts_str_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=types.unicode_type,
value_type=types.intp
)
nan_counts = 0
for i, value in enumerate(self._data):
if str_arr_is_na(self._data, i):
if not dropna:
nan_counts += 1
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
need_add_nan_count = not dropna and nan_counts
values = [key for key in value_counts_dict]
counts_as_list = [value_counts_dict[key] for key in value_counts_dict.keys()]
values_len = len(values)
if need_add_nan_count:
# append a separate empty string for NaN elements
values_len += 1
values.append('')
counts_as_list.append(nan_counts)
counts = numpy.asarray(counts_as_list, dtype=numpy.intp)
indexes_order = numpy.arange(values_len)
if sort:
indexes_order = counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
counts_sorted = numpy.take(counts, indexes_order)
values_sorted_by_count = [values[i] for i in indexes_order]
# allocate the result index as a StringArray and copy values to it
index_string_lengths = numpy.asarray([len(s) for s in values_sorted_by_count])
index_total_chars = numpy.sum(index_string_lengths)
result_index = pre_alloc_string_array(len(values_sorted_by_count), index_total_chars)
cp_str_list_to_array(result_index, values_sorted_by_count)
if need_add_nan_count:
# set null bit for StringArray element corresponding to NaN element (was added as last in values)
index_previous_nan_pos = values_len - 1
for i in numpy.arange(values_len):
if indexes_order[i] == index_previous_nan_pos:
str_arr_set_na(result_index, i)
break
return pandas.Series(counts_sorted, index=result_index, name=self._name)
return hpat_pandas_series_value_counts_str_impl
elif isinstance(self.dtype, types.Number):
series_dtype = self.dtype
def hpat_pandas_series_value_counts_number_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=series_dtype,
value_type=types.intp
)
zero_counts = 0
is_zero_found = False
for value in self._data:
if (dropna and numpy.isnan(value)):
continue
# Pandas hash-based value_count_float64 function doesn't distinguish between
# positive and negative zeros, hence we count zero values separately and store
# as a key the first zero value found in the Series
if not value:
zero_counts += 1
if not is_zero_found:
zero_value = value
is_zero_found = True
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
if zero_counts:
value_counts_dict[zero_value] = zero_counts
unique_values = numpy.asarray(
list(value_counts_dict),
dtype=self._data.dtype
)
value_counts = numpy.asarray(
[value_counts_dict[key] for key in value_counts_dict],
dtype=numpy.intp
)
indexes_order = numpy.arange(len(value_counts))
if sort:
indexes_order = value_counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
sorted_unique_values = numpy.take(unique_values, indexes_order)
sorted_value_counts = numpy.take(value_counts, indexes_order)
return pandas.Series(sorted_value_counts, index=sorted_unique_values, name=self._name)
return hpat_pandas_series_value_counts_number_impl
return None
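# Illustrative sketch (added for clarity, not part of SDC): plain interpreted
# pandas value_counts behavior; as noted in the docstring above, SDC may
# order ties differently.
def _value_counts_reference_example():
    s = pandas.Series(['a', 'b', 'a', None, 'a'])
    counts = s.value_counts()                # a -> 3, b -> 1 (missing dropped)
    with_nan = s.value_counts(dropna=False)  # also counts the missing value
    return counts, with_nan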
@sdc_overload_method(SeriesType, 'var')
def hpat_pandas_series_var(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.var` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method var().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_var_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
if skipna is None:
skipna = True
if skipna:
valuable_length = len(self._data) - numpy.sum(numpy.isnan(self._data))
if valuable_length <= ddof:
return numpy.nan
return numpy.nanvar(self._data) * valuable_length / (valuable_length - ddof)
if len(self._data) <= ddof:
return numpy.nan
return self._data.var() * len(self._data) / (len(self._data) - ddof)
return hpat_pandas_series_var_impl
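# Numerical check (added for clarity, not part of SDC) of the rescaling used
# in hpat_pandas_series_var_impl: the ddof=0 variance of the non-NaN values,
# multiplied by n_valid / (n_valid - ddof), equals the ddof-adjusted variance.
def _var_ddof_reference_example():
    data = numpy.array([1.0, 2.0, numpy.nan, 4.0, 8.0])
    n_valid = len(data) - numpy.sum(numpy.isnan(data))
    ddof = 1
    rescaled = numpy.nanvar(data) * n_valid / (n_valid - ddof)
    return numpy.isclose(rescaled, numpy.nanvar(data, ddof=ddof))  # True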
@sdc_overload_attribute(SeriesType, 'index')
def hpat_pandas_series_index(self):
"""
Pandas Series attribute :attr:`pandas.Series.index` implementation
**Algorithm**: result = series.index
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index1
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index2
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
the index of the Series
"""
_func_name = 'Attribute index.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_index_none_impl(self):
return numpy.arange(len(self._data))
return hpat_pandas_series_index_none_impl
else:
def hpat_pandas_series_index_impl(self):
return self._index
return hpat_pandas_series_index_impl
@sdc_overload_method(SeriesType, 'rolling')
def hpat_pandas_series_rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.rolling
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_min.py
:language: python
:lines: 27-
:caption: Calculate the rolling minimum.
:name: ex_series_rolling
.. command-output:: python ./series/rolling/series_rolling_min.py
:cwd: ../../../examples
.. todo:: Add support of parameters ``center``, ``win_type``, ``on``, ``axis`` and ``closed``
.. seealso::
:ref:`expanding <pandas.Series.expanding>`
Provides expanding transformations.
:ref:`ewm <pandas.Series.ewm>`
Provides exponential weighted functions.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series attribute :attr:`pandas.Series.rolling` implementation
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling
Parameters
----------
series: :obj:`pandas.Series`
Input Series.
window: :obj:`int` or :obj:`offset`
Size of the moving window.
min_periods: :obj:`int`
Minimum number of observations in window required to have a value.
center: :obj:`bool`
Set the labels at the center of the window.
*unsupported*
win_type: :obj:`str`
Provide a window type.
*unsupported*
on: :obj:`str`
Column on which to calculate the rolling window.
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
closed: :obj:`str`
Make the interval closed on the ‘right’, ‘left’, ‘both’ or ‘neither’ endpoints.
*unsupported*
Returns
-------
:class:`pandas.Series.rolling`
Output class to manipulate with input data.
"""
ty_checker = TypeChecker('Method rolling().')
ty_checker.check(self, SeriesType)
if not isinstance(window, types.Integer):
ty_checker.raise_exc(window, 'int', 'window')
minp_accepted = (types.Omitted, types.NoneType, types.Integer)
if not isinstance(min_periods, minp_accepted) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'None, int', 'min_periods')
center_accepted = (types.Omitted, types.Boolean)
if not isinstance(center, center_accepted) and center is not False:
ty_checker.raise_exc(center, 'bool', 'center')
str_types = (types.Omitted, types.NoneType, types.StringLiteral, types.UnicodeType)
if not isinstance(win_type, str_types) and win_type is not None:
ty_checker.raise_exc(win_type, 'str', 'win_type')
if not isinstance(on, str_types) and on is not None:
ty_checker.raise_exc(on, 'str', 'on')
axis_accepted = (types.Omitted, types.Integer, types.StringLiteral, types.UnicodeType)
if not isinstance(axis, axis_accepted) and axis != 0:
ty_checker.raise_exc(axis, 'int, str', 'axis')
if not isinstance(closed, str_types) and closed is not None:
ty_checker.raise_exc(closed, 'str', 'closed')
nan_minp = isinstance(min_periods, (types.Omitted, types.NoneType)) or min_periods is None
def hpat_pandas_series_rolling_impl(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
if window < 0:
raise ValueError('window must be non-negative')
if nan_minp == True: # noqa
minp = window
else:
minp = min_periods
if minp < 0:
raise ValueError('min_periods must be >= 0')
if minp > window:
raise ValueError('min_periods must be <= window')
if center != False: # noqa
raise ValueError('Method rolling(). The object center\n expected: False')
if win_type is not None:
raise ValueError('Method rolling(). The object win_type\n expected: None')
if on is not None:
raise ValueError('Method rolling(). The object on\n expected: None')
if axis != 0:
raise ValueError('Method rolling(). The object axis\n expected: 0')
if closed is not None:
raise ValueError('Method rolling(). The object closed\n expected: None')
return _hpat_pandas_series_rolling_init(self, window, minp, center,
win_type, on, axis, closed)
return hpat_pandas_series_rolling_impl
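# Illustrative sketch (added for clarity, not part of SDC): plain interpreted
# pandas rolling behavior with an explicit min_periods, which the overload
# above validates against window.
def _rolling_reference_example():
    s = pandas.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    return s.rolling(window=3, min_periods=1).min()  # 1, 1, 1, 2, 3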
@sdc_overload_attribute(SeriesType, 'size')
def hpat_pandas_series_size(self):
"""
Pandas Series attribute :attr:`pandas.Series.size` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_size
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
Return the number of elements in the underlying data.
"""
_func_name = 'Attribute size.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_size_impl(self):
return len(self._data)
return hpat_pandas_series_size_impl
@sdc_overload_attribute(SeriesType, 'str')
def hpat_pandas_series_str(self):
"""
Pandas Series attribute :attr:`pandas.Series.str` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_get
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.core.strings.StringMethods`
Output class to manipulate with input data.
"""
_func_name = 'Attribute str.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.List, types.UnicodeType)):
msg = '{} Can only use .str accessor with string values. Given: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
def hpat_pandas_series_str_impl(self):
return pandas.core.strings.StringMethods(self)
return hpat_pandas_series_str_impl
@sdc_overload_attribute(SeriesType, 'ndim')
def hpat_pandas_series_ndim(self):
"""
Pandas Series attribute :attr:`pandas.Series.ndim` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_getattr_ndim
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`int`
Number of dimensions of the underlying data, by definition 1
"""
_func_name = 'Attribute ndim.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_ndim_impl(self):
return 1
return hpat_pandas_series_ndim_impl
@sdc_overload_attribute(SeriesType, 'T')
def hpat_pandas_series_T(self):
"""
Pandas Series attribute :attr:`pandas.Series.T` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_getattr_T
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`numpy.ndarray`
An array representing the underlying data
"""
_func_name = 'Attribute T.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_T_impl(self):
return self._data
return hpat_pandas_series_T_impl
@sdc_overload(len)
def hpat_pandas_series_len(self):
"""
Pandas Series operator :func:`len` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_len
Parameters
----------
series: :class:`pandas.Series`
Returns
-------
:obj:`int`
number of items in the object
"""
_func_name = 'Operator len().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_len_impl(self):
return len(self._data)
return hpat_pandas_series_len_impl
@sdc_overload_method(SeriesType, 'astype')
def hpat_pandas_series_astype(self, dtype, copy=True, errors='raise'):
"""
Pandas Series method :meth:`pandas.Series.astype` implementation.
Cast a pandas object to a specified dtype dtype
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_astype*
Parameters
-----------
dtype : :obj:`numpy.dtype` or :obj:`dict`
Use a numpy.dtype or Python type to cast entire pandas object to the same type.
Alternatively, use {col: dtype, …}, where col is a column label and dtype is a numpy.dtype
or Python type to cast one or more of the DataFrame’s columns to column-specific types.
copy : :obj:`bool`, default :obj:`True`
Return a copy when True
Currently copy=False is not supported
errors : :obj:`str`, default :obj:`'raise'`
Control raising of exceptions on invalid data for provided dtype.
* raise : allow exceptions to be raised
* ignore : suppress exceptions. On error return original object
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` Cast a :obj:`pandas.Series` to a specified dtype dtype
"""
_func_name = 'Method astype().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(copy, (types.Omitted, bool, types.Boolean)):
raise TypingError('{} The object must be a boolean. Given copy: {}'.format(_func_name, copy))
if (not isinstance(errors, (types.Omitted, str, types.UnicodeType, types.StringLiteral)) and
errors in ('raise', 'ignore')):
raise TypingError('{} The object must be a string literal. Given errors: {}'.format(_func_name, errors))
# Return StringArray for astype(str) or astype('str')
def hpat_pandas_series_astype_to_str_impl(self, dtype, copy=True, errors='raise'):
num_chars = 0
arr_len = len(self._data)
# Get total chars for new array
for i in numba.parfor.internal_prange(arr_len):
item = self._data[i]
num_chars += len(str(item)) # TODO: check NA
data = sdc.str_arr_ext.pre_alloc_string_array(arr_len, num_chars)
for i in numba.parfor.internal_prange(arr_len):
item = self._data[i]
data[i] = str(item) # TODO: check NA
return pandas.Series(data, self._index, self._name)
# Return npytypes.Array from npytypes.Array for astype(types.functions.NumberClass), example - astype(np.int64)
def hpat_pandas_series_astype_numba_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data.astype(dtype), self._index, self._name)
# Return npytypes.Array from npytypes.Array for astype(types.StringLiteral), example - astype('int64')
def hpat_pandas_series_astype_literal_type_numba_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data.astype(numpy.dtype(dtype)), self._index, self._name)
# Return self
def hpat_pandas_series_astype_no_modify_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data, self._index, self._name)
if ((isinstance(dtype, types.Function) and dtype.typing_key == str)
or (isinstance(dtype, types.StringLiteral) and dtype.literal_value == 'str')):
return hpat_pandas_series_astype_to_str_impl
# Needs Numba astype impl support converting unicode_type to NumberClass and other types
if isinstance(self.data, StringArrayType):
if isinstance(dtype, types.functions.NumberClass) and errors == 'raise':
raise TypingError(f'Needs Numba astype impl support converting unicode_type to {dtype}')
if isinstance(dtype, types.StringLiteral) and errors == 'raise':
try:
literal_value = numpy.dtype(dtype.literal_value)
except:
pass # Will raise the exception later
else:
raise TypingError(f'Needs Numba astype impl support converting unicode_type to {dtype.literal_value}')
if isinstance(self.data, types.npytypes.Array) and isinstance(dtype, types.functions.NumberClass):
return hpat_pandas_series_astype_numba_impl
if isinstance(self.data, types.npytypes.Array) and isinstance(dtype, types.StringLiteral):
try:
literal_value = numpy.dtype(dtype.literal_value)
except:
pass # Will raise the exception later
else:
return hpat_pandas_series_astype_literal_type_numba_impl
# Raise error if dtype is not supported
if errors == 'raise':
raise TypingError(f'{_func_name} The object must be a supported type. Given dtype: {dtype}')
else:
return hpat_pandas_series_astype_no_modify_impl
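# Illustrative sketch (added for clarity, not part of SDC): plain interpreted
# pandas casts corresponding to the astype paths above (to str and via a
# dtype name); in SDC the string cast produces a StringArray-backed Series.
def _astype_reference_example():
    s = pandas.Series([1, 2, 3])
    as_str = s.astype(str)
    as_float = s.astype('float64')
    return as_str, as_float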
@sdc_overload_method(SeriesType, 'shift')
def hpat_pandas_series_shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Pandas Series method :meth:`pandas.Series.shift` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_full
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_fill_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
periods: :obj:`int`
Number of periods to shift. Can be positive or negative.
freq: :obj:`DateOffset`, :obj:`tseries.offsets`, :obj:`timedelta`, :obj:`str`
Offset to use from the tseries module or time rule (e.g. ‘EOM’).
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
fill_value : :obj:`int`, :obj:`float`
The scalar value to use for newly introduced missing values.
Returns
-------
:obj:`pandas.Series`
returns shifted :obj:`pandas.Series` object
"""
_func_name = 'Method shift().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(fill_value, (types.Omitted, types.Number, types.NoneType)) and fill_value is not None:
raise TypingError('{} The object must be a number. Given fill_value: {}'.format(_func_name, fill_value))
if not isinstance(freq, (types.Omitted, types.NoneType)) and freq is not None:
raise TypingError('{} Unsupported parameters. Given freq: {}'.format(_func_name, freq))
if not isinstance(axis, (types.Omitted, int, types.Integer)):
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
fill_is_default = isinstance(fill_value, (types.Omitted, types.NoneType)) or fill_value is None
series_np_dtype = [numpy_support.as_dtype(self.data.dtype)]
fill_np_dtype = [numpy.float64 if fill_is_default else numpy_support.as_dtype(fill_value)]
fill_dtype = types.float64 if fill_is_default else fill_value
common_dtype = find_common_dtype_from_numpy_dtypes([], [self.data.dtype, fill_dtype])
if fill_is_default:
def hpat_pandas_series_shift_impl(self, periods=1, freq=None, axis=0, fill_value=None):
if axis != 0:
raise TypingError('Method shift(). Unsupported parameters. Given axis != 0')
arr = numpy.empty(shape=len(self._data), dtype=common_dtype)
if periods > 0:
arr[:periods] = numpy.nan
arr[periods:] = self._data[:-periods]
elif periods < 0:
arr[periods:] = numpy.nan
arr[:periods] = self._data[-periods:]
else:
arr[:] = self._data
return pandas.Series(data=arr, index=self._index, name=self._name)
return hpat_pandas_series_shift_impl
def hpat_pandas_series_shift_impl(self, periods=1, freq=None, axis=0, fill_value=None):
if axis != 0:
raise TypingError('Method shift(). Unsupported parameters. Given axis != 0')
arr = numpy.empty(len(self._data), dtype=common_dtype)
if periods > 0:
arr[:periods] = fill_value
arr[periods:] = self._data[:-periods]
elif periods < 0:
arr[periods:] = fill_value
arr[:periods] = self._data[-periods:]
else:
arr[:] = self._data
return pandas.Series(data=arr, index=self._index, name=self._name)
return hpat_pandas_series_shift_impl
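# Illustrative usage sketch (hypothetical helper, plain pandas calls): the two
# branches above differ only in what is written into the vacated positions -
# NaN for the default fill (result upcast to a float dtype) versus the explicit
# numeric fill_value.
def _example_series_shift_usage():
    s = pandas.Series([1.0, 2.0, 3.0, 4.0])
    shifted_nan = s.shift(periods=1)                      # [nan, 1.0, 2.0, 3.0]
    shifted_fill = s.shift(periods=-1, fill_value=0.0)    # [2.0, 3.0, 4.0, 0.0]
    return shifted_nan, shifted_fill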
@sdc_overload_method(SeriesType, 'isin')
def hpat_pandas_series_isin(self, values):
"""
Pandas Series method :meth:`pandas.Series.isin` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isin_list1
Parameters
-----------
values : :obj:`list` or :obj:`set` object
specifies values to look for in the series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object indicating if each element of self is in values
"""
_func_name = 'Method isin().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(values, (types.Set, types.List)):
raise TypingError(
'{} The argument must be set or list-like object. Given values: {}'.format(_func_name, values))
def hpat_pandas_series_isin_impl(self, values):
# TODO: replace with below line when Numba supports np.isin in nopython mode
# return pandas.Series(np.isin(self._data, values))
return pandas.Series(data=[(x in values) for x in self._data], index=self._index, name=self._name)
return hpat_pandas_series_isin_impl
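# Illustrative usage sketch (hypothetical helper): the overload builds the boolean
# mask with a Python-level membership test, which matches pandas semantics for
# list/set arguments.
def _example_series_isin_usage():
    s = pandas.Series([1, 2, 3, 4])
    return s.isin([2, 4])   # [False, True, False, True]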
@sdc_overload_method(SeriesType, 'append')
def hpat_pandas_series_append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Pandas Series method :meth:`pandas.Series.append` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_append*
Parameters
-----------
self: :obj:`pandas.Series`
input series
to_append : :obj:`pandas.Series` object or :obj:`list` or :obj:`tuple` of :obj:`pandas.Series`
Series (or list or tuple of Series) to append with self
ignore_index: :obj:`bool`, default False
If True, do not use the index labels.
Supported as literal value only
verify_integrity: :obj:`bool`, default False
If True, raise Exception on creating index with duplicates.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
Concatenated Series
"""
_func_name = 'Method append().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not (isinstance(to_append, SeriesType)
or (isinstance(to_append, (types.UniTuple, types.List)) and isinstance(to_append.dtype, SeriesType))):
raise TypingError(
'{} The argument must be a pandas.series or list/tuple of pandas.series. \
Given to_append: {}'.format(_func_name, to_append))
# currently we will always raise this in the end, i.e. if no impl was found
# TODO: find a way to stop compilation early and not proceed with unliteral step
if not (isinstance(ignore_index, types.Literal) and isinstance(ignore_index, types.Boolean)
or isinstance(ignore_index, types.Omitted)
or ignore_index is False):
raise TypingError(
'{} The ignore_index must be a literal Boolean constant. Given: {}'.format(_func_name, ignore_index))
if not (verify_integrity is False or isinstance(verify_integrity, types.Omitted)):
raise TypingError(
'{} Unsupported parameters. Given verify_integrity: {}'.format(_func_name, verify_integrity))
# ignore_index value has to be known at compile time to select between implementations with different signatures
ignore_index_is_false = (common_functions.has_literal_value(ignore_index, False)
or common_functions.has_python_value(ignore_index, False)
or isinstance(ignore_index, types.Omitted))
to_append_is_series = isinstance(to_append, SeriesType)
if ignore_index_is_false:
def hpat_pandas_series_append_impl(self, to_append, ignore_index=False, verify_integrity=False):
if to_append_is_series == True: # noqa
new_data = common_functions.hpat_arrays_append(self._data, to_append._data)
new_index = common_functions.hpat_arrays_append(self.index, to_append.index)
else:
data_arrays_to_append = [series._data for series in to_append]
index_arrays_to_append = [series.index for series in to_append]
new_data = common_functions.hpat_arrays_append(self._data, data_arrays_to_append)
new_index = common_functions.hpat_arrays_append(self.index, index_arrays_to_append)
return pandas.Series(new_data, new_index)
return hpat_pandas_series_append_impl
else:
def hpat_pandas_series_append_ignore_index_impl(self, to_append, ignore_index=False, verify_integrity=False):
if to_append_is_series == True: # noqa
new_data = common_functions.hpat_arrays_append(self._data, to_append._data)
else:
arrays_to_append = [series._data for series in to_append]
new_data = common_functions.hpat_arrays_append(self._data, arrays_to_append)
return pandas.Series(new_data, None)
return hpat_pandas_series_append_ignore_index_impl
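# Illustrative usage sketch (hypothetical helper, plain pandas calls in the
# pandas versions SDC targets): ignore_index has to be a compile-time constant
# because it selects between the two implementations returned above.
def _example_series_append_usage():
    s1 = pandas.Series([1, 2])
    s2 = pandas.Series([3, 4])
    kept = s1.append(s2)                      # index: 0, 1, 0, 1
    fresh = s1.append(s2, ignore_index=True)  # index: 0, 1, 2, 3
    return kept, fresh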
@sdc_overload_method(SeriesType, 'copy')
def hpat_pandas_series_copy(self, deep=True):
"""
Pandas Series method :meth:`pandas.Series.copy` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_str1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_int1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_deep
Parameters
-----------
self: :class:`pandas.Series`
input arg
deep: :obj:`bool`, default :obj:`True`
Make a deep copy, including a copy of the data and the indices.
With deep=False neither the indices nor the data are copied.
[SDC limitations]:
- deep=False: shallow copy of index is not supported
Returns
-------
:obj:`pandas.Series` or :obj:`pandas.DataFrame`
Object type matches caller.
"""
ty_checker = TypeChecker('Method Series.copy().')
ty_checker.check(self, SeriesType)
if not isinstance(deep, (types.Omitted, types.Boolean)) and not deep:
ty_checker.raise_exc(deep, 'boolean', 'deep')
if isinstance(self.index, types.NoneType):
def hpat_pandas_series_copy_impl(self, deep=True):
if deep:
return pandas.Series(data=self._data.copy(), name=self._name)
else:
return pandas.Series(data=self._data, name=self._name)
return hpat_pandas_series_copy_impl
else:
def hpat_pandas_series_copy_impl(self, deep=True):
if deep:
return pandas.Series(data=self._data.copy(), index=self._index.copy(), name=self._name)
else:
# Shallow copy of index is not supported yet
return pandas.Series(data=self._data, index=self._index.copy(), name=self._name)
return hpat_pandas_series_copy_impl
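# Illustrative usage sketch (hypothetical helper): deep=True copies data and
# index, deep=False shares the data buffer (shallow copy of the index is the
# SDC limitation noted in the docstring above).
def _example_series_copy_usage():
    s = pandas.Series([1, 2, 3], index=[10, 20, 30])
    return s.copy(deep=True), s.copy(deep=False)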
@sdc_overload_method(SeriesType, 'corr')
def hpat_pandas_series_corr(self, other, method='pearson', min_periods=None):
"""
Pandas Series method :meth:`pandas.Series.corr` implementation.
Note: Unsupported mixed numeric and string data
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr_unsupported_dtype
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr_unsupported_period
Parameters
----------
self: :obj:`pandas.Series`
input series
other: :obj:`pandas.Series`
input series
method:
*unsupported
min_periods: :obj:`int`, default None
Returns
-------
:obj:`float`
returns :obj:`float` object
"""
ty_checker = TypeChecker('Method corr().')
ty_checker.check(self, SeriesType)
ty_checker.check(other, SeriesType)
if not isinstance(self.data.dtype, types.Number):
ty_checker.raise_exc(self.data, 'number', 'self.data')
if not isinstance(other.data.dtype, types.Number):
ty_checker.raise_exc(other.data, 'number', 'other.data')
if not isinstance(min_periods, (int, types.Integer, types.Omitted, types.NoneType)) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'int64', 'min_periods')
def hpat_pandas_series_corr_impl(self, other, method='pearson', min_periods=None):
if min_periods is None:
min_periods = 1
if len(self._data) == 0 or len(other._data) == 0:
return numpy.nan
self_arr = self._data[:min(len(self._data), len(other._data))]
other_arr = other._data[:min(len(self._data), len(other._data))]
invalid = numpy.isnan(self_arr) | numpy.isnan(other_arr)
if invalid.any():
self_arr = self_arr[~invalid]
other_arr = other_arr[~invalid]
if len(self_arr) < min_periods:
return numpy.nan
new_self = pandas.Series(self_arr)
new_other = pandas.Series(other_arr)
n = new_self.count()
ma = new_self.sum()
mb = new_other.sum()
a = n * (self_arr * other_arr).sum() - ma * mb
b1 = n * (self_arr * self_arr).sum() - ma * ma
b2 = n * (other_arr * other_arr).sum() - mb * mb
if b1 == 0 or b2 == 0:
return numpy.nan
return a / numpy.sqrt(b1 * b2)
return hpat_pandas_series_corr_impl
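# Illustrative reference sketch (hypothetical helper, NumPy only): the same
# sum-based Pearson formula as hpat_pandas_series_corr_impl above, assuming
# equal-length inputs and omitting the min_periods guard.
def _example_series_corr_reference(x, y):
    x = numpy.asarray(x, dtype=numpy.float64)
    y = numpy.asarray(y, dtype=numpy.float64)
    valid = ~(numpy.isnan(x) | numpy.isnan(y))
    x, y = x[valid], y[valid]
    n = len(x)
    a = n * (x * y).sum() - x.sum() * y.sum()
    b1 = n * (x * x).sum() - x.sum() ** 2
    b2 = n * (y * y).sum() - y.sum() ** 2
    if b1 == 0 or b2 == 0:
        return numpy.nan
    return a / numpy.sqrt(b1 * b2)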
@sdc_overload_method(SeriesType, 'head')
def hpat_pandas_series_head(self, n=5):
"""
Pandas Series method :meth:`pandas.Series.head` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_head*
Parameters
-----------
n: :obj:`int`, default 5
input argument, default 5
Returns
-------
:obj:`pandas.Series`
returns: The first n rows of the caller object.
"""
_func_name = 'Method head().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(n, (types.Integer, types.Omitted)) and n != 5:
raise TypingError('{} The parameter must be an integer type. Given type n: {}'.format(_func_name, n))
if isinstance(self.index, types.NoneType):
def hpat_pandas_series_head_impl(self, n=5):
return pandas.Series(data=self._data[:n], name=self._name)
return hpat_pandas_series_head_impl
else:
def hpat_pandas_series_head_index_impl(self, n=5):
return pandas.Series(data=self._data[:n], index=self._index[:n], name=self._name)
return hpat_pandas_series_head_index_impl
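# Illustrative usage sketch (hypothetical helper): head(n) slices the first n
# values; the branch with an index also slices the index so labels are preserved.
def _example_series_head_usage():
    s = pandas.Series([10, 20, 30, 40], index=['a', 'b', 'c', 'd'])
    return s.head(2)   # values [10, 20] with index ['a', 'b']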
@sdc_overload_method(SeriesType, 'groupby')
def hpat_pandas_series_groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
"""
Pandas Series method :meth:`pandas.Series.groupby` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_groupby_count
Parameters
-----------
self: :class:`pandas.Series`
input arg
by: :obj:`pandas.Series` object
Used to determine the groups for the groupby
axis:
*unsupported*
level:
*unsupported*
as_index:
*unsupported*
sort:
*unsupported*
group_keys:
*unsupported*
squeeze:
*unsupported*
observed:
*unsupported*
Returns
-------
:obj:`pandas.SeriesGroupBy`
returns :obj:`pandas.SeriesGroupBy` object
"""
_func_name = 'Method Series.groupby().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if by is None and axis is None:
raise TypingError("{} You have to supply one of 'by' or 'axis' parameters".format(_func_name))
if level is not None and not isinstance(level, (types.Integer, types.NoneType, types.Omitted)):
raise TypingError("{} 'level' must be an Integer. Given: {}".format(_func_name, level))
def hpat_pandas_series_groupby_impl(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
# TODO Needs to implement parameters value check
# if level is not None and (level < -1 or level > 0):
# raise ValueError("Method Series.groupby(). level > 0 or level < -1 only valid with MultiIndex")
return pandas.core.groupby.SeriesGroupBy(self)
return hpat_pandas_series_groupby_impl
@sdc_overload_method(SeriesType, 'isnull')
@sdc_overload_method(SeriesType, 'isna')
def hpat_pandas_series_isna(self):
"""
Pandas Series method :meth:`pandas.Series.isna` and :meth:`pandas.Series.isnull` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isna1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str_isna1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isnull1
Parameters
-----------
self : :obj:`pandas.Series` object
input argument
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method isna/isnull().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if isinstance(self.data.dtype, (types.Integer, types.Float)):
def hpat_pandas_series_isna_impl(self):
return pandas.Series(data=numpy.isnan(self._data), index=self._index, name=self._name)
return hpat_pandas_series_isna_impl
if isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_isna_impl(self):
result = numpy.empty(len(self._data), numpy.bool_)
byte_size = 8
# iterate over bits in StringArrayType null_bitmap and fill array indicating if array's element are NaN
for i in range(len(self._data)):
bmap_idx = i // byte_size
bit_idx = i % byte_size
bmap = self._data.null_bitmap[bmap_idx]
bit_value = (bmap >> bit_idx) & 1
result[i] = bit_value == 0
return pandas.Series(result, index=self._index, name=self._name)
return hpat_pandas_series_isna_impl
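# Illustrative usage sketch (hypothetical helper): for numeric data isna() is a
# plain numpy.isnan over the values; for string data the overload above decodes
# the null bitmap (one bit per element).
def _example_series_isna_usage():
    s = pandas.Series([1.0, numpy.nan, 3.0])
    return s.isna()   # [False, True, False]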
@sdc_overload_method(SeriesType, 'notna')
def hpat_pandas_series_notna(self):
"""
Pandas Series method :meth:`pandas.Series.notna` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_notna*
Parameters
-----------
self : :obj:`pandas.Series` object
input series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method notna().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.data.dtype, types.Number):
def hpat_pandas_series_notna_impl(self):
return pandas.Series(numpy.invert(numpy.isnan(self._data)), index=self._index, name=self._name)
return hpat_pandas_series_notna_impl
if isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_notna_impl(self):
result = self.isna()
return pandas.Series(numpy.invert(result._data), index=self._index, name=self._name)
return hpat_pandas_series_notna_impl
@sdc_overload_method(SeriesType, 'ne')
def hpat_pandas_series_ne(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.ne` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method ne().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_ne_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data != other._data)
return hpat_pandas_series_ne_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_ne_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data != other)
return hpat_pandas_series_ne_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'add')
def hpat_pandas_series_add(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.add
Examples
--------
.. literalinclude:: ../../../examples/series/series_add.py
:language: python
:lines: 27-
:caption: Getting the addition of Series and other
:name: ex_series_add
.. command-output:: python ./series/series_add.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.radd <pandas.Series.radd>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.add` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method add().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_add_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data + other._data)
return hpat_pandas_series_add_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_add_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method add(). The object axis\n expected: 0')
return pandas.Series(self._data + other)
return hpat_pandas_series_add_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
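# Illustrative usage sketch (hypothetical helper) covering both branches of the
# add() overload above: Series + Series and Series + scalar.
def _example_series_add_usage():
    s1 = pandas.Series([1.0, 2.0, 3.0])
    s2 = pandas.Series([10.0, 20.0, 30.0])
    return s1.add(s2), s1.add(5.0)   # element-wise sum; scalar broadcast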
@sdc_overload_method(SeriesType, 'sub')
def hpat_pandas_series_sub(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.sub` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method sub().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_sub_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data - other._data)
return hpat_pandas_series_sub_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_sub_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data - other)
return hpat_pandas_series_sub_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@sdc_overload_method(SeriesType, 'sum')
def hpat_pandas_series_sum(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
):
"""
Pandas Series method :meth:`pandas.Series.sum` implementation.
.. only:: developer
Tests:
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_sum*
Parameters
----------
self: :class:`pandas.Series`
input series
axis:
*unsupported*
skipna: :obj:`bool`, default :obj:`True`
Exclude NA/null values when computing the result.
level:
*unsupported*
numeric_only:
*unsupported*
min_count:
*unsupported*
Returns
-------
:obj:`float`
scalar or Series (if level specified)
"""
_func_name = 'Method sum().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(axis, (types.Integer, types.Omitted)) or axis is None):
raise TypingError('{} The axis must be an Integer. Currently unsupported. Given: {}'.format(_func_name, axis))
if not (isinstance(skipna, (types.Boolean, types.Omitted, types.NoneType)) or skipna is None):
raise TypingError('{} The skipna must be a Boolean. Given: {}'.format(_func_name, skipna))
if not (isinstance(level, (types.Integer, types.StringLiteral, types.Omitted, types.NoneType)) or level is None):
raise TypingError(
'{} The level must be an Integer or level name. Currently unsupported. Given: {}'.format(
_func_name, level))
if not (isinstance(numeric_only, (types.Boolean, types.Omitted)) or numeric_only is None):
raise TypingError(
'{} The numeric_only must be a Boolean. Currently unsupported. Given: {}'.format(
_func_name, numeric_only))
if not (isinstance(min_count, (types.Integer, types.Omitted)) or min_count == 0):
raise TypingError(
'{} The min_count must be an Integer. Currently unsupported. Given: {}'.format(
_func_name, min_count))
def hpat_pandas_series_sum_impl(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_sum1
"""
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nansum(self._data)
return numpy.sum(self._data)
return hpat_pandas_series_sum_impl
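# Illustrative usage sketch (hypothetical helper): skipna defaults to True, so
# NaNs are ignored (numpy.nansum path); with skipna=False a single NaN makes
# the result NaN (numpy.sum path).
def _example_series_sum_usage():
    s = pandas.Series([1.0, numpy.nan, 3.0])
    return s.sum(), s.sum(skipna=False)   # 4.0, nan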
@sdc_overload_method(SeriesType, 'take')
def hpat_pandas_series_take(self, indices, axis=0, is_copy=False):
"""
Pandas Series method :meth:`pandas.Series.take` implementation.
.. only:: developer
Tests: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_default
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_default_unboxing
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_int
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_int_unboxing
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_str
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_str_unboxing
Parameters
----------
self: :obj:`pandas.Series`
input series
indices: :obj:`array-like`
An array of ints indicating which positions to take
axis: {0 or `index`, 1 or `columns`, None}, default 0
The axis on which to select elements. 0 means that we are selecting rows,
1 means that we are selecting columns.
*unsupported*
is_copy: :obj:`bool`, default False
Whether to return a copy of the original object or not.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object containing the elements taken from the object
"""
ty_checker = TypeChecker('Method take().')
ty_checker.check(self, SeriesType)
if (not isinstance(axis, (int, types.Integer, str, types.UnicodeType, types.StringLiteral, types.Omitted))
and axis not in (0, 'index')):
ty_checker.raise_exc(axis, 'integer or string', 'axis')
if not isinstance(is_copy, (bool, types.Boolean, types.Omitted)) and is_copy is not False:
ty_checker.raise_exc(is_copy, 'boolean', 'is_copy')
if not isinstance(indices, (types.List, types.Array)):
ty_checker.raise_exc(indices, 'array-like', 'indices')
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_take_noindex_impl(self, indices, axis=0, is_copy=False):
local_data = [self._data[i] for i in indices]
return pandas.Series(local_data, indices)
return hpat_pandas_series_take_noindex_impl
def hpat_pandas_series_take_impl(self, indices, axis=0, is_copy=False):
local_data = [self._data[i] for i in indices]
local_index = [self._index[i] for i in indices]
return pandas.Series(local_data, local_index)
return hpat_pandas_series_take_impl
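# Illustrative usage sketch (hypothetical helper): take() selects by position;
# with an explicit index the corresponding labels are carried over, without one
# the taken positions become the new index.
def _example_series_take_usage():
    s = pandas.Series([10, 20, 30, 40], index=['a', 'b', 'c', 'd'])
    return s.take([0, 2])   # values [10, 30] with index ['a', 'c']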
@sdc_overload_method(SeriesType, 'idxmax')
def hpat_pandas_series_idxmax(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.idxmax` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_str_idx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_noidx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_idx
Parameters
-----------
axis : :obj:`int`, :obj:`str`, default: None
Axis along which the operation acts
0/None - row-wise operation
1 - column-wise operation
*unsupported*
skipna: :obj:`bool`, default: True
exclude NA/null values
*unsupported*
Returns
-------
:obj:`pandas.Series.index` or nan
returns: Label of the maximum value.
"""
_func_name = 'Method idxmax().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError('{} Numeric values supported only. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, bool)) or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None):
raise TypingError("{} 'axis' unsupported. Given: {}".format(_func_name, axis))
if not (isinstance(skipna, types.Omitted) or skipna is True):
raise TypingError("{} 'skipna' unsupported. Given: {}".format(_func_name, skipna))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_idxmax_impl(self, axis=None, skipna=True):
return numpy.argmax(self._data)
return hpat_pandas_series_idxmax_impl
else:
def hpat_pandas_series_idxmax_index_impl(self, axis=None, skipna=True):
# no numpy.nanargmax is supported by Numba at this time
result = numpy.argmax(self._data)
return self._index[int(result)]
return hpat_pandas_series_idxmax_index_impl
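# Illustrative usage sketch (hypothetical helper): with a default index idxmax()
# is the positional argmax; with an explicit index the label at that position is
# returned, as in the *_index_impl branch above.
def _example_series_idxmax_usage():
    s = pandas.Series([1.0, 5.0, 3.0], index=['a', 'b', 'c'])
    return s.idxmax()   # 'b'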
@sdc_overload_method(SeriesType, 'mul')
def hpat_pandas_series_mul(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.mul
Examples
--------
.. literalinclude:: ../../../examples/series/series_mul.py
:language: python
:lines: 27-
:caption: Element-wise multiplication of two Series
:name: ex_series_mul
.. command-output:: python ./series/series_mul.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rmul <pandas.Series.rmul>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.mul` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method mul().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(level, types.Omitted) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(fill_value, types.Omitted) and fill_value is not None:
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not isinstance(axis, types.Omitted) and axis != 0:
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_mul_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method mul(). The object axis\n expected: 0')
return pandas.Series(self._data * other._data)
return hpat_pandas_series_mul_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_mul_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method mul(). The object axis\n expected: 0')
return pandas.Series(self._data * other)
return hpat_pandas_series_mul_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'div')
def hpat_pandas_series_div(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.div
Examples
--------
.. literalinclude:: ../../../examples/series/series_div.py
:language: python
:lines: 27-
:caption: Element-wise division of one Series by another (binary operator div)
:name: ex_series_div
.. command-output:: python ./series/series_div.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rdiv <pandas.Series.rdiv>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.div` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method div().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(level, types.Omitted) or level is None):
ty_checker.raise_exc(level, 'None', 'level')
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_div_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method div(). The object axis\n expected: 0')
return pandas.Series(self._data / other._data)
return hpat_pandas_series_div_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_div_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method div(). The object axis\n expected: 0')
return pandas.Series(self._data / other)
return hpat_pandas_series_div_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'truediv')
def hpat_pandas_series_truediv(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.truediv
Examples
--------
.. literalinclude:: ../../../examples/series/series_truediv.py
:language: python
:lines: 27-
:caption: Element-wise division of one Series by another (binary operator truediv)
:name: ex_series_truediv
.. command-output:: python ./series/series_truediv.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rtruediv <pandas.Series.rtruediv>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series :meth:`pandas.Series.truediv` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method truediv().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(level, types.Omitted) or level is None):
ty_checker.raise_exc(level, 'None', 'level')
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_truediv_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method truediv(). The object axis\n expected: 0')
return pandas.Series(self._data / other._data)
return hpat_pandas_series_truediv_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_truediv_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method truediv(). The object axis\n expected: 0')
return pandas.Series(self._data / other)
return hpat_pandas_series_truediv_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'floordiv')
def hpat_pandas_series_floordiv(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.floordiv` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method floordiv().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_floordiv_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data // other._data)
return hpat_pandas_series_floordiv_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_floordiv_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data // other)
return hpat_pandas_series_floordiv_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@sdc_overload_method(SeriesType, 'pow')
def hpat_pandas_series_pow(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.pow` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method pow().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_pow_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data ** other._data)
return hpat_pandas_series_pow_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_pow_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data ** other)
return hpat_pandas_series_pow_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'prod')
def hpat_pandas_series_prod(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0):
"""
Pandas Series method :meth:`pandas.Series.prod` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_prod*
Parameters
-----------
self: :obj:`pandas.Series`
input series
axis: {index (0)}
Axis for the function to be applied on.
*unsupported*
skipna: :obj:`bool`, default :obj:`True`
Exclude nan values when computing the result
level: :obj:`int`, :obj:`str`, default :obj:`None`
If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a scalar.
*unsupported*
numeric_only: :obj:`bool`, default :obj:`None`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
min_count: :obj:`int`, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result will be NA.
*unsupported*
Returns
-------
:obj:
Returns scalar or Series (if level specified)
"""
_func_name = 'Method prod().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError('{} Non numeric values unsupported. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) or skipna is None or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, (types.Omitted, types.NoneType)) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, (types.Omitted, types.NoneType)) or numeric_only is None) \
or not (isinstance(min_count, (types.Omitted, types.Integer)) or min_count == 0):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}, min_count: {}'.format(
_func_name, axis, level, numeric_only, min_count))
def hpat_pandas_series_prod_impl(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanprod(self._data)
else:
return numpy.prod(self._data)
return hpat_pandas_series_prod_impl
@sdc_overload_method(SeriesType, 'quantile')
def hpat_pandas_series_quantile(self, q=0.5, interpolation='linear'):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.quantile
Examples
--------
.. literalinclude:: ../../../examples/series/series_quantile.py
:language: python
:lines: 27-
:caption: Computing quantile for the Series
:name: ex_series_quantile
.. command-output:: python ./series/series_quantile.py
:cwd: ../../../examples
.. note::
Parameter interpolation is currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
`numpy.percentile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.percentile.html#numpy.percentile>`_
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.quantile` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_quantile
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_quantile_q_vector
Parameters
-----------
q : :obj: float or array-like object, default 0.5
the quantile(s) to compute
interpolation: 'linear', 'lower', 'higher', 'midpoint', 'nearest', default `linear`
*unsupported* by Numba
Returns
-------
:obj:`pandas.Series` or float
"""
_func_name = 'Method quantile().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(interpolation, types.Omitted) and interpolation != 'linear':
ty_checker.raise_exc(interpolation, 'str', 'interpolation')
if not isinstance(q, (int, float, list, types.Number, types.Omitted, types.List)):
ty_checker.raise_exc(q, 'int, float, list', 'q')
def hpat_pandas_series_quantile_impl(self, q=0.5, interpolation='linear'):
return numpy.quantile(self._data, q)
return hpat_pandas_series_quantile_impl
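# Illustrative usage sketch (hypothetical helper): in pandas a scalar q yields a
# float and a list of quantiles yields a Series; the overload above simply
# delegates both cases to numpy.quantile.
def _example_series_quantile_usage():
    s = pandas.Series([1.0, 2.0, 3.0, 4.0])
    return s.quantile(0.5), s.quantile([0.25, 0.75])   # 2.5 and a two-element result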
@sdc_overload_method(SeriesType, 'rename')
def hpat_pandas_series_rename(self, index=None, copy=True, inplace=False, level=None):
"""
Pandas Series method :meth:`pandas.Series.rename` implementation.
Alter Series index labels or name.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_rename
Parameters
-----------
index : :obj:`scalar` or `hashable sequence` or `dict` or `function`
Dict-like or functions are transformations to apply to the index.
Scalar or hashable sequence-like will alter the Series.name attribute.
Only scalar value is supported.
copy : :obj:`bool`, default :obj:`True`
Whether to copy underlying data.
inplace : :obj:`bool`, default :obj:`False`
Whether to return a new Series. If True then value of copy is ignored.
level : :obj:`int` or `str`
In case of a MultiIndex, only rename labels in the specified level.
*Not supported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` with index labels or name altered.
"""
ty_checker = TypeChecker('Method rename().')
ty_checker.check(self, SeriesType)
if not isinstance(index, (types.Omitted, types.UnicodeType,
types.StringLiteral, str,
types.Integer, types.Boolean,
types.Hashable, types.Float,
types.NPDatetime, types.NPTimedelta,
types.Number)) and index is not None:
ty_checker.raise_exc(index, 'string', 'index')
if not isinstance(copy, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(copy, 'boolean', 'copy')
if not isinstance(inplace, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(inplace, 'boolean', 'inplace')
if not isinstance(level, (types.Omitted, types.UnicodeType,
types.StringLiteral, types.Integer)) and level is not None:
ty_checker.raise_exc(level, 'Integer or string', 'level')
def hpat_pandas_series_rename_idx_impl(self, index=None, copy=True, inplace=False, level=None):
if copy is True:
series_data = self._data.copy()
series_index = self._index.copy()
else:
series_data = self._data
series_index = self._index
return pandas.Series(data=series_data, index=series_index, name=index)
def hpat_pandas_series_rename_noidx_impl(self, index=None, copy=True, inplace=False, level=None):
if copy is True:
series_data = self._data.copy()
else:
series_data = self._data
return pandas.Series(data=series_data, index=self._index, name=index)
if isinstance(self.index, types.NoneType):
return hpat_pandas_series_rename_noidx_impl
return hpat_pandas_series_rename_idx_impl
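# Illustrative usage sketch (hypothetical helper): only scalar `index` values are
# supported, which alter the Series name rather than relabeling the index.
def _example_series_rename_usage():
    s = pandas.Series([1, 2, 3], name='old')
    return s.rename('new')   # same data, name == 'new'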
@sdc_overload_method(SeriesType, 'min')
def hpat_pandas_series_min(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.min` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_min*
Parameters
-----------
axis:
*unsupported*
skipna: :obj:`bool` object
Exclude nan values when computing the result
level:
*unsupported*
numeric_only:
*unsupported*
Returns
-------
:obj:
returns :obj: scalar
"""
_func_name = 'Method min().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not True \
and skipna is not None:
raise TypingError(
'{} The parameter must be a boolean type. Given type skipna: {}'.format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, types.Omitted) or numeric_only is None):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}'.format(_func_name, axis, level,
numeric_only))
def hpat_pandas_series_min_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmin(self._data)
return self._data.min()
return hpat_pandas_series_min_impl
@sdc_overload_method(SeriesType, 'max')
def hpat_pandas_series_max(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.max
Examples
--------
.. literalinclude:: ../../../examples/series/series_max.py
:language: python
:lines: 27-
:caption: Getting the maximum value of Series elements
:name: ex_series_max
.. command-output:: python ./series/series_max.py
:cwd: ../../../examples
.. note::
Parameters axis, level, numeric_only are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.sum <pandas.Series.sum>`
Return the sum.
:ref:`Series.min <pandas.Series.min>`
Return the minimum.
:ref:`Series.max <pandas.Series.max>`
Return the maximum.
:ref:`Series.idxmin <pandas.Series.idxmin>`
Return the index of the minimum.
:ref:`Series.idxmax <pandas.Series.idxmax>`
Return the index of the maximum.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.max` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_max*
Parameters
-----------
axis:
*unsupported*
skipna: :obj:`bool` object
Exclude nan values when computing the result
level:
*unsupported*
numeric_only:
*unsupported*
Returns
-------
:obj:
returns :obj: scalar
"""
_func_name = 'Method max().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) or skipna is True or skipna is None):
ty_checker.raise_exc(skipna, 'bool', 'skipna')
if not isinstance(axis, types.Omitted) and axis is not None:
ty_checker.raise_exc(axis, 'None', 'axis')
if not isinstance(level, (types.Omitted, types.NoneType)) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(numeric_only, types.Omitted) and numeric_only is not None:
ty_checker.raise_exc(numeric_only, 'None', 'numeric_only')
def hpat_pandas_series_max_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmax(self._data)
return self._data.max()
return hpat_pandas_series_max_impl
@sdc_overload_method(SeriesType, 'mean')
def hpat_pandas_series_mean(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.mean` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_mean*
Parameters
-----------
axis: {index (0)}
Axis for the function to be applied on.
*unsupported*
skipna: :obj:`bool`, default True
Exclude NA/null values when computing the result.
level: :obj:`int` or level name, default None
If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a scalar.
*unsupported*
numeric_only: :obj:`bool`, default None
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data. Not implemented for Series.
*unsupported*
Returns
-------
:obj:
Return the mean of the values for the requested axis.
"""
_func_name = 'Method mean().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError(
'{} The parameter must be a boolean type. Given type skipna: {}'.format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, types.Omitted) or numeric_only is None):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}'.format(_func_name, axis, level,
numeric_only))
def hpat_pandas_series_mean_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmean(self._data)
return self._data.mean()
return hpat_pandas_series_mean_impl
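# Illustrative usage sketch (hypothetical helper) for the min()/max()/mean()
# reductions above, which all follow the same pattern: nan-aware numpy reduction
# by default, plain reduction when skipna=False.
def _example_series_reductions_usage():
    s = pandas.Series([1.0, numpy.nan, 3.0])
    return s.min(), s.max(), s.mean()   # 1.0, 3.0, 2.0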
@sdc_overload_method(SeriesType, 'mod')
def hpat_pandas_series_mod(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.mod` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method mod().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_mod_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data % other._data)
return hpat_pandas_series_mod_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_mod_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data % other)
return hpat_pandas_series_mod_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'eq')
def hpat_pandas_series_eq(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.eq` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method eq().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_eq_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data == other._data)
return hpat_pandas_series_eq_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_eq_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data == other)
return hpat_pandas_series_eq_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'ge')
def hpat_pandas_series_ge(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.ge` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method ge().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_ge_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data >= other._data)
return hpat_pandas_series_ge_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_ge_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data >= other)
return hpat_pandas_series_ge_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'idxmin')
def hpat_pandas_series_idxmin(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.idxmin` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_str_idx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_no
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_int
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_noidx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_idx
Parameters
-----------
axis : :obj:`int`, :obj:`str`, default: None
Axis along which the operation acts
0/None - row-wise operation
1 - column-wise operation
*unsupported*
skipna: :obj:`bool`, default: True
exclude NA/null values
*unsupported*
Returns
-------
:obj:`pandas.Series.index` or nan
returns: Label of the minimum value.
"""
_func_name = 'Method idxmin().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError('{} Numeric values supported only. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, bool)) or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None):
raise TypingError("{} 'axis' unsupported. Given: {}".format(_func_name, axis))
if not (isinstance(skipna, types.Omitted) or skipna is True):
raise TypingError("{} 'skipna' unsupported. Given: {}".format(_func_name, skipna))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_idxmin_impl(self, axis=None, skipna=True):
return numpy.argmin(self._data)
return hpat_pandas_series_idxmin_impl
else:
def hpat_pandas_series_idxmin_index_impl(self, axis=None, skipna=True):
# no numpy.nanargmin is supported by Numba at this time
result = numpy.argmin(self._data)
return self._index[int(result)]
return hpat_pandas_series_idxmin_index_impl
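# Hedged illustration (assumption: SDC jit context with support for boxing this
# index type): with an explicit index the overload above returns the index label
# of the minimum, otherwise it returns the position.
def _example_series_idxmin_usage():
    import numba

    @numba.njit
    def jitted_idxmin(s):
        return s.idxmin()

    return jitted_idxmin(pandas.Series([3.0, 1.0, 2.0], index=[10, 20, 30]))  # 20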
@sdc_overload_method(SeriesType, 'lt')
def hpat_pandas_series_lt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.lt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method lt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_lt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data < other._data)
return hpat_pandas_series_lt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_lt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data < other)
return hpat_pandas_series_lt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'gt')
def hpat_pandas_series_gt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.gt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method gt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_gt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data > other._data)
return hpat_pandas_series_gt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_gt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data > other)
return hpat_pandas_series_gt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'le')
def hpat_pandas_series_le(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.le` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method le().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_le_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data <= other._data)
return hpat_pandas_series_le_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_le_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data <= other)
return hpat_pandas_series_le_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
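# Hedged sketch covering the comparison overloads above (eq/ge/lt/gt/le): each one
# wraps the element-wise comparison of the underlying data, for both Series and
# scalar right-hand sides. Assumes an SDC jit context with numba available.
def _example_series_comparison_usage():
    import numba

    @numba.njit
    def jitted_ge(s, threshold):
        return s.ge(threshold)

    return jitted_ge(pandas.Series([1.0, 5.0, 3.0]), 3.0)  # [False, True, True]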
@sdc_overload_method(SeriesType, 'abs')
def hpat_pandas_series_abs(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.abs
Examples
--------
.. literalinclude:: ../../../examples/series/series_abs.py
:language: python
:lines: 27-
:caption: Getting the absolute value of each element in Series
:name: ex_series_abs
.. command-output:: python ./series/series_abs.py
:cwd: ../../../examples
.. seealso::
`numpy.absolute <https://docs.scipy.org/doc/numpy/reference/generated/numpy.absolute.html>`_
Calculate the absolute value element-wise.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.abs` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_abs1
Parameters
-----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` containing the absolute value of elements
"""
_func_name = 'Method abs().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(self.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} The function only applies to elements that are all numeric. Given data type: {}'.format(_func_name,
self.dtype))
def hpat_pandas_series_abs_impl(self):
return pandas.Series(numpy.abs(self._data))
return hpat_pandas_series_abs_impl

@sdc_overload_method(SeriesType, 'unique')
def hpat_pandas_series_unique(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.unique
Examples
--------
.. literalinclude:: ../../../examples/series/series_unique.py
:language: python
:lines: 27-
:caption: Getting unique values in Series
:name: ex_series_unique
.. command-output:: python ./series/series_unique.py
:cwd: ../../../examples
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.unique` implementation.
Note: Return values order is unspecified
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique_sorted
Parameters
-----------
self: :class:`pandas.Series`
input arg
Returns
-------
:obj:`numpy.array`
returns :obj:`numpy.array` ndarray
"""
ty_checker = TypeChecker('Method unique().')
ty_checker.check(self, SeriesType)
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_unique_str_impl(self):
'''
Returns sorted unique elements of an array
Note: Can't use Numpy because StringArrayType has no ravel() in nopython mode.
Also, NotImplementedError: unicode_type cannot be represented as a Numpy dtype
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique_str
'''
str_set = set(self._data)
return to_array(str_set)
return hpat_pandas_series_unique_str_impl
def hpat_pandas_series_unique_impl(self):
'''
Returns sorted unique elements of an array
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique
'''
return numpy.unique(self._data)
return hpat_pandas_series_unique_impl
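# Hedged plain-Python analogue (not SDC-specific): the numeric branch above relies
# on numpy.unique, which also sorts, while the string branch builds a set, so its
# order is unspecified (as the docstring notes).
def _example_series_unique_analogue():
    return numpy.unique(numpy.array([3, 1, 3, 2]))  # array([1, 2, 3])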
@sdc_overload_method(SeriesType, 'cumsum')
def hpat_pandas_series_cumsum(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.cumsum` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_full
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_unsupported_axis
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
*args:
*unsupported*
Returns
-------
:obj:`scalar`, :obj:`pandas.Series`
returns :obj:`scalar` or :obj:`pandas.Series` object
"""
_func_name = 'Method cumsum().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(axis, (types.Omitted, types.NoneType)) and axis is not None:
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
def hpat_pandas_series_cumsum_impl(self, axis=None, skipna=True):
if skipna:
# numpy.nancumsum replaces NaNs with 0, series.cumsum does not, so replace 0 back with NaNs
local_data = numpy.nancumsum(self._data)
local_data[numpy.isnan(self._data)] = numpy.nan
return pandas.Series(local_data)
import pandas as pd
from fbprophet import Prophet
from fbprophet.plot import add_changepoints_to_plot
from fbprophet.diagnostics import cross_validation
from fbprophet.diagnostics import performance_metrics
from fbprophet.plot import plot_cross_validation_metric
from time import gmtime, strftime
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from os import listdir
from os.path import isfile, join
class suppress_stdout_stderr(object):
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
os.close(self.null_fds[0])
os.close(self.null_fds[1])
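# Hedged usage sketch: suppress_stdout_stderr is a context manager that silences the
# C-level stdout/stderr chatter emitted by Prophet's Stan backend during fitting.
# df_history is assumed to already hold the 'ds'/'y' columns Prophet expects.
def _example_suppressed_fit(df_history):
    with suppress_stdout_stderr():
        return Prophet().fit(df_history)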
class KPIForecaster():
def __init__(self, conf):
# store the configuration object
self.conf = conf
def makeDir(self, path):
if os.path.exists(path):
pass
else:
os.makedirs(path)
def getTrainingData(self, df_kpi, cell, KPI = 'DL_USER_THROUGHPUT_MBPS'):
# create blank dataframe
df = pd.DataFrame()
# Get cell specific info
cell_df = df_kpi[df_kpi["CELL_NAME"] == cell].copy()
# Convert to pandas datatime format
df['ds'] = pd.to_datetime(cell_df['START_TIME'])
#print(df.info())
# Extract KPI
df['y'] = cell_df[KPI]
# Sort by date
df = df.sort_values("ds")
# Edit datetime format so we can drop the timestamp and filter by date (DD/MM/YY)
df['Date'] = df['ds'].dt.strftime('%d/%m/%y')
try:
if self.conf["filter_training_period"] == "Yes":
print("Filtering by Dates")
df['Date'] = pd.to_datetime(df['Date'])
start_date = self.conf["training_data_start_date"]
end_date = self.conf["training_data_end_date"]
mask = (df['Date'] >= start_date) & (df['Date'] <= end_date)
df = df.loc[mask]
df['y'].replace(0, np.nan, inplace=True)
df['y'].fillna((df['y'].mean()), inplace=True)
except:
raise Exception("Invalid Date Format, please use YYYY-MM-DD")
return df
def getForecast(self, df):
prophet = Prophet(changepoint_prior_scale = self.conf["changepoint_prior_scale"],
seasonality_mode= self.conf["seasonality_mode"]
)
future = pd.DataFrame()
df['cap'] = 200
df['floor'] = 0
future['cap'] = 200
future['floor'] = 0
with suppress_stdout_stderr():
m = prophet.fit(df)
future = prophet.make_future_dataframe(periods=24*self.conf["forecast_days"], freq='H')
forecast = prophet.predict(future)
fig = prophet.plot(forecast)
return prophet, forecast, m
def saveModel(self, prophet, m, forecast, cell, KPI = "DL_USER_THROUGHPUT_MBPS"):
import pickle
#folder_name = strftime("%Y_%m_%d", gmtime())
folder_name = "new"
pkl_path = "./models/" + KPI +"/" + folder_name + "/" + str(cell ) + ".pkl"
self.makeDir("./models/" + KPI +"/" + folder_name)
with open(pkl_path, "wb") as f:
# Pickle the 'Prophet' model using the highest protocol available.
pickle.dump(m, f)
# save the dataframe
pkl_fore_cast_path = "./models/" + KPI +"/" + folder_name + "/" + str(cell ) + "_forecast.pkl"
forecast.to_pickle(pkl_fore_cast_path)
fig_file_name = "./models/" + KPI +"/" + folder_name + "/" + str(cell ) + "_plot.jpg"
fig = prophet.plot(forecast)
fig.savefig(fig_file_name, bbox_inches='tight', pad_inches=0)
plt.close(fig)
def analyzeData(self, forecast, df_last_day, last_day, cell):
#cell = "TNTAA405_L02A"
forecast['Date'] = forecast['ds'].dt.strftime('%d/%m/%y')
forecast['pred_upper_15'] = forecast['yhat_upper'] *(1+self.conf["threshold_margin"])
forecast['pred_lower_15'] = forecast['yhat_lower'] * (1-self.conf["threshold_margin"])
forecast['CELL_NAME'] = cell
# Get last 24 hours
forecast_last_day = forecast.loc[forecast['Date'] == last_day]
forecast_last_day = forecast_last_day[['CELL_NAME','ds', 'Date','pred_upper_15','pred_lower_15','yhat']]
result = pd.merge(forecast_last_day.reset_index(), df_last_day.reset_index(), on=['ds'], how='inner')
foreLD = result[['CELL_NAME','ds', 'Date_x','pred_upper_15','pred_lower_15','yhat','y']]
foreLD.columns = ['CELL_NAME','ds', 'Date','pred_upper_15','pred_lower_15','Expected_Value','Actual_Value']
pd.options.mode.chained_assignment = None
foreLD['Exceeds_Thresh'] = foreLD['Actual_Value'] >= foreLD['pred_upper_15']
foreLD['Under_Thresh'] = foreLD['Actual_Value'] <= foreLD['pred_lower_15']
foreLD.loc[(foreLD['Exceeds_Thresh'] == True) | (foreLD['Under_Thresh'] == True), 'Investigate_Cell'] = True
foreLD.loc[(foreLD['Exceeds_Thresh'] != True) & (foreLD['Under_Thresh'] != True), 'Investigate_Cell'] = False
return foreLD
def getForecastData(self, cell, KPI):
mypath = "./models/" + KPI
subfolder = [f.path for f in os.scandir(mypath) if f.is_dir()][0]
file_names = [f for f in listdir(subfolder) if isfile(join(subfolder, f))]
file_names.sort(key=lambda x: os.stat(os.path.join(subfolder, x)).st_mtime)
file_name = cell + "_forecast.pkl"
path = subfolder + "/" + file_name
try:
unpickled_df = pd.read_pickle(path)
return unpickled_df
except:
raise Exception("Models not found")
def getLastDay(self, df_kpi, KPI = 'DL_USER_THROUGHPUT_MBPS', cell = ''):
# create blank dataframe
df = pd.DataFrame()
# Get cell specific info
cell_df = df_kpi[df_kpi["CELL_NAME"] == cell].copy()
# Convert to pandas datetime format
df['ds'] = pd.to_datetime(cell_df['START_TIME'])
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
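# Hedged example (not part of the original suite): the `delta` fixture above yields
# four equivalent representations of two hours; adding any of them to a
# TimedeltaIndex should give the same result.
def test_tdi_add_delta_example(delta):
    tdi = TimedeltaIndex(['1 days', '2 days'])
    result = tdi + delta
    expected = TimedeltaIndex(['1 days 02:00:00', '2 days 02:00:00'])
    tm.assert_index_equal(result, expected)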
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
"""Alpha Vantage Model"""
__docformat__ = "numpy"
import logging
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
import requests
from alpha_vantage.fundamentaldata import FundamentalData
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import long_number_format
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.fundamental_analysis.fa_helper import clean_df_index
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_overview(ticker: str) -> pd.DataFrame:
"""Get alpha vantage company overview
Parameters
----------
ticker : str
Stock ticker
Returns
-------
pd.DataFrame
Dataframe of fundamentals
"""
# Request OVERVIEW data from Alpha Vantage API
s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
result = requests.get(s_req, stream=True)
# If the returned data was successful
if result.status_code == 200:
# Parse json data to dataframe
if "Note" in result.json():
console.print(result.json()["Note"], "\n")
return pd.DataFrame()
df_fa = pd.json_normalize(result.json())
# Keep json data sorting in dataframe
df_fa = df_fa[list(result.json().keys())].T
df_fa.iloc[5:] = df_fa.iloc[5:].applymap(lambda x: long_number_format(x))
clean_df_index(df_fa)
df_fa = df_fa.rename(
index={
"E b i t d a": "EBITDA",
"P e ratio": "PE ratio",
"P e g ratio": "PEG ratio",
"E p s": "EPS",
"Revenue per share t t m": "Revenue per share TTM",
"Operating margin t t m": "Operating margin TTM",
"Return on assets t t m": "Return on assets TTM",
"Return on equity t t m": "Return on equity TTM",
"Revenue t t m": "Revenue TTM",
"Gross profit t t m": "Gross profit TTM",
"Diluted e p s t t m": "Diluted EPS TTM",
"Quarterly earnings growth y o y": "Quarterly earnings growth YOY",
"Quarterly revenue growth y o y": "Quarterly revenue growth YOY",
"Trailing p e": "Trailing PE",
"Forward p e": "Forward PE",
"Price to sales ratio t t m": "Price to sales ratio TTM",
"E v to revenue": "EV to revenue",
"E v to e b i t d a": "EV to EBITDA",
}
)
return df_fa
return pd.DataFrame()
@log_start_end(log=logger)
def get_key_metrics(ticker: str) -> pd.DataFrame:
"""Get key metrics from overview
Parameters
----------
ticker : str
Stock ticker
Returns
-------
pd.DataFrame
Dataframe of key metrics
"""
# Request OVERVIEW data
s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
result = requests.get(s_req, stream=True)
# If the returned data was successful
if result.status_code == 200:
df_fa = pd.json_normalize(result.json())
df_fa = df_fa[list(result.json().keys())].T
df_fa = df_fa.applymap(lambda x: long_number_format(x))
clean_df_index(df_fa)
df_fa = df_fa.rename(
index={
"E b i t d a": "EBITDA",
"P e ratio": "PE ratio",
"P e g ratio": "PEG ratio",
"E p s": "EPS",
"Return on equity t t m": "Return on equity TTM",
"Price to sales ratio t t m": "Price to sales ratio TTM",
}
)
as_key_metrics = [
"Market capitalization",
"EBITDA",
"EPS",
"PE ratio",
"PEG ratio",
"Price to book ratio",
"Return on equity TTM",
"Price to sales ratio TTM",
"Dividend yield",
"50 day moving average",
"Analyst target price",
"Beta",
]
return df_fa.loc[as_key_metrics]
return pd.DataFrame()
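# Hedged usage sketch (assumes a valid Alpha Vantage key is configured in
# cfg.API_KEY_ALPHAVANTAGE): both helpers above return an indexed DataFrame, or an
# empty DataFrame when the API rejects the request or the rate limit is hit.
def _example_key_metrics_usage(ticker: str = "AAPL") -> pd.DataFrame:
    df_metrics = get_key_metrics(ticker)
    if df_metrics.empty:
        console.print("No data returned (invalid ticker or API limit reached)\n")
    return df_metrics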
@log_start_end(log=logger)
def get_income_statements(
ticker: str, number: int, quarterly: bool = False
) -> pd.DataFrame:
"""Get income statements for company
Parameters
----------
ticker : str
Stock ticker
number : int
Number of past to get
quarterly : bool, optional
Flag to get quarterly instead of annual, by default False
Returns
-------
pd.DataFrame
Dataframe of income statements
"""
url = f"https://www.alphavantage.co/query?function=INCOME_STATEMENT&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
r = requests.get(url)
if r.status_code == 200:
statements = r.json()
df_fa = pd.DataFrame()
import scanpy as sc
import pandas as pd
import numpy as np
import anndata as ad
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import gseapy as gp
import math
import os
def check_filter_single_cluster(adata,key):
vc = adata.obs[key].value_counts()
exclude_clusters= vc.loc[vc==1].index
truth = np.logical_not(adata.obs[key].isin(exclude_clusters).values)
adata_valid = adata[truth,:]
return adata_valid
def doublet_compute(adata,key):
cluster_to_doublet = {}
for cluster in adata.obs[key].astype('category').cat.categories:
mean_score = adata[adata.obs[key]==cluster,:].obs['doublet_scores'].values.mean()
cluster_to_doublet[cluster] = mean_score
return cluster_to_doublet
def compute_combo_score(rank_uns,cluster):
rank_names = rank_uns['names'][cluster]
rank_lfc = rank_uns['logfoldchanges'][cluster]
rank_pval = rank_uns['pvals'][cluster]
df = pd.DataFrame({'names':rank_names,'lfc':rank_lfc,'pval':rank_pval})
# filter out down-regulated genes
df = df.loc[df['lfc'] > 0, :]
df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)
# the rank of each gene by lfc, the larger, the better, make argsort result reverse
temp = np.flip(np.argsort(df['lfc'].values))
ranks_lfc = np.empty_like(temp)
ranks_lfc[temp] = np.arange(len(df['pval'].values))
# the rank of each gene by pval, the smaller, the better
temp = np.argsort(df['pval'].values)
ranks_pval = np.empty_like(temp)
ranks_pval[temp] = np.arange(len(df['pval'].values))
# combo rank score
temp = (ranks_lfc + ranks_pval) / 2
df['rank_lfc'] = ranks_lfc
df['rank_pval'] = ranks_pval
df['combo'] = temp
df.sort_values(by='combo', inplace=True)
df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)
# filter out the genes if pval > 0.05
df = df.loc[df['pval']<0.05,:]
df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)
return df
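# Hedged toy illustration of the combo ranking above (illustrative values only):
# genes are ranked by descending log-fold-change and by ascending p-value, and the
# two ranks are averaged into the 'combo' score, smaller being better.
def _example_combo_rank():
    df = pd.DataFrame({'names': ['g1', 'g2', 'g3'],
                       'lfc': [2.0, 0.5, 1.0],
                       'pval': [0.01, 0.04, 0.001]})
    df['rank_lfc'] = np.flip(np.argsort(df['lfc'].values)).argsort()
    df['rank_pval'] = np.argsort(df['pval'].values).argsort()
    df['combo'] = (df['rank_lfc'] + df['rank_pval']) / 2
    return df.sort_values(by='combo')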
def run_enrichr(gene_list,key,name,folder):
# run enrichr
artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\t')
artifact_dict = artifact.groupby(by='class')['genes'].apply(lambda x:x.tolist()).to_dict()
enr2 = gp.enrichr(gene_list=gene_list,
description=name,
gene_sets=artifact_dict,
background=20000,
outdir=os.path.join(folder,'scTriangulate_local_mode_enrichr'),
cutoff=0.1, # adj-p for plotting
verbose=True)
enrichr_result = enr2.results
enrichr_dict = {}
for metric in artifact_dict.keys():
if enrichr_result.shape[0] == 0: # no enrichment for any of the above terms
enrichr_dict[metric] = 0
else:
try:
enrichr_score = -math.log10(enrichr_result.loc[enrichr_result['Term']==metric,:]['Adjusted P-value'].to_list()[0])
except IndexError:
enrichr_dict[metric] = 0
else:
enrichr_dict[metric] = enrichr_score
return enrichr_dict
def run_gsea(gene_list,key,name,folder):
artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\t')
artifact_dict = artifact.groupby(by='class')['genes'].apply(lambda x:x.tolist()).to_dict()
artifact_dict_keys = list(artifact_dict.keys())
df = pd.DataFrame({0: gene_list, 1: 1/(np.arange(len(gene_list))+1)}) # col 1 is for descending rank of gene
gsea_dict = {}
try:
pre_res = gp.prerank(rnk=df, gene_sets=artifact_dict,
permutation_num=100,
outdir=os.path.join(folder,'scTriangulate_local_mode_gsea/{}/{}'.format(key,name)),
min_size=1,
max_size=10000,
seed=6,
verbose=True) # run this will cause artifact dict decreasing !! Caveats!!!
except: # no hit return, all metrics are zero
for metric in artifact_dict_keys:
gsea_dict[metric] = (0,0) # first is nes, second is #hit
else:
gsea_result = pre_res.res2d
metric_get = set(gsea_result.index.tolist())
for metric in artifact_dict_keys:
if metric in metric_get:
gsea_score = gsea_result.loc[gsea_result.index==metric,:]['nes'].to_list()[0]
gsea_hits = gsea_result.loc[gsea_result.index==metric,:]['matched_size'].to_list()[0]
gsea_dict[metric] = (gsea_score, gsea_hits)
else: # not enriched
gsea_dict[metric] = (0,0)
return gsea_dict
def read_artifact_genes(species,criterion):
'''
criterion1: all will be artifact
criterion2: all will be artifact except cellcycle
criterion3: all will be artifact except cellcycle, ribosome
criterion4: all will be artifact except cellcycle, ribosome, mitochondrial
criterion5: all will be artifact except cellcycle, ribosome, mitochondrial, antisense
criterion6: all will be artifact except cellcycle, ribosome, mitochondrial, antisense, predict_gene
'''
artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\t',index_col=0)
artifact = artifact.loc[artifact['species']==species,:]
if criterion == 1:
artifact = artifact
elif criterion == 2:
artifact = artifact.loc[~(artifact['class']=='cellcycle'),:]
elif criterion == 3:
artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')),:]
elif criterion == 4:
artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')|(artifact['class']=='mitochondrial')),:]
elif criterion == 5:
artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')|(artifact['class']=='mitochondrial')|(artifact['class']=='antisense')),:]
elif criterion == 6:
artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')|(artifact['class']=='mitochondrial')|(artifact['class']=='antisense')|(artifact['class']=='predict_gene')),:]
return artifact
def purify_gene(genelist,species,criterion):
result = []
artifact = read_artifact_genes(species,criterion)
artifact_genes = set(artifact.index.to_list())
for gene in genelist:
if gene not in artifact_genes:
result.append(gene)
return result
def marker_gene(adata, key, species, criterion, folder):
# delete previous rank_gene_gruops if present
if adata.uns.get('rank_genes_groups') != None:
del adata.uns['rank_genes_groups']
# perform t-test
sc.tl.rank_genes_groups(adata, key, method='t-test',n_genes=adata.shape[1])
all_genes = adata.var_names.values # ndarray, all the genes
all_clusters = adata.obs[key].cat.categories # pd.Index, all the clusters
cluster2gene = dict() # {'cluster1':[gene1,gene2..]}
rank_uns = adata.uns['rank_genes_groups']
pre_computed_dfs = []
for cluster in all_clusters:
cluster2gene[cluster] = []
df = compute_combo_score(rank_uns, cluster)
pre_computed_dfs.append(df)
for gene in all_genes:
index_store = []
for i,cluster in enumerate(all_clusters):
df = pre_computed_dfs[i]
# get the rank of the gene in each cluster
try:
index = np.nonzero(df['names'].values == gene)[0][0] # the rank of this gene in each cluster
except IndexError:
index = len(all_genes)
index_store.append(index)
if np.all(np.array(index_store) == len(all_genes)):
continue
assign = all_clusters[np.argmin(np.array(index_store))] # get argmin, take the corresponding cluster
cluster2gene[assign].append((gene,np.min(index_store)))
# sort the cluster2gene
for key_,value in cluster2gene.items():
gene = [item[0] for item in value]
rank = [item[1] for item in value]
temp = sorted(zip(gene,rank),key=lambda x:x[1])
cluster2gene[key_] = [item[0] for item in temp]
result = pd.Series(cluster2gene).to_frame()
result.columns = ['whole_marker_genes']
'''
now the result is a dataframe
whole_marker_genes
cluster1 gene_list
cluster2 gene_list
'''
# now let's perform enrichr and GSEA, and get puried marker gene
col_enrichr = []
col_gsea = []
col_purify = [] # genelist that have artifact genes removed
for cluster in result.index:
enrichr_dict = run_enrichr(result.loc[cluster,:].to_list()[0],key=key,name=cluster,folder=folder) # [0] because it is a [[gene_list]],we only need [gene_list]
gsea_dict = run_gsea(result.loc[cluster,:].to_list()[0],key=key,name=cluster,folder=folder)
purified = purify_gene(result.loc[cluster,:].to_list()[0],species,criterion) # the [0] is explained last line
col_enrichr.append(enrichr_dict)
col_gsea.append(gsea_dict)
col_purify.append(purified)
result['enrichr'] = col_enrichr
result['gsea'] = col_gsea
result['purify'] = col_purify
return result
def reassign_score(adata,key,marker,regress_size=False):
# get gene pool, slice the adata
num = 30
pool = []
for i in range(marker.shape[0]):
marker_genes = marker.iloc[i]['purify']
pick = marker_genes[:num] # if the list doesn't have more than 30 markers, it is oK, python will automatically choose all
pool.extend(pick)
pool = list(set(pool))
adata_now = adata[:,pool].copy()
# mean-centered and divide the std of the data
tmp = adata_now.X
from sklearn.preprocessing import scale
tmp_scaled = scale(tmp,axis=0)
adata_now.X = tmp_scaled
# reducing dimension
from sklearn.decomposition import PCA
reducer = PCA(n_components=30)
scoring = reducer.fit_transform(X=adata_now.X)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
scoring_y = le.fit_transform(adata_now.obs[key].astype('str'))
order = le.classes_
# compute the centroid of each cluster
X = np.empty([len(adata_now.obs[key].cat.categories),scoring.shape[1]])
y = []
for i,cluster in enumerate(adata_now.obs[key].cat.categories):
bool_index = adata_now.obs[key]==cluster
centroid = np.mean(scoring[bool_index,:],axis=0)
X[i,:] = centroid
y.append(cluster)
y = le.fit_transform(y)
# train a KNN classifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
# if number of centroid(training data) < N_neighbors, will raise error, we hard code it to be 10
n_neighbors = 10
if X.shape[0] < n_neighbors:
n_neighbors = X.shape[0]
model = KNeighborsClassifier(n_neighbors=n_neighbors,weights='distance')
model.fit(X,y)
pred = model.predict(scoring) # (n_samples,)
mat = confusion_matrix(scoring_y,pred)
confusion_reassign = pd.DataFrame(data=mat,index=order,columns=order)
accuracy = []
for i in range(mat.shape[0]):
accuracy.append(mat[i,i]/np.sum(mat[i,:]))
cluster_to_accuracy = {}
for i,cluster in enumerate(order):
cluster_to_accuracy[cluster] = accuracy[i]
# whether to regress out the clutser size effect or not
if regress_size:
key_metric_dict = cluster_to_accuracy
key_size_dict = get_size_in_metrics(adata.obs,key)
df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1) # index is cluster, col1 is metric, col2 is size
cluster_to_accuracy = regress_size(df_inspect,regressor='GLM',to_dict=True)
return cluster_to_accuracy, confusion_reassign
'''below is the part for regression score'''
def background_normalizer(df,n_neighbors=10,scale=True):
# df is a two column dataframe where first column is metric and second column is size
from copy import deepcopy
df = deepcopy(df)
df['order'] = np.arange(df.shape[0])
col = []
for i in range(df.shape[0]):
this_metric = df[0][i]
distance_to_this = (df[0] - this_metric).abs()
df_tmp = deepcopy(df)
df_tmp['distance'] = distance_to_this.values
df_tmp.sort_values(by='distance',inplace=True)
neighbors_metric = df_tmp.iloc[:,0][:n_neighbors].values
mean_ = neighbors_metric.mean()
std_ = neighbors_metric.std()
if scale:
if std_ == 0:
col.append(0)
else:
col.append((this_metric-mean_)/std_)
else:
col.append(this_metric-mean_)
df['normalized'] = col
return df
def regress_size(df_inspect,regressor='background_zscore',n_neighbors=10,to_dict=False):
# df_inspect, index is cluster name, col1 is metric, col2 is size
if regressor == 'background_zscore':
df_now = background_normalizer(df_inspect,n_neighbors,True)
residual = df_now['normalized'].values
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
elif regressor == 'background_mean':
df_now = background_normalizer(df_inspect,n_neighbors,False)
residual = df_now['normalized'].values
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
elif regressor == 'GLM':
endog = df_inspect[0] # metric
exog = df_inspect[1] # size
import statsmodels.api as sm
exog = sm.add_constant(exog,prepend=True)
model = sm.GLM(endog,exog,family=sm.families.Gaussian())
res = model.fit()
residual = res.resid_response
normalized_metric_series = residual
elif regressor == 'Huber':
endog = df_inspect[0] # metric
exog = df_inspect[1] # size
from sklearn.linear_model import HuberRegressor
model = HuberRegressor().fit(exog.values.reshape(-1,1),endog.values)
prediction = model.predict(exog.values.reshape(-1,1))
residual = endog.values - prediction
# outliers = model.outliers_
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
elif regressor == 'RANSAC':
endog = df_inspect[0] # metric
exog = df_inspect[1] # size
from sklearn.linear_model import RANSACRegressor
model = RANSACRegressor().fit(exog.values.reshape(-1,1),endog.values)
prediction = model.predict(exog.values.reshape(-1,1))
residual = endog.values - prediction
#outliers = np.logical_not(model.inlier_mask_)
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
elif regressor == 'TheilSen':
endog = df_inspect[0] # metric
exog = df_inspect[1] # size
from sklearn.linear_model import TheilSenRegressor
model = TheilSenRegressor().fit(exog.values.reshape(-1,1),endog.values)
prediction = model.predict(exog.values.reshape(-1,1))
residual = endog.values - prediction
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
if to_dict:
normalized_metric_dict = normalized_metric_series.to_dict()
final = normalized_metric_dict
else:
final = normalized_metric_series
return final
def tf_idf_bare_compute(df,cluster):
'''
now the df contains all the gene for and an additional column for cluster
'''
# compute its tf_idf
tmp1 = df.loc[df['cluster'] == cluster, :].loc[:,df.columns!='cluster'].values # (n_cells,n_genes)
tf = np.count_nonzero(tmp1,axis=0) / tmp1.shape[0] # (n_genes,)
tf = tf + 1e-5
tmp2 = df.loc[:,df.columns!='cluster'].values
df_ = np.count_nonzero(tmp2,axis=0) / tmp2.shape[0] # (n_genes,)
df_ = df_ + 1e-5
idf = -np.log10(df_)
tf_idf_ori = tf * idf # (n_genes,)
return tf_idf_ori
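# Hedged numeric illustration of the score above (toy counts): tf is the detection
# rate of a gene inside the queried cluster, idf penalises genes detected broadly,
# so the cluster-restricted geneA outscores the ubiquitous geneB.
def _example_tf_idf_bare():
    toy = pd.DataFrame({'geneA': [1, 1, 0, 0], 'geneB': [1, 1, 1, 1]})
    toy['cluster'] = ['c1', 'c1', 'c2', 'c2']
    return tf_idf_bare_compute(toy, 'c1')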
def single_size_query(obs,c):
# c would be {gs:ERP4}
key = list(c.keys())[0]
cluster = list(c.values())[0]
size = obs.loc[obs[key]==cluster,:].shape[0]
return size
def get_size_in_metrics(obs,key):
key_size_dict = {} # {ERP1:54,ERP2:100....}
for cluster in obs[key].unique():
size = single_size_query(obs,{key:cluster})
key_size_dict[cluster] = size
return key_size_dict
def tf_idf10_for_cluster(adata,key,species,criterion,regress_size=False):
df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names)
df['cluster'] = adata.obs[key].astype('str').values
cluster_to_tfidf10 = {} # store tfidf10 score
cluster_to_exclusive = {} # store exclusivly expressed genes
for item in adata.obs[key].cat.categories:
a = tf_idf_bare_compute(df,item)
a_names = adata.var_names
test = pd.Series(data=a, index=a_names)
test.sort_values(ascending=False, inplace=True)
# remove artifact genes
artifact = read_artifact_genes(species,criterion)
artifact_genes = set(artifact.index.to_list())
test_pure = test.loc[~test.index.isin(artifact_genes)]
result10 = test_pure.iloc[9]
cluster_to_tfidf10[item] = result10
cluster_to_exclusive[item] = test_pure.to_dict()
exclusive_genes = pd.Series(cluster_to_exclusive,name='genes')
# whether to regress out the clutser size effect or not
if regress_size:
key_metric_dict = cluster_to_tfidf10
key_size_dict = get_size_in_metrics(adata.obs,key)
df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1) # index is cluster, col1 is metric, col2 is size
cluster_to_tfidf10 = regress_size(df_inspect,regressor='GLM',to_dict=True)
return cluster_to_tfidf10, exclusive_genes
def tf_idf5_for_cluster(adata,key,species,criterion,regress_size=False):
df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names)
df['cluster'] = adata.obs[key].astype('str').values
cluster_to_tfidf5 = {} # store tfidf1 score
for item in adata.obs[key].cat.categories:
a = tf_idf_bare_compute(df,item)
a_names = adata.var_names
test = pd.Series(data=a, index=a_names)
test.sort_values(ascending=False, inplace=True)
# remove artifact genes
artifact = read_artifact_genes(species,criterion)
artifact_genes = set(artifact.index.to_list())
test_pure = test.loc[~test.index.isin(artifact_genes)]
result5 = test_pure.iloc[4]
cluster_to_tfidf5[item] = result5
# whether to regress out the clutser size effect or not
if regress_size:
key_metric_dict = cluster_to_tfidf5
key_size_dict = get_size_in_metrics(adata.obs,key)
df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1) # index is cluster, col1 is metric, col2 is size
import gc
import numpy as np
import pandas as pd
from itertools import chain
from sklearn.decomposition import IncrementalPCA
import sklearn.linear_model
import sklearn.naive_bayes
import sklearn.ensemble
import sklearn.gaussian_process
from sklearn import metrics
from ..utils import map_fn
from ..config import cfg
from .base_model import BaseModel
def init_clf(clf_name, params):
model = params['models'][clf_name]
clf = eval(f'{model}()')
hparams = params[clf_name]['hparams']
if hparams is not None:
clf.set_params(**hparams)
return clf
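# Hedged sketch of the params layout init_clf expects (names below are illustrative,
# not taken from the project config): 'models' maps a short classifier name to an
# importable estimator expression consumed by eval(), and each short name carries
# its own 'hparams' dict (or None).
_EXAMPLE_PARAMS = {
    'models': {'logreg': 'sklearn.linear_model.LogisticRegression'},
    'logreg': {'hparams': {'C': 1.0, 'max_iter': 200}},
}
# e.g. clf = init_clf('logreg', _EXAMPLE_PARAMS)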
def cv_worker_fn(args_in):
'''Worker function for CV train & eval:
1) fit a classifier to X_train/y_train
2) predict for X_val
3) return binary classification metrics (eval mode only)
'''
clf_name = args_in['clf_name']
X_train = args_in['X_train']
y_train = args_in['y_train']
X_val = args_in['X_val']
y_val = args_in['y_val']
model = args_in['params']['models'][clf_name]
clf = init_clf(clf_name, args_in['params'])
if args_in['mode'] == 'eval' and args_in['cv_params'] is not None:
clf.set_params(**args_in['cv_params'])
clf.fit(X_train, y_train)
predict = clf.predict(X_val)
if args_in['predict_proba']:
predict_proba = clf.predict_proba(X_val)
if args_in['mode'] == 'eval':
columns_list = ['fold_no', *args_in['cv_params'].keys(), 'auc', 'acc']
acc = metrics.accuracy_score(y_val, predict)
if args_in['predict_proba']:
auc = metrics.roc_auc_score(y_val, predict_proba[:, 1])
else: auc = np.nan
results_row = [args_in['fold'],
*[str(i) for i in args_in['cv_params'].values()], auc, acc]
round_results = pd.DataFrame([results_row], columns=columns_list)
gc.collect()
return round_results
elif args_in['mode'] == 'outputs':
outputs_df = pd.DataFrame(predict, columns=[args_in['clf_name'] + '_predict'],
index=X_val.index)
if args_in['predict_proba']:
pred_ser = pd.Series(predict_proba[:, 1], index=X_val.index)
pred_ser = pred_ser.rename(args_in['clf_name'] + '_predict_proba')
outputs_df = pd.concat([outputs_df, pred_ser], axis=1)
if args_in['decision_func']:
dec_func = clf.decision_function(X_val)
dec_ser = pd.Series(dec_func, index=X_val.index)
dec_ser = dec_ser.rename(args_in['clf_name'] + '_dec_func')
outputs_df = pd.concat([outputs_df, dec_ser], axis=1)
from selenium import webdriver
import datetime as dt
import pandas as pd
import os
import time as time
import platform
import getpass
class Focus(object):
"""
Class for pulling total GDP (PIB) and IPCA data from the BCB Focus survey.
"""
indicator_dict = {'ipca': '5', 'pib': '9'}
metric_dict = {'mean': '2', 'median': '3', 'std': '4', 'vc': '5',
'max': '6', 'min': '7'}
freq_dict = {'monthly': '2', 'quarterly': '2', 'yearly': '3'}
def __init__(self, driver_path):
"""
Parameters
----------
driver_path: path of the chromedriver executable
"""
self.driver_options = webdriver.ChromeOptions()
self.driver_path = driver_path
def scrape(self, indicator, initial_date, end_date, metric='median', frequency='yearly'):
"""
Parameters
----------
indicator: str with the indicator. Possible values are Focus.indicator_dict
initial_date: must be understandable by pandas.to_datetime
end_date: must be understandable by pandas.to_datetime
metric: str with the statistical metric. Possible values are Focus.metric_dict
frequency: str with the frequency of the forecast. Possible values are Focus.freq_dict
Returns
-------
pandas DataFrame with the timeseries of each forecast horizon available.
"""
# assert that the chosen parameters exists
assert indicator in self.indicator_dict.keys(), f"the indicator {indicator} is not available"
assert metric in self.metric_dict.keys(), f"the metric {metric} is not available"
assert frequency in self.freq_dict.keys(), f"the frequency {frequency} is not available"
# check if the indicator and frequency match
if (indicator == 'pib' and metric == 'monthly') or (indicator == 'ipca' and metric == 'quarterly'):
raise ValueError('Periodicity selected is not available for the indicator chosen.')
# open the browser
browser = webdriver.Chrome(self.driver_path, chrome_options=self.driver_options)
# navigating to the page
browser.get("https://www3.bcb.gov.br/expectativas/publico/consulta/serieestatisticas")
# select the indicator - chooses PIB or IPCA
xpath = f'//*[@id="indicador"]/option[{self.indicator_dict[indicator]}]'
browser.find_element_by_xpath(xpath).click()
# select the price index or the gdp group
if indicator == 'pib':
xpath = '//*[@id="grupoPib:opcoes_3"]' # total gdp
browser.find_element_by_xpath(xpath).click()
else:
xpath = r'//*[@id="grupoIndicePreco:opcoes_5"]' # IPCA
browser.find_element_by_xpath(xpath).click()
# select the metric
xpath = f'//*[@id="calculo"]/option[{self.metric_dict[metric]}]'
browser.find_element_by_xpath(xpath).click()
# select the periodicity
xpath = f'//*[@id="periodicidade"]/option[{self.freq_dict[frequency]}]'
browser.find_element_by_xpath(xpath).click()
# dates in datetime format
initial_date = pd.to_datetime(initial_date)
end_date = pd.to_datetime(end_date)
# generate the date ranges in 18-month intervals (approximately)
dates = pd.date_range(initial_date, end_date, freq='18m')
dates = list(dates)
if len(dates) == 1:
dates.append(end_date)
dates[0] = initial_date
# loops on all date pairs
list_df = []
for init_d, end_d in zip(dates[:-1], dates[1:]):
# fill the dates
xpath = r'//*[@id="tfDataInicial1"]'
browser.find_element_by_xpath(xpath).send_keys(init_d.strftime('%d/%m/%Y'))
xpath = r'//*[@id="tfDataFinal2"]'
browser.find_element_by_xpath(xpath).send_keys(end_d.strftime('%d/%m/%Y'))
# fill starting prediction scope (always chooses the first element of the dropdown menu)
if frequency == 'monthly':
xpath_m = r'//*[@id="mesReferenciaInicial"]/option[text()="janeiro"]'
xpath = r'//*[@id="form4"]/div[2]/table/tbody[3]/tr/td[2]/select[2]/option[text()="1999"]'
browser.find_element_by_xpath(xpath_m).click()
browser.find_element_by_xpath(xpath).click()
elif frequency == 'quarterly':
xpath_m = r'//*[@id="form4"]/div[2]/table/tbody[3]/tr/td[2]/select[1]/option[text()="janeiro a março"]'
xpath = r'//*[@id="form4"]/div[2]/table/tbody[3]/tr/td[2]/select[2]/option[text()="1999"]'
browser.find_element_by_xpath(xpath_m).click()
browser.find_element_by_xpath(xpath).click()
elif frequency == 'yearly':
xpath = r'//*[@id="form4"]/div[2]/table/tbody[3]/tr/td[2]/select/option[text()="1999"]'
browser.find_element_by_xpath(xpath).click()
else:
raise ValueError('Frequency selection is not handled by this code.')
# fill ending prediction scope (always chooses the last element of the dropdown menu)
if frequency == 'monthly':
xpath_m = r'//*[@id="mesReferenciaFinal"]/option[text()="dezembro"]'
xpath = r'//*[@id="form4"]/div[2]/table/tbody[3]/tr/td[4]/select[2]'
browser.find_element_by_xpath(xpath_m).click()
elif frequency == 'quarterly':
xpath_m = r'//*[@id="form4"]/div[2]/table/tbody[3]/tr/td[4]/select[1]/option[text()="outubro a dezembro"]'
xpath = r'//*[@id="form4"]/div[2]/table/tbody[3]/tr/td[4]/select[2]'
browser.find_element_by_xpath(xpath_m).click()
elif frequency == 'yearly':
xpath = r'//*[@id="form4"]/div[2]/table/tbody[3]/tr/td[4]/select'
else:
raise ValueError('Frequency selection is not handled by this code.')
# Pick final option in list (whatever year that is)
sel = browser.find_element_by_xpath(xpath)
sel.click()
options = sel.find_elements_by_tag_name('option')
options[len(options) - 1].click()
# click the download button
xpath = r'//*[@id="btnXLSa"]'
browser.find_element_by_xpath(xpath).click()
# saves the time the file was downloaded
download_save_time = dt.datetime.now()
# give some time for the download to finish
time.sleep(6)
# get the default download directory based on the operating system
if platform.system() == 'Windows':
username = os.getlogin()
download_path = f'C:/Users/{username}/Downloads'
elif platform.system() == 'Darwin': # MacOS
username = getpass.getuser()
download_path = f'/Users/{username}/Downloads'
else:
raise SystemError('This code can only run on Windows or MacOS')
# reads the downloaded file
file_path = None
for (dirpath, dirnames, filenames) in os.walk(download_path):
for f in filenames:
if 'Séries de estatísticas' in f:
file_save_time = os.path.getmtime(os.path.join(dirpath, f))
file_save_time = dt.datetime.fromtimestamp(file_save_time)
if file_save_time > download_save_time:
file_path = os.path.join(dirpath, f)
# read the file and clean the dataframe
df = pd.read_excel(file_path, skiprows=1, na_values=[' '])
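# Example (hypothetical usage; assumes this method lives on the Focus class referenced in the
# docstring above and that a chromedriver binary exists at the given path):
# focus = Focus(driver_path='/path/to/chromedriver')
# ipca_median = focus.scrape('ipca', '2019-01-01', '2020-01-01', metric='median', frequency='monthly')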
"""
Receipts endpoint wrapper class
Possible requests:
* get_by_query: get receipts that respect passed in query parameters
* get_by_id: get receipt with a given ID
* get_by_date: get receipts for a given date
* get_by_dates: get receipts between two dates
"""
import pandas as pd
from datetime import datetime, timezone
from loyverse.api import Api
from loyverse.utils.dates import utc_isoformat, day_start, day_end
from loyverse.endpoints.fields import receipt as fields
class Receipts:
def __init__(self, api: Api):
self._api = api
self._path = 'receipts'
def get_by_query(self, receipt_numbers: list = None, since_receipt_number: str = None,
before_receipt_number: str = None, store_id: str = None, order: str = None, source: str = None,
updated_at_min: datetime = None, updated_at_max: datetime = None,
created_at_min: datetime = None, created_at_max: datetime = None, limit: int = 250,
cursor: str = None) -> dict:
"""
Retrieves receipts that respect the specific query criteria passed in. A detailed description of the query
parameters is available `here <https://developer.loyverse.com/docs/#tag/Receipts/paths/~1receipts/get>`_.
Args:
receipt_numbers (list): filter receipts by receipt numbers
since_receipt_number (str): return only receipts after this receipt number
before_receipt_number (str): return only receipts before this receipt number
store_id (str): filter receipts by store id
order (str): filter receipts by order
source (str): filter receipts by source (e.g. My app)
updated_at_min (datetime): filter receipts updated after this date (includes timezone info)
updated_at_max (datetime): filter receipts updated before this date (includes timezone info)
created_at_min (datetime): filter receipts created after this date (includes timezone info)
created_at_max (datetime): filter receipts created before this date (includes timezone info)
limit (int): maximum number of receipts to return per request (1 to 250)
cursor (str): token to get continuation of return list for requests exceeding limits
Returns:
response (dict): formatted receipts information (JSON)
"""
params = dict()
if receipt_numbers is not None:
params['receipt_numbers'] = ','.join(receipt_numbers)
if since_receipt_number is not None:
params['since_receipt_number'] = since_receipt_number
if before_receipt_number is not None:
params['before_receipt_number'] = before_receipt_number
if store_id is not None:
params['store_id'] = store_id
if order is not None:
params['order'] = order
if source is not None:
params['source'] = source
if updated_at_min is not None:
params['updated_at_min'] = utc_isoformat(updated_at_min)
if updated_at_max is not None:
params['updated_at_max'] = utc_isoformat(updated_at_max)
if created_at_min is not None:
params['created_at_min'] = utc_isoformat(created_at_min)
if created_at_max is not None:
params['created_at_max'] = utc_isoformat(created_at_max)
if limit is not None:
params['limit'] = limit
if cursor is not None:
params['cursor'] = cursor
response = self._api.request('GET', self._path, params=params)
return response
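# Example (hypothetical usage; assumes an authenticated loyverse Api instance and a real store id):
# receipts = Receipts(api)
# recent = receipts.get_by_query(store_id='store-uuid', source='My app', limit=50)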
def get_by_id(self, receipt_id: str) -> dict:
"""
Retrieves the receipts information for a specific receipt ID
Args:
receipt_id (str): string uniquely identifying the receipt to be retrieved
Returns:
response (dict): formatted receipt information (JSON)
"""
return self._api.request('GET', f'{self._path}/{receipt_id}')
def get_by_date(self, date: datetime) -> dict:
"""
Retrieve receipts information for a specific day
Args:
date (datetime): datetime object representing day in question (including time zone info)
Returns:
response (dict): formatted receipts information (JSON)
"""
data = self.get_by_query(created_at_min=day_start(date),
created_at_max=day_end(date),
)
return data
def get_by_dates(self, start_date: datetime, end_date: datetime = None) -> dict:
"""
Retrieves receipts information for a specific date interval.
Args:
start_date (datetime): start date, including time-zone info
end_date (datetime): end date, including time-zone info (if not provided, defaults to UTC now)
Returns:
response (dict): formatted receipts information (JSON)
"""
if end_date is None:
end_date = datetime.now(timezone.utc)
data = self.get_by_query(created_at_min=day_start(start_date),
created_at_max=day_end(end_date),
)
return data
@staticmethod
def _receipt_to_dataframes(receipt: dict):
"""
Formats one receipts object into three dataframes, containing receipts and items information.
Args:
receipt (dict): a receipt object
Returns:
receipt_df (pandas.Dataframe): receipt level information
items_df (pandas.Dataframe): receipt items information
payments_df (pandas.Dataframe): receipt payments information
"""
if 'receipts' in receipt:
raise ValueError('Invalid receipt object passed in, should not contain - receipts - field')
id_key = 'receipt_number'
receipt_df = pd.DataFrame({key: receipt[key] for key in fields.receipt}, index=[0])
"""
This file contains various generic utility methods that do not fall within
data or input-output methods.
"""
import math
import numpy as np
from datetime import datetime
from typing import Dict, Tuple
from collections import defaultdict
import pandas as pd
def timestamp() -> str:
return datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
def show_cross_val_results(cross_val_results: Dict[str, Tuple[float, float, float]]):
"""
Iterates over the dictionary of cross-validation results and generates
a good-looking dataframe of median - [CI lower, CI upper] for each
metric and set.
Parameters
----------
cross_val_results: Dict[str, Tuple[float, float, float]]
Dictionary of results from cross-validation procedure.
Returns
-------
pd.DataFrame
A dataframe showing cross-validation results with median - [CI lower, CI upper]
for each metric and set.
"""
results = defaultdict(list)
metric_names = []
for key, (med, lower_ci, upper_ci) in cross_val_results.items():
set_name, metric_name = key.split("_")
metric_names.append(metric_name)
results[set_name].append(f"{med} - [{lower_ci}-{upper_ci}]")
print(metric_names)
output = pd.DataFrame.from_dict(results)
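# Example (hypothetical usage; keys follow the "<set>_<metric>" convention parsed above):
# cv_results = {'train_auc': (0.91, 0.89, 0.93), 'test_auc': (0.87, 0.84, 0.90)}
# show_cross_val_results(cv_results)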
import numpy as np
import pandas as pd
import datetime as dt
def make_column_index(df:pd.DataFrame, column_label:str) -> None:
df.index = df[column_label]
df.drop(column_label, axis=1, inplace=True)
df.index.name = None
def rename_column(df:pd.DataFrame, column_label:str, new_name:str) -> None:
df.rename(columns={column_label: new_name}, inplace=True)
def remove_outliers(df:pd.DataFrame, column_label:str) -> str:
raw_data = df[column_label]
mean = np.mean(raw_data)
std_dev = np.std(raw_data)
outliers_cutoff = std_dev * 3
lower_limit = mean - outliers_cutoff
upper_limit = mean + outliers_cutoff
no_outliers = raw_data.apply(lambda x: mean if x > upper_limit or x < lower_limit else x)
outlier_column = f'{column_label} (-outliers)'
df[outlier_column] = no_outliers
return outlier_column
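# Example (hypothetical usage): values beyond three standard deviations are replaced by the mean
# and written to a new '<column> (-outliers)' column whose name is returned.
# df = pd.DataFrame({'sales': [10, 12, 11, 400, 9]})
# clean_col = remove_outliers(df, 'sales')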
def unstack_data(df:pd.DataFrame, metric_column:str, unstack_column:str) -> pd.DataFrame:
pivoted = pd.pivot_table(df, index=['date'], values=[metric_column], columns=[unstack_column], aggfunc=[np.sum])
pivoted.columns = pivoted.columns.droplevel(0)
pivoted.columns.name = None
pivoted = pivoted.reset_index()
pivoted.columns = [col[1] for col in pivoted.columns]
metric_columns = list(pivoted.columns[1:])
metric_columns = [f"{c} | {metric_column}" for c in metric_columns]
pivoted.columns = ["date"] + metric_columns
pivoted.fillna(0, inplace=True)
return pivoted
def transpose_data(df:pd.DataFrame) -> pd.DataFrame:
date_col = df.columns[0]
df = df.T
df.columns = df.iloc[0]
df.drop(df.index[0], inplace=True)
df.reset_index(inplace=True)
df.rename(columns={"index": date_col}, inplace=True)
df = df.rename_axis(None, axis = 1)
return df
def interpolate_weekly_data(df, date_col=None, resample_col=None):
df = df.copy()
if date_col is None:
date_col = df.columns[0]
if resample_col is None:
resample_col = df.columns[1]
df[date_col] = df[date_col].apply(lambda x: dt.datetime.strptime(f"{x}-1", "%Y-%W-%w")) # mondays
df[date_col] = pd.to_datetime(df[date_col]) # datetime
df.set_index(date_col, inplace=True)
df_reindexed = df.reindex(pd.date_range(start=df.index.min(),
end=df.index.max() + dt.timedelta(days=6),
freq='1D'))
col_to_resample = df_reindexed.columns[0]
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].fillna(0)
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].astype(str)
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].apply(lambda x: x.replace(',',''))
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].apply(lambda x: x.replace('$',''))
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].apply(lambda x: x.replace('£',''))
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].apply(lambda x: x.replace('€',''))
df_reindexed[col_to_resample] = pd.to_numeric(df_reindexed[col_to_resample])
df_reindexed[col_to_resample].replace({0:np.nan}, inplace=True)
df = df_reindexed.interpolate(method='linear')
df = df / 7
df.reset_index(inplace=True)
df.rename({'index': 'date'}, axis=1, inplace=True)
return df
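# Example (hypothetical usage; expects an ISO year-week string column such as '2021-03'):
# weekly = pd.DataFrame({'week': ['2021-01', '2021-02', '2021-03'], 'spend': ['1,400', '2,100', '700']})
# daily = interpolate_weekly_data(weekly, date_col='week', resample_col='spend')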
def interpolate_monthly_data(df, date_col=None, resample_col=None):
df = df.copy()
if date_col is None:
date_col = df.columns[0]
if resample_col is None:
resample_col = df.columns[1]
df[date_col] = pd.to_datetime(df[date_col], format="%Y-%m")
df['start_of_month'] = (df[date_col].dt.floor('d') + pd.offsets.MonthEnd(0) - pd.offsets.MonthBegin(1))
df['end_of_month'] = pd.to_datetime(df['start_of_month']) + pd.offsets.MonthEnd(1)
df['days_in_month'] = (df['end_of_month'] - df['start_of_month']).dt.days + 1
df[resample_col] = df[resample_col] / df['days_in_month']
reindexed = df.set_index("start_of_month")
reindexed = reindexed.reindex(pd.date_range(start=reindexed.index.min(),
end=reindexed.end_of_month.max(),
freq='1D'))
resampled = reindexed[resample_col]
resampled.replace({0:np.nan}, inplace=True)
resampled = resampled.interpolate(method='linear')
resampled = resampled.reset_index()
resampled.rename({'index': 'date'}, axis=1, inplace=True)
resampled.fillna(0, inplace=True)
return resampled
def group_weekly(df, date_col:str) -> pd.DataFrame:
weekly = df.copy()
weekly['week'] = weekly[date_col].dt.isocalendar().week
weekly['year'] = weekly[date_col].dt.isocalendar().year
weekly['year_week'] = weekly['year'].astype(str) + "-" + weekly['week'].astype(str)
weekly = weekly.groupby('year_week').sum()
weekly.drop(['week', 'year'], axis=1, inplace=True)
weekly.reset_index(inplace=True)
return weekly
def group_monthly(df, date_col:str) -> pd.DataFrame:
monthly = df.copy()
monthly['month'] = monthly[date_col].dt.month
monthly['year'] = monthly[date_col].dt.isocalendar().year
monthly['year_month'] = monthly['year'].astype(str) + "-" + monthly['month'].astype(str)
monthly = monthly.groupby('year_month').sum()
monthly.drop(['month', 'year'], axis=1, inplace=True)
monthly.reset_index(inplace=True)
return monthly
def handle_search_trends_data(df:pd.DataFrame) -> pd.DataFrame:
# delete any '<' signs for low volume days
for c in df.select_dtypes(include=['object']).columns[1:]:
df[c] = df[c].str.replace('<', '')
df[c] = pd.to_numeric(df[c])
date_col = df.columns[0]
df[date_col] = pd.to_datetime(df[date_col])
df.set_index(date_col, inplace=True)
df_reindexed = df.reindex(pd.date_range(start=df.index.min(),
end=df.index.max() + dt.timedelta(days=6), freq='1D'))
df = df_reindexed.interpolate(method='linear')
df = df.round(1)
df.reset_index(inplace=True)
df.rename({'index': 'date'}, axis=1, inplace=True)
return df
def handle_covid_data(data:pd.DataFrame, sub_region_1:str=None) -> pd.DataFrame:
if sub_region_1 is None:
df = data[data['sub_region_1'].isnull()]
else:
df = data[data['sub_region_1'] == sub_region_1]
df = df[df['sub_region_2'].isnull()]
df.reset_index(inplace=True)
return df[df.columns[9:]]
def handle_weather_data(df:pd.DataFrame) -> pd.DataFrame:
year = df['YEAR'].astype(str)
month = df['MO'].astype(str)
day = df['DY'].astype(str)
month = month.apply(lambda x: '0'+x if len(x) == 1 else x)
day = day.apply(lambda x: '0'+x if len(x) == 1 else x)
df['date'] = pd.to_datetime(year + "-" + month + "-" + day)
df = df[['date', 'T2M_RANGE', 'T2M_MAX', 'T2M_MIN', 'T2M']]
return df
def create_holiday_dummies(df:pd.DataFrame) -> pd.DataFrame:
dr = pd.date_range(start=df['date'].min(), end=df['date'].max())
date_df = pd.DataFrame({'date': dr})
for _, row in df.iterrows():
date_df[row[1]] = (date_df['date'] == row[0])
date_df.iloc[:, 1:] = date_df.iloc[:, 1:].astype(int)
return date_df
def create_date_range_dummies(df:pd.DataFrame) -> pd.DataFrame:
dr = pd.date_range(start=df['start'].min(), end=df['end'].max())
date_df = pd.DataFrame({'date': dr})
for _, row in df.iterrows():
date_df[row[2]] = (date_df['date'] >= row[0]) & (date_df['date'] <= row[1])
date_df.iloc[:, 1:] = date_df.iloc[:, 1:].astype(int)
return date_df
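# Example (hypothetical usage): one dummy column per named range, 1 inside the range and 0 outside.
# ranges = pd.DataFrame({'start': pd.to_datetime(['2021-06-01']),
#                        'end': pd.to_datetime(['2021-06-14']),
#                        'name': ['summer_sale']})
# dummies = create_date_range_dummies(ranges)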
def add_start_of_month(df:pd.DataFrame, date_col:str) -> pd.DataFrame:
df['start_of_month'] = (df[date_col].dt.floor('d') + pd.offsets.MonthEnd(0) - pd.offsets.MonthBegin(1))
# Copyright 2019 Nokia
# Licensed under the BSD 3 Clause Clear license
# SPDX-License-Identifier: BSD-3-Clause-Clear
import pandas as pd
import numpy as np
from datetime import datetime
import math
# increments = 0
# search_range = 0
# P7_NUM = 0
# current_date = 0
# qq_plot_start = 5
# qq_plot_end = 100
# qq_plot_increment = 5
# qq_plot_limit = 0.3
def run_edpm(feature_data, defect_data, P7, inc, date, srange, qq_start, qq_end, qq_increment, qq_limit):
global P7_NUM, increments, current_date, search_range, qq_plot_start, qq_plot_end, qq_plot_increment, qq_plot_limit
#feature_data.to_csv("feature_data.csv")
#defect_data.to_csv("defect_data.csv")
P7_NUM = P7
increments = inc
current_date = date
search_range = srange
qq_plot_start = qq_start
qq_plot_end = qq_end
qq_plot_increment = qq_increment
qq_plot_limit = qq_limit
# print('P7_NUM =', P7,
# 'increments =', inc,
# 'current_date =', date,
# 'search_range =' ,srange)
defects_start_date = defect_data['Date_Ending'].values[0]
features_start_date = feature_data['Month_Ending'].values[0]
defects_end_date = defect_data['Date_Ending'].values[-1]
features_end_date = feature_data['Month_Ending'].values[-1]
defect_data['X'] = 1+(defect_data['Date_Ending'] - defects_start_date).dt.days.astype(int)
feature_data['X'] = 1+(feature_data['Month_Ending'] - features_start_date).dt.days.astype(int)
feature_data.reset_index(inplace=True)
feature_new_days = list(range(feature_data['X'].values[0], feature_data['X'].values[-1], increments))
defect_new_days = list(range(defect_data['X'].values[0], defect_data['X'].values[-1], increments))
gap = int(((defects_start_date - features_start_date).astype('timedelta64[D]').astype(int))/increments)
#print(feature_data)
#print(defect_data)
#exit()
feature_new_data = perform_interpolation(feature_new_days, feature_data['X'].values, feature_data['Sub-feature_Arrival'].values)
defect_new_data = perform_interpolation(defect_new_days, defect_data['X'].values, defect_data['Created'].values)
resolved_new_data = perform_interpolation(defect_new_days, defect_data['X'].values, defect_data['Resolved'].values)
#print(feature_new_days)
#print(final_index)
#print("XXXXXXX")
#exit()
final_data = get_data(feature_new_days, defect_new_days, feature_new_data, defect_new_data, resolved_new_data)
final_data['WEEK'] = final_data.index.values + 1
#print(final_data)
#final_data.to_csv("data_new.csv")
#print("m: ", gap)
#print("p7: ", P7_NUM)
#print("increments: ", increments)
a, b, c = create_qq_plot(final_data['FEATURES'].values, final_data['ARRIVALS'].values)
final_data['WEEK_(X_NEW)'] = a + b * final_data['WEEK']
final_data['ARRIVALS_(Y_NEW)'] = c * final_data['FEATURES']
ssq = get_ssq(final_data['ARRIVALS'].values, final_data['WEEK_(X_NEW)'].values, final_data['ARRIVALS_(Y_NEW)'].values)
#print("SSQ:", ssq)
#print(final_data)
#exit()
N_p = current_date/(P7_NUM)
F_p = int(N_p*len(final_data['FEATURES'].dropna().values))
start_week = max(0, (F_p - search_range))
end_week = min((F_p + search_range), (len(final_data['FEATURES'].dropna().values)))
evaluation = []
for index in range(start_week, end_week):
feature_data = final_data['FEATURES'].values[:index]
arrivals = final_data['ARRIVALS'].values
week_data = np.asarray([i+1 for i in range(len(feature_data))])
#print(week_data)
a, b, c = create_qq_plot(feature_data, arrivals)
x_new = a + b * week_data
y_new = c * feature_data
#print("x_new: ", len(x_new))
#print("y_new: ", len(y_new))
#print("week_data: ", len(week_data))
#print("arrivals: ", len(arrivals))
#exit()
ssq = get_ssq(arrivals, x_new, y_new)
evaluation.append([index, a, b, c, ssq])
df = pd.DataFrame(evaluation, columns=['index', 'intercept', 'slope', 'ratio', 'ssq'])
#df.to_csv('SSQ_CHECK.csv')
best_index = df.loc[df['ssq'].idxmin()]
best_index['gap'] = gap
best_index = best_index.round(2)
result = best_index.to_dict()
result['defects_start_date'] = pd.Timestamp(defects_start_date)
result['features_start_date'] = pd.Timestamp(features_start_date)
#best_index['defects_start_date'] = defects_start_date
#best_index['features_start_date'] = features_start_date
#print(final_data)
#print(current_date)
#time_from_P7 = P7_NUM - current_date
#print(time_from_P7)
#print(final_data['FEATURES'].values)
feature_data = final_data['FEATURES'].dropna().values[int(best_index['index']):]
#predict_range = np.asarray([i+1 for i in range(current_date, P7_NUM)])
#print(len(feature_data))
#print(len(predict_range))
#exit()
#print(final_data)
#print(best_index)
#x_new = best_index['intercept'] + best_index['slope'] * predict_range
#print(x_new)
#exit()
#required_range = [i for i in predict_range if i > np.min(x_new) and i < np.max(x_new)]
#print(required_range)
y_new = best_index['ratio'] * feature_data
x_new = [current_date+i for i in range(len(y_new))]
#print(current_date)
#print(feature_data)
#print(y_new)
#y_new = perform_interpolation(required_range, x_new, y_new)
#x_new = required_range
df = pd.DataFrame({'y_new': y_new, 'x_new': x_new})
#print(df)
#exit()
final_data = final_data.merge(df, left_on='WEEK', right_on='x_new', how='outer')
#print(final_data)
#print(result)
#final_data.to_csv("FINAl_DATA.csv")
#print(len(final_data))
#print(len(pd.date_range(start=defects_start_date, periods=len(df), freq=str(increments)+'D')))
final_data['defect_dates'] = pd.date_range(start=defects_start_date, periods=len(final_data), freq=str(increments)+'D')
final_data['feature_dates'] = pd.date_range(start=features_start_date, periods=len(final_data), freq=str(increments)+'D')
result['dates'] = list(final_data['defect_dates'].append(final_data['feature_dates']).sort_values().drop_duplicates().astype(str).values)
final_data['defect_dates'] = final_data['defect_dates'].astype(str)
final_data['feature_dates'] = final_data['feature_dates'].astype(str)
#print(final_data)
#exit()
#exit()
#result['dates'] = list(set(list(final_data['defect_dates']) + list(final_data['feature_dates'])))
result['predictions'] = final_data[['defect_dates', 'y_new']].rename(columns={'defect_dates': 'date', 'y_new':'value'}).dropna().to_dict(orient='records')
result['features'] = final_data[['feature_dates', 'FEATURES']].rename(columns={'feature_dates': 'date', 'FEATURES':'value'}).dropna().to_dict(orient='records')
result['actual'] = final_data[['defect_dates', 'ARRIVALS']].rename(columns={'defect_dates': 'date', 'ARRIVALS':'value'}).dropna().to_dict(orient='records')
#print(features)
#exit()
#print(final_data)
#print(best_index)
#print(defects_start_date)
#print(features_start_date)
#exit()
#p7_week = P7_NUM
#P7_Prediction = perform_interpolation([p7_week], x_new, y_new)[0]
#print(P7_Prediction)
return result
#print(final_data)
#final_data.to_csv("FINAl_DATA.csv")
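# Example (hypothetical usage; the column names mirror the frames consumed by run_edpm above and
# the tuning values mirror the commented defaults at the top of this module):
# features = pd.DataFrame({'Month_Ending': pd.date_range('2019-01-31', periods=18, freq='M'),
#                          'Sub-feature_Arrival': np.arange(1, 19) * 4})
# defects = pd.DataFrame({'Date_Ending': pd.date_range('2019-04-30', periods=15, freq='M'),
#                         'Created': np.arange(1, 16) * 3,
#                         'Resolved': np.arange(1, 16) * 2})
# prediction = run_edpm(features, defects, P7=120, inc=7, date=60, srange=5,
#                       qq_start=5, qq_end=100, qq_increment=5, qq_limit=0.3)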
def get_ssq(arrivals, x_new, y_new):
df1 = pd.DataFrame({'WEEK':[i+1 for i in range(len(arrivals))], 'ARRIVALS':arrivals})
min_week = int(math.ceil(np.min(x_new)))
max_week = int(math.floor(np.max(x_new)))
week_range = [i for i in range(min_week, max_week+1)]
#x_new = x_new[:len()]
#print("k: ", len(week_range))
#print(week_range)
#print(x_new)
#print("l: ", len(x_new))
#print("m: ", len(y_new))
new_values = perform_interpolation(week_range, x_new, y_new, roundoff=False)
#print(new_values)
#print(len(new_values))
df2 = pd.DataFrame({'D2':week_range, 'ARRIVALS_(Y_NEW)':new_values})
df = df1.merge(df2, how='outer', left_on='WEEK', right_on='D2')
df['ERROR'] = (df['ARRIVALS'] - df['ARRIVALS_(Y_NEW)'])**2
p = df.count()['ERROR']
#print("p: ", p)
ssq = round(math.sqrt(df['ERROR'].sum()/(p-2)), 3)
del df['D2']
return ssq
# def determine_ssq(final_data):
# #final_data['ARRIVALS_NEW'] = c * final_data['ARRIVALS']
# #print(len())
# min_week = int(math.ceil(final_data['WEEK_(X_NEW)'].min()))
# max_week = final_data['WEEK'].max()
# week_range = [i for i in range(min_week, max_week+1)]
# #print(max_week, min_week, len(week_range), week_range)
# row_data = []
# if len(week_range) < len(final_data):
# diff = len(final_data) - len(week_range)
# row_data = [None for i in range(diff)]
# row_data += perform_interpolation(week_range, final_data['WEEK_(X_NEW)'].values, final_data['ARRIVALS_(Y_NEW)'].values, roundoff=False)
# #print(row_data)
# if len(row_data) < len(final_data):
# diff = len(final_data) - len(row_data)
# nones = [None for i in range(diff)]
# row_data += nones
# #print(len(row_data))
# #print(len(final_data))
# #exit()
# final_data['SHIFTED_Y'] = row_data
# final_data['(Y_ACT-Y_PRED)^2'] = final_data['ARRIVALS'] - final_data['SHIFTED_Y']
# final_data['(Y_ACT-Y_PRED)^2'] = final_data['(Y_ACT-Y_PRED)^2']**2
# p = final_data.count()['(Y_ACT-Y_PRED)^2']
# print("p: ", p)
# ssq = round(math.sqrt(final_data['(Y_ACT-Y_PRED)^2'].sum()/(p-2)), 3)
# #print(final_data)
# #print("SSQ: ", ssq)
# return ssq, final_data
def create_qq_plot(feature_data, arrival_data):
# qq_plot_start = 5
# qq_plot_end = 100
# qq_plot_increment = 5
# qq_plot_limit = 0.3
max_feature = np.nanmax(feature_data)
max_defect = np.nanmax(arrival_data)
FEATURES_CDF = (feature_data/max_feature).round(5)
ARRIVALS_CDF = (arrival_data/max_defect).round(5)
w = [(i/100) for i in range(qq_plot_start,qq_plot_end,qq_plot_increment) if ((i/100) > np.nanmin(FEATURES_CDF)) and ((i/100) > np.nanmin(ARRIVALS_CDF))]
#print("w: ", w)
#prinr("W: ", w)
#print("CDF: ", FEATURES_CDF)
Q_features = perform_interpolation(w, FEATURES_CDF, [i+1 for i in range(len(feature_data))], roundoff=False)
Q_arrivals = perform_interpolation(w, ARRIVALS_CDF, [i+1 for i in range(len(arrival_data))], roundoff=False)
#print(Q_arrivals)
#print(Q_features)
#exit()
arrivals_95pct = perform_interpolation([0.95], ARRIVALS_CDF, arrival_data, roundoff=False)[0]
features_95pct = perform_interpolation([0.95], FEATURES_CDF, feature_data, roundoff=False)[0]
c = arrivals_95pct/features_95pct #ratio
QQ = pd.DataFrame([[i] for i in w], columns=['p'])
#print(QQ)
#print(Q_features)
QQ['x'] = Q_features
QQ['y'] = Q_arrivals
QQ['xx'] = QQ['x']*QQ['x']
QQ['xy'] = QQ['x']*QQ['y']
#print(QQ)
#print(QQ)
QQ = QQ[QQ['p'] >= qq_plot_limit]
#print(QQ)
n = len(QQ)
a = (QQ['y'].sum()*QQ['xx'].sum() - QQ['x'].sum()*QQ['xy'].sum())/(n*QQ['xx'].sum()-QQ['x'].sum()*QQ['x'].sum()) #intercept
b = (n*(QQ['xy'].sum()) - QQ['x'].sum()*QQ['y'].sum())/(n*QQ['xx'].sum() - QQ['x'].sum()*QQ['x'].sum()) #slope
#print("n: ", n)
#print(("a: %f, b: %f, c: %f") %(a, b, c))
return a, b, c
def read_data():
feature_data = pd.read_csv(directory+'feature_data.csv')
defect_data = pd.read_csv(directory+'defect_data.csv')
feature_data['Month_Ending'] = pd.to_datetime(feature_data['Month_Ending'], format='%d/%m/%Y')
# Written by: <NAME>, @dataoutsider
# Viz: "Party Lines", enjoy!
import pandas as pd
import os
import math
df = pd.read_csv(os.path.dirname(__file__) + '/1976-2016-president.csv', engine='python') # test with , nrows=20
df['term'] = df['year']
df2016 = df.loc[df['year'] == 2016]
df2016 = df2016.groupby('state').agg({'totalvotes':'max'}).reset_index()
df2016['2016_votes'] = df2016['totalvotes']
df2016['2016_rank'] = df2016['totalvotes'].rank(ascending=False)
df2016['t_totalvotes'] = df2016['totalvotes']/100000
df2 = pd.read_csv(os.path.dirname(__file__) + '/state_test3.csv', engine='python') # test with , nrows=20
#df3 = pd.merge(df2, df, on=['state', 'term'], how='inner')
df3 = pd.merge(df2, df2016, on=['state'], how='left')
#df3.to_csv(os.path.dirname(__file__) + '/state_test_1.csv', encoding='utf-8', index=False)
#print(df3)
df3['x2'] = 0
data = []
df_group = df3.groupby(['term'])
separation = 20.0
xadd = 0.0
istate = 1.0
ix = 0
for name, group in df_group:
year = group.sort_values('2016_rank', ascending=True)
for index, row in year.iterrows():
if row['2016_rank'] != istate:
xadd += ix
row['x2'] = row['x'] + xadd + separation * row['2016_rank']
data.append(row)
istate = row['2016_rank']
ix = row['t_totalvotes']
istate = 1
ix = 0
xadd = 0
df_state = pd.DataFrame(data, columns=df3.columns)
from .data import CovidData
import datetime as dt
from matplotlib.offsetbox import AnchoredText
import pandas as pd
import seaborn as sns
import geopandas as gpd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def pan_duration(date):
"""Return the duration in days of the pandemic.
As calculated from the gov.uk API data, which is returned newest first:
the earliest date entry is subtracted from the most recent one.
Args:
date (datetime): DataFrame column (i.e Series) containing date
field as downloaded from the gov.uk API by get_national_data()
method from CovidData Class.
Returns:
int: Duration of the pandemic in days.
"""
return (date[0] - date[-1]).days
def validate_input(df):
"""Check that input into the plotting functions is of the correct type.
Args:
df (Pandas DataFrame): this is intended to be the plotting parameter
Raises:
TypeError: if parameter is not a DataFrame
"""
# if for_function == 'deaths' or for_function == 'cases':
# expected_cols = {'cases_cumulative', 'cases_demographics',
# 'cases_newDaily', 'case_rate', 'date',
# 'death_Demographics', 'name', 'vac_firstDose',
# 'vac_secondDose'}
if not isinstance(df, pd.DataFrame):
raise TypeError('Parameter must be DataFrame, use get_regional_data'
+ ' method from CovidData class.')
# if set(df.columns) != expected_cols:
# raise ValueError('Incorrect features. Expecting output from'
# + ' get_regional_data method from CovidData class')
def my_path():
"""Find correct path at module level for geo_data files.
Returns:
pathlib.Path: path to the geo_data folder bundled with this module.
"""
from pathlib import Path
base = Path(__file__).resolve().parent / 'geo_data'
return base
def daily_case_plot(df, pan_duration=pan_duration, save=False):
"""Create a matplotlib plot of case numbers in the UK.
Calculated over the duration of the pandemic.Display text information
giving the most recent daily number, the highest daily number and the
date recorded, the total cumulative
number of cases and the duration of the pandemic in days.
Args:
df (DataFrame): containing covid data retrieved from CovidData
class using get_national_data() or get_UK_data() method.
pan_duration (function, optional): Defaults to pan_duration.
save (bool, optional): set True to save plot. Defaults to False.
Returns:
- Matplotlib plot, styled using matplotlib template 'ggplot'
"""
# Create Variables we wish to plot
cases = df['case_newCases'].to_list()
date = df['date'].to_list()
cumulative = df['case_cumulativeCases'].to_list()
# Find date of highest number of daily cases
high, arg_high = max(cases), cases.index(max(cases))
high_date = date[arg_high].strftime('%d %b %Y')
duration = pan_duration(date=date)
# Create matplotlib figure and specify size
fig = plt.figure(figsize=(12, 10))
plt.style.use('ggplot')
ax = fig.add_subplot()
# Plot varibles
ax.plot(date, cases)
# Style and label plot
ax.set_xlabel('Date')
ax.set_ylabel('Cases')
ax.fill_between(date, cases,
alpha=0.3)
ax.set_title('Number of people who tested positive for Covid-19 (UK)',
fontsize=18)
at = AnchoredText(f"Most recent new cases\n{cases[0]:,.0f}\
\nMax new cases\n{high:,.0f}: {high_date}\
\nCumulative cases\n{cumulative[0]:,.0f}\
\nPandemic duration\n{duration} days",
prop=dict(size=16), frameon=True, loc='upper left')
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at)
ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, 0.0175), xycoords='figure fraction',
fontsize=12, color='#555555')
plt.style.use('ggplot')
if save:
plt.savefig(f"{date[0].strftime('%Y-%m-%d')}-case_numbers_plot");
plt.show()
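# Example (hypothetical usage; the frame comes from the CovidData class named in the docstring):
# uk_data = CovidData().get_UK_data()
# daily_case_plot(uk_data, save=True)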
def regional_plot_cases(save=False):
"""Plot regional case numbers on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
Returns:
Plot of regional case numbers on map of UK
"""
# Collect data
regions = CovidData().get_regional_data()
scotland = CovidData(nation='scotland').get_national_data()
wales = CovidData(nation='wales').get_national_data()
ni = CovidData(nation='northern ireland').get_national_data()
regions = regions.assign(case_newCases=regions['cases_newDaily'])
# Set date to plot
date_selector = regions['date'][0]
regions_date = regions.loc[regions['date'] == date_selector]
scotland_date = \
scotland.loc[scotland['date'] == date_selector,
['date', 'name', 'case_newCases']]
wales_date = wales.loc[wales['date'] == date_selector,
['date', 'name', 'case_newCases']]
ni_date = ni.loc[ni['date'] == date_selector,
['date', 'name', 'case_newCases']]
# Combine regional data into single dataframe
final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
axis=0)
file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except is not good practice, this should be changed
print('Ensure you have imported geo_data sub-folder')
geo_df['nuts118nm'] = \
geo_df['nuts118nm'].replace(['North East (England)',
'North West (England)',
'East Midlands (England)',
'West Midlands (England)',
'South East (England)',
'South West (England)'],
['North East', 'North West',
'East Midlands', 'West Midlands',
'South East', 'South West'])
merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
right_on="name")
# Column to plot
feature = 'case_newCases'
# Plot range
feature_min, feature_max = merged['case_newCases'].min(), \
merged['case_newCases'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title(f'Number of new cases per region {date_selector}',
fontdict={'fontsize': '18', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=feature_min,
vmax=feature_max))
fig.colorbar(sm)
# Create map
merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
edgecolor='0.8');
plt.show()
if save:
image = merged.plot(column=feature, cmap='Reds', linewidth=0.8,
ax=ax, edgecolor='0.8');
image.figure.savefig(f'{date_selector}-regional_cases_plot')
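# Example (hypothetical usage): plots the latest regional case counts and saves the figure when save=True.
# regional_plot_cases(save=True)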
def regional_plot_rate(save=False):
"""Plot regional case rate per 100,000 on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
Returns:
Plot of regional case rate on map of UK.
"""
# Collect data
regions = CovidData().get_regional_data()
scotland = CovidData(nation='scotland').get_national_data()
wales = CovidData(nation='wales').get_national_data()
ni = CovidData(nation='northern ireland').get_national_data()
# Set date to plot
date_selector = regions['date'][5]
regions_date = regions.loc[regions['date'] == date_selector]
scotland_date = scotland.loc[scotland['date'] == date_selector,
['date', 'name', 'case_rate']]
wales_date = wales.loc[wales['date'] == date_selector,
['date', 'name', 'case_rate']]
ni_date = ni.loc[ni['date'] == date_selector,
['date', 'name', 'case_rate']]
# Combine regional data into single dataframe
final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
axis=0)
file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except should be changed, will do so in later interation
print('Ensure you have imported geo_data sub-folder')
geo_df['nuts118nm'] = \
geo_df['nuts118nm'].replace(['North East (England)',
'North West (England)',
'East Midlands (England)',
'West Midlands (England)',
'South East (England)',
'South West (England)'],
['North East', 'North West',
'East Midlands', 'West Midlands',
'South East', 'South West'])
merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
right_on="name")
# Column to plot
feature = 'case_rate'
# Plot range
feature_min, feature_max = merged['case_rate'].min(),\
merged['case_rate'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title('Regional rate per 100,000 (new cases)',
fontdict={'fontsize': '20', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=feature_min,
vmax=feature_max))
fig.colorbar(sm)
# Create map
merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
edgecolor='0.8');
plt.show()
if save:
image = merged.plot(column=feature, cmap='Reds', linewidth=0.8,
ax=ax, edgecolor='0.8');
image.figure.savefig(f'{date_selector}-regional_rate_plot')
def heatmap_cases(df):
"""Create heatmap of case numbers for duration of pandemic.
Args:
df (DataFrame): Covid case data retrieved by calling CovidData
class method.
Returns:
Seaborn heatmap plot of case numbers for each day of the pandemic.
"""
# Variables to plot
cases = df['case_newCases'].to_list()
date = df['date'].to_list()
# Create new DataFrame containing two columns: date and case numbers
heat_df = pd.DataFrame({'date': date, 'cases': cases}, index=date)
# Separate out date into year month and day
heat_df['year'] = heat_df.index.year
heat_df["month"] = heat_df.index.month
heat_df['day'] = heat_df.index.day
# Use groupby to convert data to wide format for heatmap plot
x = heat_df.groupby(["year", "month", "day"])["cases"].sum()
df_wide = x.unstack()
# Plot data
sns.set(rc={"figure.figsize": (12, 10)})
# Reverse colormap so that dark colours represent higher numbers
cmap = sns.cm.rocket_r
ax = sns.heatmap(df_wide, cmap=cmap)
ax.set_title('Heatmap of daily cases since start of pandemic',
fontsize=20)
ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, 0.01), xycoords='figure fraction',
fontsize=12, color='#555555')
plt.show()
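# Example (hypothetical usage; any frame from CovidData with 'date' and 'case_newCases' columns works):
# heatmap_cases(CovidData().get_UK_data())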
def local_rate_plot(save=False):
"""Plot local case rate per 100,000 on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
Returns:
Plot of local case rate on map of UK
"""
# Find latest data
recent_date = CovidData().get_regional_data()
recent_date = recent_date['date'][5]
# Select latest data from local data
local = CovidData().get_local_data(date=recent_date)
date_selector = recent_date
local_date = local.loc[local['date'] == date_selector,
['date', 'name', 'case_rate']]
file_path = my_path() / "Local_Authority_Districts.shp"
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except should be changed, will do so in later interation
print('Ensure you have imported geo_data sub-folder')
local_date['name'] = \
local_date['name'].replace(['Cornwall and Isles of Scilly'],
['Cornwall'])
merged = geo_df.merge(local_date, how='outer',
left_on="lad19nm", right_on="name")
# Column to plot
feature = 'case_rate'
# Plot range
vmin, vmax = merged['case_rate'].min(), merged['case_rate'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title(f'Local rate per 100,000 {recent_date}',
fontdict={'fontsize': '20', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=vmin, vmax=vmax))
fig.colorbar(sm)
# Create map
merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax,
edgecolor='0.8')
plt.show()
if save:
image = merged.plot(column=feature, cmap='Reds', linewidth=0.2,
ax=ax, edgecolor='0.8');
image.figure.savefig(f'{date_selector}-local_rate_plot')
def local_cases_plot(save=False):
"""Plot local case numbers on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
"""
# Find latest data
recent_date = CovidData().get_regional_data()
recent_date = recent_date['date'][0]
# Select latest data from local data
local = CovidData().get_local_data(date=recent_date)
date_selector = recent_date
local_date = local.loc[local['date'] == date_selector,
['date', 'name', 'case_newDaily']]
file_path = my_path() / "Local_Authority_Districts.shp"
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except should be changed, will do so in later interation
print('Ensure you have imported geo_data sub-folder')
local_date['name'] = \
local_date['name'].replace(['Cornwall and Isles of Scilly'],
['Cornwall'])
merged = geo_df.merge(local_date, how='outer',
left_on="lad19nm", right_on="name")
# Column to plot
feature = 'case_newDaily'
# Plot range
vmin, vmax = merged['case_newDaily'].min(), \
merged['case_newDaily'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title(f'Number of new cases by local authority {recent_date}',
fontdict={'fontsize': '20', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=vmin, vmax=vmax))
fig.colorbar(sm)
# Create map
merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax,
edgecolor='0.8')
plt.show()
if save:
image = merged.plot(column=feature, cmap='Reds', linewidth=0.2,
ax=ax, edgecolor='0.8');
image.figure.savefig(f'{date_selector}-local_cases_plot')
def case_demographics(df):
"""Produce a plot of the age demographics of cases across England.
Args:
df (DataFrame): this must be the dataframe produced by the
get_regional_data method from the CovidData class
Returns:
Plot of case numbers broken down by age
"""
validate_input(df)
df_list = df.loc[:, ['cases_demographics', 'date']]
age_df = []
for i in range(df_list.shape[0]):
if df_list.iloc[i, 0]:
temp_df = pd.DataFrame(df_list.iloc[i, 0])
from datetime import timedelta
from io import StringIO
import pandas as pd
from abide.schedule import ScheduledJobDefinition, ScheduledJobState, RunState, Scheduler, \
read_job_definitions
def test_basic():
s = Scheduler(pd.to_datetime('9/28/2020 13:06:03'),
{'A': ScheduledJobDefinition("* * * * *")})
when, (job_time, job_retry) = s.get_next_run_for_job('A')
assert job_time == when == pd.to_datetime('9/28/2020 13:07:00')
assert job_retry == 0
def test_late_start_cutoff_zero():
s = Scheduler(pd.to_datetime('9/28/2020 13:06:03'),
{'A': ScheduledJobDefinition("* * * * *", late_start_cutoff=0)})
when, (job_time, job_retry) = s.get_next_run_for_job('A')
assert job_time == when == pd.to_datetime('9/28/2020 13:07:00')
assert job_retry == 0
def test_every_5_mins():
s = Scheduler(pd.to_datetime('9/28/2020 13:06:03'),
{'A': ScheduledJobDefinition("*/5 * * * *", late_start_cutoff=0)})
when, (job_time, job_retry) = s.get_next_run_for_job('A')
assert job_time == when == pd.to_datetime('9/28/2020 13:10:00')
assert job_retry == 0
def test_last_run_complete():
s = Scheduler(pd.to_datetime('9/28/2020 13:06:03'),
{'A': ScheduledJobDefinition("*/5 * * * *", late_start_cutoff=0)})
s.job_states['A'] = ScheduledJobState(pd.to_datetime('9/28/2020 13:05:00'), 0, pd.to_datetime('9/28/2020 13:05:33'), RunState.COMPLETE)
when, (job_time, job_retry) = s.get_next_run_for_job('A')
assert job_time == when == pd.to_datetime('9/28/2020 13:10:00')
assert job_retry == 0
def test_last_run_running():
s = Scheduler(pd.to_datetime('9/28/2020 13:06:03'),
{'A': ScheduledJobDefinition("*/5 * * * *", late_start_cutoff=0)})
s.job_states['A'] = ScheduledJobState(pd.to_datetime('9/28/2020 13:05:00'), 0, pd.to_datetime('9/28/2020 13:05:33'), RunState.RUNNING)
when, (job_time, job_retry) = s.get_next_run_for_job('A')
assert job_time == when == pd.to_datetime('9/28/2020 13:10:00')
assert job_retry == 0
def test_last_run_fail():
s = Scheduler(pd.to_datetime('9/28/2020 13:06:03'),
{'A': ScheduledJobDefinition("*/5 * * * *", late_start_cutoff=0)})
s.job_states['A'] = ScheduledJobState(pd.to_datetime('9/28/2020 13:05:00'), 0, pd.to_datetime('9/28/2020 13:05:33'), RunState.FAIL)
when, (job_time, job_retry) = s.get_next_run_for_job('A')
assert job_time == when == pd.to_datetime('9/28/2020 13:10:00')
assert job_retry == 0
def test_last_run_fail_permanently():
s = Scheduler(pd.to_datetime('9/28/2020 00:00:00'),
{'A': ScheduledJobDefinition("*/5 * * * *", late_start_cutoff=None, retries=3)})
s.job_states['A'] = ScheduledJobState(pd.to_datetime('9/28/2020 13:05:00'), 0, pd.to_datetime('9/28/2020 13:05:33'), RunState.FAIL_PERMANENT)
s.set_time(pd.to_datetime('9/28/2020 13:06:03'))
when, (job_time, job_retry) = s.get_next_run_for_job('A')
assert job_time == when == pd.to_datetime('9/28/2020 13:10:00')
assert job_retry == 0
def test_three_retries_last_run_complete():
s = Scheduler(pd.to_datetime('9/28/2020 00:00:00'),
# Copyright (C) 2016 <NAME> <<EMAIL>>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
from . import Time as tsti
from . import DateTime_Functions as dtfunc
from . import Perf as tsperf
from . import Plots as tsplot
from . import Utils as tsutil
# for timing
import time
class cAbstractCycle:
def __init__(self , trend):
self.mTimeInfo = tsti.cTimeInfo()
self.mTrendFrame = pd.DataFrame()
self.mCycleFrame = pd.DataFrame()
self.mTrend = trend;
self.mTrend_residue_name = self.mTrend.mOutName + '_residue'
self.mFormula = None;
self.mComplexity = None;
def getCycleResidueName(self):
return self.getCycleName() + "_residue";
def plot(self):
tsplot.decomp_plot(self.mCycleFrame, self.mTimeInfo.mNormalizedTimeColumn,
self.mTrend_residue_name, self.getCycleName() , self.getCycleResidueName(), horizon = self.mTimeInfo.mHorizon);
def check_not_nan(self, sig , name):
#print("check_not_nan");
if(np.isnan(sig).any() or np.isinf(sig).any() ):
logger = tsutil.get_pyaf_logger();
logger.error("CYCLE_RESIDUE_WITH_NAN_IN_SIGNAL" + str(sig));
raise tsutil.Internal_PyAF_Error("CYCLE_COLUMN _FOR_TREND_RESIDUE ['" + name + "'");
pass
def compute_target_means_by_cycle_value(self , iCycleFrame, iCycleName):
# we encode only using estimation
lCycleFrameEstim = self.mSplit.getEstimPart(iCycleFrame);
lGroupBy = lCycleFrameEstim.groupby(by=[iCycleName] , sort=False)[self.mTrend_residue_name]
lEncodedValueDict = None
if(self.mOptions.mCycle_Encoding_Scheme == "Target_Mean"):
lEncodedValueDict = lGroupBy.mean().to_dict();
else:
lEncodedValueDict = lGroupBy.median().to_dict();
for x in lEncodedValueDict.keys():
lEncodedValueDict[ x ] = np.float64(lEncodedValueDict[ x ])
return lEncodedValueDict
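# Illustration (hypothetical numbers): for estimation residues [1, 3, 2, 4] with cycle values
# [0, 1, 0, 1], the "Target_Mean" scheme above yields {0: 1.5, 1: 3.5}; any other encoding
# scheme falls back to the per-group median.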
def compute_target_means_default_value(self):
# we encode only using estimation
lCycleFrameEstim = self.mSplit.getEstimPart(self.mCycleFrame);
if(self.mOptions.mCycle_Encoding_Scheme == "Target_Mean"):
return np.float64(lCycleFrameEstim[self.mTrend_residue_name].mean());
return np.float64(lCycleFrameEstim[self.mTrend_residue_name].median());
def computePerf(self):
if(self.mOptions.mDebug):
self.check_not_nan(self.mCycleFrame[self.getCycleResidueName()], self.getCycleResidueName())
# self.mCycleFrame.to_csv(self.getCycleResidueName() + ".csv");
self.mCycleFitPerf = tsperf.cPerf();
self.mCycleForecastPerf = tsperf.cPerf();
# self.mCycleFrame[[self.mTrend_residue_name, self.getCycleName()]].to_csv(self.getCycleName() + ".csv");
(lFrameFit, lFrameForecast, lFrameTest) = self.mSplit.cutFrame(self.mCycleFrame);
self.mCycleFitPerf.computeCriterion(
lFrameFit[self.mTrend_residue_name], lFrameFit[self.getCycleName()],
self.mOptions.mCycle_Criterion, self.getCycleName())
self.mCycleForecastPerf.computeCriterion(
lFrameForecast[self.mTrend_residue_name], lFrameForecast[self.getCycleName()],
self.mOptions.mCycle_Criterion, self.getCycleName())
class cZeroCycle(cAbstractCycle):
def __init__(self , trend):
super().__init__(trend);
self.mFormula = "NoCycle"
self.mComplexity = 0;
def getCycleName(self):
return self.mTrend_residue_name + "_zeroCycle";
def dump_values(self):
logger = tsutil.get_pyaf_logger();
lDict = {}
logger.info("ZERO_CYCLE_MODEL_VALUES " + self.getCycleName() + " 0.0 " + "{}");
def fit(self):
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[self.getCycleName()] = np.zeros_like(self.mTrendFrame[self.mTrend_residue_name])
self.mCycleFrame[self.getCycleResidueName()] = self.mCycleFrame[self.mTrend_residue_name];
self.mOutName = self.getCycleName()
def transformDataset(self, df):
target = df[self.mTrend_residue_name]
df[self.getCycleName()] = np.zeros_like(df[self.mTrend_residue_name]);
df[self.getCycleResidueName()] = target - df[self.getCycleName()].values
return df;
class cSeasonalPeriodic(cAbstractCycle):
def __init__(self , trend, date_part):
super().__init__(trend);
self.mDatePart = date_part;
self.mEncodedValueDict = {}
self.mFormula = "Seasonal_" + self.mDatePart.name;
def getCycleName(self):
return self.mTrend_residue_name + "_Seasonal_" + self.mDatePart.name;
def dump_values(self):
logger = tsutil.get_pyaf_logger();
lDict = {}
logger.info("SEASONAL_MODEL_VALUES " + self.getCycleName() + " " + str(self.mDefaultValue) + " " + str(self.mEncodedValueDict));
def hasEnoughData(self, iTimeMin, iTimeMax):
lTimeDelta = iTimeMax - iTimeMin;
lDays = lTimeDelta / np.timedelta64(1,'D');
lSeconds = lTimeDelta / np.timedelta64(1,'s');
# these are just guesses of how much data is needed to get valid signal stats/means of each seasonal unit.
# TODO : add these in the options. (None, None) => no limit
lThresholds = {
dtfunc.eDatePart.Hour : (1 * 10 , None), # 10 days
dtfunc.eDatePart.Minute : (None , 3600 * 10), # 10 hours
dtfunc.eDatePart.Second : (None , 360 * 10), # 10 minutes
dtfunc.eDatePart.DayOfMonth : (30 * 10 , None), # 10 months
dtfunc.eDatePart.DayOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.MonthOfYear : (360 * 10 , None), # 10 years
dtfunc.eDatePart.WeekOfYear : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.DayOfYear : (360 * 10 , None), # 10 years
dtfunc.eDatePart.HourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.TwoHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.ThreeHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.FourHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.SixHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.EightHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.TwelveHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.WeekOfMonth : (30 * 10 , None), # 10 months
dtfunc.eDatePart.DayOfNthWeekOfMonth : (30 * 10 , None) # 10 months
}
lThreshold = lThresholds.get(self.mDatePart)
if(lThreshold[0] is not None):
return (lDays >= lThreshold[0]);
elif(lThreshold[1] is not None):
return (lSeconds >= lThreshold[1]);
return False;
def compute_date_parts(self, iTimeValues):
lHelper = dtfunc.cDateTime_Helper()
return lHelper.apply_date_time_computer(self.mDatePart, iTimeValues);
def fit(self):
assert(self.mTimeInfo.isPhysicalTime());
lHor = self.mTimeInfo.mHorizon;
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
lName = self.getCycleName();
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[lName] = self.compute_date_parts(self.mTrendFrame[self.mTime])
self.mDefaultValue = self.compute_target_means_default_value()
self.mEncodedValueDict = self.compute_target_means_by_cycle_value(self.mCycleFrame, self.getCycleName())
self.mCycleFrame[lName + '_enc'] = self.mCycleFrame[lName].apply(lambda x : self.mEncodedValueDict.get(x , self.mDefaultValue))
self.mCycleFrame[lName + '_enc'].fillna(self.mDefaultValue, inplace=True);
self.mCycleFrame[self.getCycleResidueName()] = self.mCycleFrame[self.mTrend_residue_name] - self.mCycleFrame[lName + '_enc'];
self.mCycleFrame[lName + '_NotEncoded'] = self.mCycleFrame[lName];
self.mCycleFrame[lName] = self.mCycleFrame[lName + '_enc'];
self.mOutName = self.getCycleName()
#print("encoding '" + lName + "' " + str(self.mEncodedValueDict));
# The longer the seasonal, the more complex it is.
self.mComplexity = len(self.mEncodedValueDict.keys())
def transformDataset(self, df):
target = df[self.mTrend_residue_name]
lDateParts = self.compute_date_parts(df[self.mTime])
df[self.getCycleName()] = lDateParts.apply(lambda x : self.mEncodedValueDict.get(x , self.mDefaultValue))
df[self.getCycleResidueName()] = target - df[self.getCycleName()].values
return df;
class cBestCycleForTrend(cAbstractCycle):
def __init__(self , trend, criterion):
super().__init__(trend);
self.mCycleFrame = pd.DataFrame()
self.mCyclePerfByLength = {}
self.mBestCycleValueDict = {}
self.mBestCycleLength = None
self.mCriterion = criterion
self.mFormula = "BestCycle"
def getCycleName(self):
return self.mTrend_residue_name + "_bestCycle_by" + self.mCriterion;
def dump_values(self):
logger = tsutil.get_pyaf_logger();
lDict = {} if(self.mBestCycleLength is None) else self.mBestCycleValueDict[self.mBestCycleLength]
logger.info("BEST_CYCLE_LENGTH_VALUES " + self.getCycleName() + " " + str(self.mBestCycleLength) + " " + str(self.mDefaultValue) + " " + str(lDict));
def dumpCyclePerfs(self):
print(self.mCyclePerfByLength);
def computeBestCycle(self):
# self.dumpCyclePerfs();
self.mBestCycleLength = None;
lData = self.mCyclePerfByLength.items()
if(len(lData) == 0):
return
lPerf = tsperf.cPerf();
# less MAPE is better, less categories is better, the last is the length to have a total order.
lSortingMethod_By_MAPE = lambda x : (x[1][0], x[0])
lData = sorted(lData, key = lSortingMethod_By_MAPE)
assert(len(lData) > 0)
lBestCriterion = lData[0][1]
lData_smallest = [x for x in lData if lPerf.is_close_criterion_value(self.mOptions.mCycle_Criterion,
x[1][0],
iTolerance = 0.05, iRefValue = lBestCriterion[0])]
lSortingMethod_By_Complexity = lambda x : (x[1][1], x[0])
lData_smallest = sorted(lData_smallest, key = lSortingMethod_By_Complexity)
assert(len(lData_smallest) > 0)
self.mBestCycleLength = lData_smallest[0][0]
self.transformDataset(self.mCycleFrame);
pass
def generate_cycles(self):
self.mTimeInfo.addVars(self.mCycleFrame);
self.mCycleFrame[self.mTrend_residue_name ] = self.mTrendFrame[self.mTrend_residue_name]
self.mDefaultValue = self.compute_target_means_default_value();
self.mCyclePerfByLength = {}
lMaxRobustCycleLength = self.mTrendFrame.shape[0]//12;
# print("MAX_ROBUST_CYCLE_LENGTH", self.mTrendFrame.shape[0], lMaxRobustCycleLength);
lCycleLengths = self.mOptions.mCycleLengths or range(2,lMaxRobustCycleLength + 1)
lCycleFrame = pd.DataFrame()
"""Tests for dynamic validator."""
from datetime import date, datetime
import numpy as np
import pandas as pd
from delphi_utils.validator.report import ValidationReport
from delphi_utils.validator.dynamic import DynamicValidator
class TestReferencePadding:
params = {
"common": {
"data_source": "",
"span_length": 1,
"end_date": "2020-09-02"
}
}
def test_no_padding(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6,
"time_value": pd.date_range(start="2021-01-01", end="2021-01-06")}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
new_ref_df = validator.pad_reference_api_df(
ref_df, test_df, datetime.strptime("2021-01-06", "%Y-%m-%d").date())
assert new_ref_df.equals(ref_df)
def test_half_padding(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_data = {"val": [2, 2, 2, 2, 2, 2], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6,
"time_value": pd.date_range(start="2021-01-01", end="2021-01-06")}
test_data = {"val": [1, 1, 1, 1, 1, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6,
"time_value": pd.date_range(start="2021-01-06", end="2021-01-11")}
ref_df = pd.DataFrame(ref_data)
test_df = pd.DataFrame(test_data)
new_ref_df = validator.pad_reference_api_df(
ref_df, test_df, datetime.strptime("2021-01-15", "%Y-%m-%d").date())
# Check it only takes missing dates - so the last 5 dates
assert new_ref_df.time_value.max() == datetime.strptime("2021-01-11",
"%Y-%m-%d").date()
assert new_ref_df.shape[0] == 11
assert new_ref_df["val"].iloc[5] == 2
def test_full_padding(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_data = {"val": [2, 2, 2, 2, 2, 2], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6,
"time_value": pd.date_range(start="2021-01-01", end="2021-01-06")}
test_data = {"val": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
"se": [np.nan] * 12,
"sample_size": [np.nan] * 12, "geo_id": ["1"] * 12,
"time_value": pd.date_range(start="2021-01-06", end="2021-01-17")}
ref_df = pd.DataFrame(ref_data)
test_df = pd.DataFrame(test_data)
new_ref_df = validator.pad_reference_api_df(
ref_df, test_df, datetime.strptime("2021-01-15", "%Y-%m-%d").date())
# Check it only takes missing dates up to the day before the reference
assert new_ref_df.time_value.max() == datetime.strptime("2021-01-15",
"%Y-%m-%d").date()
assert new_ref_df.shape[0] == 15
assert new_ref_df["val"].iloc[5] == 2
class TestCheckRapidChange:
params = {
"common": {
"data_source": "",
"span_length": 1,
"end_date": "2020-09-02"
}
}
def test_same_df(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
ref_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_0_vs_many(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
time_value = datetime.combine(date.today(), datetime.min.time())
test_df = pd.DataFrame([time_value] * 5, columns=["time_value"])
ref_df = pd.DataFrame([time_value] * 1, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, time_value, "geo", "signal", report)
assert len(report.raised_errors) == 1
assert report.raised_errors[0].check_name == "check_rapid_change_num_rows"
class TestCheckAvgValDiffs:
params = {
"common": {
"data_source": "",
"span_length": 1,
"end_date": "2020-09-02"
}
}
def test_same_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1, 1]*2, "se": [np.nan] * 14,
"sample_size": [np.nan] * 14, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_se(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 14, "se": [1, 1, 1, 2, 0, 1, 1]*2,
"sample_size": [np.nan] * 14, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 14, "se": [np.nan] * 14,
"sample_size": [1, 1, 1, 2, 0, 1, 1]*2, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_val_se_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1, 1]*2, "se": [1, 1, 1, 2, 0, 1, 1]*2,
"sample_size": [1, 1, 1, 2, 0, 1, 1]*2, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_10x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 20, 0, 1, 1]*2, "se": [np.nan] * 14,
"sample_size": [np.nan] * 14, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
ref_data = {"val": [1, 1, 1, 2, 0, 1, 1]*2, "se": [np.nan] * 14,
"sample_size": [np.nan] * 14, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_warnings) == 1
assert report.raised_warnings[0].check_name == "check_test_vs_reference_avg_changed"
def test_100x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 200, 0, 1, 1]*2, "se": [np.nan] * 14,
"sample_size": [np.nan] * 14, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
ref_data = {"val": [1, 1, 1, 2, 0, 1, 1]*2, "se": [np.nan] * 14,
"sample_size": [np.nan] * 14, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_warnings) == 1
assert report.raised_warnings[0].check_name == "check_test_vs_reference_avg_changed"
def test_1000x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 2000, 0, 1, 1]*2, "se": [np.nan] * 14,
"sample_size": [np.nan] * 14, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
ref_data = {"val": [1, 1, 1, 2, 0, 1, 1]*2, "se": [np.nan] * 14,
"sample_size": [np.nan] * 14, "geo_id": ["1"] * 14,
"time_value": ["2021-01-01", "2021-01-02", "2021-01-03", "2021-01-04", "2021-01-05", "2021-01-06", "2021-01-07",
"2021-01-08", "2021-01-09", "2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13", "2021-01-14"]}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_warnings) == 1
assert report.raised_warnings[0].check_name == "check_test_vs_reference_avg_changed"
class TestDataOutlier:
params = {
"common": {
"data_source": "",
"span_length": 1,
"end_date": "2020-09-02"
}
}
pd.set_option("display.max_rows", None, "display.max_columns", None)
# Test to determine outliers based on the raw data; has lead and lag outliers
def test_pos_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [30, 30.28571429, 30.57142857, 30.85714286, 31.14285714,
31.42857143, 31.71428571, 32, 32, 32.14285714,
32.28571429, 32.42857143, 32.57142857, 32.71428571,
32.85714286, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33.28571429, 33.57142857, 33.85714286, 34.14285714]
test_val = [100, 100, 100]
ref_data = {"val": ref_val, "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24", end="2020-10-23")}
test_data = {"val": test_val, "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24", end="2020-10-26")}
ref_data2 = {"val": ref_val, "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24", end="2020-10-23")}
test_data2 = {"val": test_val, "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24", end="2020-10-26")}
ref_df = pd.concat(
[pd.DataFrame(ref_data), pd.DataFrame(ref_data2)]).reset_index(drop=True)
test_df = pd.concat([pd.DataFrame(test_data), pd.DataFrame(test_data2)]). \
reset_index(drop=True)
validator.check_positive_negative_spikes(
test_df, ref_df, "state", "signal", report)
assert len(report.raised_warnings) == 2
assert report.raised_warnings[0].check_name == "check_positive_negative_spikes"
def test_neg_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [100, 101, 100, 101, 100,
100, 100, 100, 100, 100,
100, 102, 100, 100, 100,
100, 100, 101, 100, 100,
100, 100, 100, 99, 100,
100, 98, 100, 100, 100]
test_val = [10, 10, 10]
ref_data = {"val": ref_val, "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24", end="2020-10-23")}
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from Augmenter import Augmenter
from DataLoader import DataLoader
from cnn_classifier import ClassifierCNN
def main():
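# Balance the training set by augmentation: if the class counts are already equal, ask the
# user how many synthetic samples to add per class; otherwise oversample each minority class
# up to the size of the majority class.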
# unbalanced data = ['insect', 'ecg200', 'gunpoint']
data_name = 'insect'
path = 'C:/Users/letiz/Desktop/Aalto/Bachelor\'s Thesis and Seminar - JOIN.bsc/data'
data = DataLoader(path=path, data_name=data_name, cgan=False, bootstrap_test=True)
X_train, y_train, _, _ = data.get_X_y(one_hot_encoding=False)
# minority class
classes, counts = np.unique(y_train, return_counts=True)
print("Classes: ", classes)
print("Counts: ", counts)
minority = [(x, y) for y, x in sorted(zip(counts, classes))][0]
print("Minority class: ", minority[0])
print("Minority samples: ", minority[1])
majority = [(x, y) for y, x in sorted(zip(counts, classes))][-1]
print("Majority class: ", majority[0])
print("Majority samples: ", majority[1])
fake = []
fake_y = []
if len(np.unique(counts)) == 1:
print("This dataset is balanced")
print("Set the number of fake samples per class you want to generate: ")
n = int(input())
if n > 0:
for c in range(len(classes)):
label = classes[c]
print(f"Class {label} will get {n} more samples.")
take_idx = np.where(y_train == label)[0]
aug = Augmenter(data=X_train.to_numpy()[take_idx], labels=y_train[take_idx])
for i in range(n):
# Jittering is used as the default augmentation here so that x, y, idx are defined;
# swap in one of the alternatives commented out below if preferred.
x, y, idx = aug.jittering(mu=0, sigma=0.001)
# print("Flipping")
# x, y, idx = aug.flipping()
# print("Permutation")
# x, y, idx = aug.permutation(n_segments=7)
# print("AVG_TS_SMOTE")
# x, y, idx = aug.smote_oversampling()
print(f"{i + 1} artificial samples from class {label} done. The seed was {idx}")
fake.append(x)
fake_y.append(y)
for c in range(len(classes)):
samples_needed = majority[1] - counts[c]
label = classes[c]
print(f"Class {label} needs {samples_needed} more samples.")
if samples_needed > 0:
# isolate the samples from the class
take_idx = np.where(y_train == label)[0]
aug = Augmenter(data=X_train.to_numpy()[take_idx], labels=y_train[take_idx])
for i in range(samples_needed):
# Jittering is used as the default augmentation here so that x, y, idx are defined;
# swap in one of the alternatives commented out below if preferred.
x, y, idx = aug.jittering(mu=0, sigma=0.001)
# print("Flipping")
# x, y, idx = aug.flipping()
# print("Permutation")
# x, y, idx = aug.permutation(n_segments=7)
# print("AVG_TS_SMOTE")
# x, y, idx = aug.smote_oversampling()
print(f"{i + 1} artificial samples from class {label} done. The seed was {idx}")
fake.append(x)
fake_y.append(y)
fake_X = pd.DataFrame(fake)
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
_testing as tm,
concat,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
pytestmark = pytest.mark.single
def test_format_type(setup_path):
df = DataFrame({"A": [1, 2]})
with ensure_clean_path(setup_path) as path:
with HDFStore(path) as store:
store.put("a", df, format="fixed")
store.put("b", df, format="table")
assert store.get_storer("a").format_type == "fixed"
assert store.get_storer("b").format_type == "table"
def test_format_kwarg_in_constructor(setup_path):
# GH 13291
msg = "format is not a defined argument for HDFStore"
with tm.ensure_clean(setup_path) as path:
with pytest.raises(ValueError, match=msg):
HDFStore(path, format="table")
def test_api_default_format(setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_put(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn import preprocessing
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D, proj3d
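# The project CSV labels each observation with a proximity mode ("Close" or "Far");
# the two helpers below keep only the rows and columns relevant to one of those modes.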
def extract_close(data_frame):
close_df = data_frame.drop([data_frame.columns[0], "Potter", "Weasley", "Granger", "Trip", "Far"], axis=1)
close_df = close_df.loc[close_df["Close"] > 0]
return close_df
def extract_far(data_frame):
far_df = data_frame.drop([data_frame.columns[0], "Potter", "Weasley", "Granger", "Trip", "Close"], axis=1)
far_df = far_df.loc[far_df["Far"] > 0]
return far_df
#
# def animate(i):
# ax.view_init(elev=35., azim=i)
# return fig,
if __name__ == '__main__':
proj_df = pd.read_csv("data_proj_414.csv")
"""<NAME>-2020.
MLearner Machine Learning Library Extensions
Author:<NAME><www.linkedin.com/in/jaisenbe>
License: MIT
"""
import pandas as pd
import numpy as np
import pytest
from mlearner.preprocessing import DataAnalyst
import matplotlib
matplotlib.use('Template')
data = pd.DataFrame({"a": [0., 1., 1., 0., 1., 1.], "b": [10, 11, 12, 13, 11, 100], "c": ["OK", "OK", "NOK", "OK", "OK", "NOK"]})
# Author: <NAME>
import os
import time
import requests
import pandas as pd
import geopandas as gpd
import numpy as np
import subprocess
import sqlalchemy
import datetime
import multiprocessing as mp
from datetime import datetime
from io import StringIO
pd.set_option('display.max_columns', None) # DEBUG
# Get DB connection information from environment variables specified by docker-compose.yml
pgServer = os.environ['POSTGRES_SERVER']
pgPort = os.environ['POSTGRES_PORT']
pgUser = os.environ['POSTGRES_USER']
pgPass = os.environ['POSTGRES_PASSWORD']
pgDB = os.environ['POSTGRES_DB']
engine = sqlalchemy.create_engine('postgresql+psycopg2://{pgUser}:{pgPass}@{pgServer}:{pgPort}/{db}'.format(
pgUser=pgUser, pgPass=pgPass, pgServer=pgServer, pgPort=pgPort, db=pgDB))
# Set relevant constants
url = r"https://waterservices.usgs.gov/nwis/iv/?format=rdb&stateCd=<REPLACEME>&variable=72019&siteType=GW&siteStatus=active&period=P1D"
lookupCoordsURL = r"https://waterdata.usgs.gov/nwis/inventory?search_site_no=<REPLACEME>&search_site_no_match_type=exact&group_key=NONE&format=sitefile_output&sitefile_output_format=rdb&column_name=dec_lat_va&column_name=dec_long_va&list_of_search_criteria=search_site_no"
states = ["al", "az", "ar", "ca", "co", "ct", "de", "fl", "ga", "id", "il", "in", "ia", "ks", "ky", "la", "me", "md", "ma", "mi", "mn", "ms", "mo", "mt", "ne", "nv", "nh", "nj", "nm", "ny", "nc", "nd", "oh", "ok", "or", "pa", "ri", "sc", "sd", "tn", "tx", "ut", "vt", "va", "wa", "wv", "wi", "wy"]
cpus = int(mp.cpu_count() * float(os.environ['PARALLEL_FACTOR'])) # Set number of processes for parallel processing
runeveryx = int(float(os.environ['RUN_INTERVAL_MIN']) * 60) # Allows for decimal values for minutes. Ex. 7.5
full_data = ""  # Not a constant, but needs to be initialized globally
def fix_merge(df_merged):
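# After a pandas merge, shared columns get _x/_y suffixes. This helper keeps the right-hand
# (_y) lat/lon columns, keeps every other left-hand (_x) column, and drops the redundant
# counterparts so the frame ends up with unsuffixed column names again.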
for col in df_merged.columns:
if col == 'lat_y' or col == 'lon_y':
new_name = col.replace('_y','')
df_merged.rename(columns = {col : new_name }, inplace=True)
elif col[-2:] == '_x' and 'lat' not in col and 'lon' not in col:
new_name = col.replace('_x','')
df_merged.rename(columns = {col : new_name }, inplace=True)
elif col[-2:] != '_x' and col[-2:] != '_y':
pass # this is the field the merge is based on, do nothing
else:
df_merged.drop(columns = col, inplace = True)
return df_merged
def get_coords(site_nos):
retList = []
for site_no in site_nos.tolist():
print("Acquiring coord for {}".format(str(site_no)))
coordreq = requests.get(lookupCoordsURL.replace("<REPLACEME>", str(site_no))).text.split("\n")
coordreq = coordreq[len(coordreq) - 2] # last line is blank...
retList.append([site_no, coordreq.split("\t")[0], coordreq.split("\t")[1]])
return pd.DataFrame(retList, columns = ['site_no', 'lat', 'lon'])
def mp_get_data(*args):
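# Multiprocessing worker: fetch the USGS RDB text for one state and keep only the
# tab-separated data rows (comment lines, repeated headers and field-size lines are dropped).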
state = args[0]
data = requests.get(url.replace("<REPLACEME>", state)).text
ret_data = ""
for line in data.splitlines():
if (not line.startswith('#') and
not line.startswith('5s') and
not line.startswith('agency')): # Remove comments, junk lines and headers from the data
line = line.strip()
if line: # Empty strings (blank lines) filtered out
ret_data += line[:line.find("\tP\t")] + "\n"
return ret_data
def log_result(result):
"""
Callback function for parallel processing.
"""
global full_data
if result:
full_data += result
def parallelize_df(data, func):
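# Split `data` into one chunk per worker process, apply `func` to each chunk in parallel,
# and concatenate the partial results back into a single DataFrame.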
global cpus
data_split = np.array_split(data, cpus)
pool = mp.Pool(processes=cpus)
results = pool.map(func, data_split)
if len(results) > 1: # Possible to have only 1 resulting DF.
data = pd.concat(results)
else:
data = results[0]
pool.close()
pool.join()
return data
def load_shp():
# Could be easily configured to load in multiple shapefiles...
global engine
shpFile = "/shapefiles/contiguous_us_states_polygon.shp"
tblName = "contiguous_us_states_polygon"
gdf = gpd.read_file(shpFile)
gdf.to_postgis(name=tblName, con=engine, if_exists='replace', schema='public', index=False)
def contour(times):
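# For each timestamp, aggregate water-level observations falling within a 30-minute window,
# contour them via the PL/R plr_contour function inside the database, and clip the resulting
# polygons to the contiguous US boundary.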
engine = sqlalchemy.create_engine('postgresql+psycopg2://{pgUser}:{pgPass}@{pgServer}:{pgPort}/{db}'.format(
pgUser=pgUser, pgPass=pgPass, pgServer=pgServer, pgPort=pgPort, db=pgDB))
ret_list = []
for curTime in times.tolist():
print("Calculating contours for {}".format(str(curTime)))
sql = """SELECT a.depth_towl_ft, ST_Multi(ST_Intersection(a.geom, b.geometry)) AS geom
FROM
(select depth_towl_ft, st_geomfromtext(geom, 4269) AS geom
from plr_contour(
$$select DISTINCT ON (x, y) ST_X(geom) as x, ST_Y(geom) as y, AVG(depth_towl_ft) as z, datetime
from public.usgs_wl_data
where
( (datetime, datetime)
OVERLAPS
( TO_TIMESTAMP('{}', 'YYYY-MM-DD HH24:MI:SS' ), interval '30 minutes' )
) AND depth_towl_ft IS NOT NULL
GROUP BY x, y, datetime
$$
) as geom) AS a
CROSS JOIN public.contiguous_us_states_polygon AS b;
""".format(curTime)
try:
cur_df = pd.read_sql(sql, engine)
cur_df['datetime'] = curTime
ret_list.append(cur_df)
except:
# There are some instances where aggregating to a time with insufficient data results in an error thrown in the R functions.
# These situations occur when the most recent time period is not fully available. IE a run starting at 7:53 AM is aggregated to 8:00 AM, but
# Most of the data required is in the future. Appending an empty DF, we'll fix it in a later run when enough data is available.
ret_list.append(pd.DataFrame([], columns = ['depth_towl_ft', 'datetime', 'geom']))
if len(ret_list) > 1: # Possible to have only 1 resulting DF.
return pd.concat(ret_list)
elif len(ret_list) == 1:
return ret_list[0]
else:
# Return an empty DataFrame, allows it to fail-safe in a lot of places without abusing try/excepts.
return pd.DataFrame([], columns = ['depth_towl_ft', 'datetime', 'geom'])
def main():
global full_data
full_data = "agency\tsite_no\tdatetime\ttz_cd\tdepth_towl_ft\n" # Header
# Begin requesting and processing, parallelized for speed
p = mp.Pool(processes=cpus)
print("Acquiring data for each state...")
for state in states:
p.apply_async(mp_get_data, args = [state], callback = log_result)
p.close()
p.join()
# Set date and acquire known site locations
curDate = datetime.today().strftime('%Y-%m-%d')
try:
print("Checking for known site locations...")
knownSites = pd.read_sql("SELECT DISTINCT site_no, ST_X(geom) as lon, ST_Y(geom) as lat from usgs_wl_data" , engine)
except:
knownSites = None # first run, table is not initialized -- no site locations are known
df = pd.read_table(StringIO(full_data), sep="\t", index_col=False)
df['depth_towl_ft'] = pd.to_numeric(df.depth_towl_ft, errors='coerce') # Filter out garbage values in the depth column
df['uid'] = df['site_no'].astype('str') + "_" + df['datetime'].astype('str')
df['lat'] = np.nan
df['lon'] = np.nan
# Get coords from known sites
try:
df = fix_merge(df.merge(knownSites, how='left', on='site_no'))
except:
pass # Must be the first run
# Get rows still missing coords
df_missing = df[df.lon.isna() | df.lat.isna()].drop_duplicates(subset='site_no')
# Retrieve coordinates from USGS for locations missing coords, parallelized so first run isn't so painful. Only if df_missing isn't empty
if not df_missing.empty:
df_missing = fix_merge(df_missing.merge(parallelize_df(df_missing['site_no'], get_coords), how='left', on='site_no'))
# Update full dataframe with missing coordinates
df = fix_merge(df.merge(df_missing, how='left', on='site_no', validate="many_to_one"))
# Insert to DB
print("Inserting any new data into DB...")
df.to_sql('temptable', con=engine, if_exists='replace', index=False)
query = """ INSERT INTO usgs_wl_data (uid, agency, site_no, datetime, tz_cd, depth_towl_ft, lat, lon)
SELECT t.uid, t.agency, t.site_no, TO_TIMESTAMP(t.datetime, 'YYYY-MM-DD HH24:MI:SS'), t.tz_cd, t.depth_towl_ft::DECIMAL, t.lat::DECIMAL, t.lon::DECIMAL
FROM temptable t
WHERE NOT EXISTS
(SELECT 1 FROM usgs_wl_data f
WHERE t.uid = f.uid)"""
df_query = """ SELECT DISTINCT t.datetime
FROM temptable t
WHERE NOT EXISTS
(SELECT 1 FROM usgs_wl_data f
WHERE t.uid = f.uid)"""
df_contourIntervals = pd.read_sql(df_query, engine)
######### imports #########
from ast import arg
from datetime import timedelta
import sys
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_constants import *
from Reff_functions import *
import glob
import os
from sys import argv
import arviz as az
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
from math import ceil
import pickle
from cmdstanpy import CmdStanModel
matplotlib.use("Agg")
from params import (
truncation_days,
start_date,
third_start_date,
alpha_start_date,
omicron_start_date,
omicron_only_date,
omicron_dominance_date,
pop_sizes,
num_forecast_days,
get_all_p_detect_old,
get_all_p_detect,
)
def process_vax_data_array(
data_date,
third_states,
third_end_date,
variant="Delta",
print_latest_date_in_ts=False,
):
"""
Processes the vaccination data to an array for either the Omicron or Delta strain.
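Steps: read the vaccine-effect timeseries, keep the requested variant, restrict to the
third-wave fitting window, pivot to a state x date matrix, pad forward with the latest
available effect if the series ends before third_end_date, and drop the initial (day-0) column.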
"""
# Load in vaccination data by state and date
vaccination_by_state = pd.read_csv(
"data/vaccine_effect_timeseries_" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly
# different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state.loc[
vaccination_by_state["variant"] == variant
]
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
if print_latest_date_in_ts:
# display the latest available date in the NSW data (will be the same date between states)
print(
"Latest date in vaccine data is {}".format(
vaccination_by_state[vaccination_by_state.state == "NSW"].date.values[-1]
)
)
# Get only the dates we need + 1 (this serves as the initial value)
vaccination_by_state = vaccination_by_state[
(
vaccination_by_state.date
>= pd.to_datetime(third_start_date) - timedelta(days=1)
)
& (vaccination_by_state.date <= third_end_date)
]
vaccination_by_state = vaccination_by_state[
vaccination_by_state["state"].isin(third_states)
] # Isolate fitting states
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# If we are missing recent vaccination data, fill it in with the most recent available data.
latest_vacc_data = vaccination_by_state.columns[-1]
if latest_vacc_data < pd.to_datetime(third_end_date):
vaccination_by_state = pd.concat(
[vaccination_by_state]
+ [
pd.Series(vaccination_by_state[latest_vacc_data], name=day)
for day in pd.date_range(start=latest_vacc_data, end=third_end_date)
],
axis=1,
)
# Convert to simple array only useful to pass to stan (index 1 onwards)
vaccination_by_state_array = vaccination_by_state.iloc[:, 1:].to_numpy()
return vaccination_by_state_array
def get_data_for_posterior(data_date):
"""
Read in the various datastreams and combine the samples into a dictionary that we then
dump to a pickle file.
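Data streams combined: microdistancing and mask-wearing surveys, EpyReff estimates
(Delta and Omicron), the NNDSS case line-list, Google mobility, and vaccine-effect
timeseries, aligned over the first, second and third fitting waves.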
"""
print("Performing inference on state level Reff")
data_date = pd.to_datetime(data_date) # Define data date
print("Data date is {}".format(data_date.strftime("%d%b%Y")))
fit_date = pd.to_datetime(data_date - timedelta(days=truncation_days))
print("Last date in fitting {}".format(fit_date.strftime("%d%b%Y")))
# * Note: 2020-09-09 won't work (for some reason)
# read in microdistancing survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# If you get an error here saying 'cannot create a new series when the index is not unique',
# then you have a duplicated md file.
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always = always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
always = always.fillna(method="ffill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
survey_counts_base = (
pd.pivot_table(data=always, index="date", columns="state", values="count")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
survey_respond_base = (
pd.pivot_table(data=always, index="date", columns="state", values="respondents")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
# read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest Mask wearing survey is {}".format(mask_wearing.date.values[-1]))
mask_wearing["state"] = (
mask_wearing["state"].map(states_initials).fillna(mask_wearing["state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
mask_wearing_always = mask_wearing_always.fillna(method="ffill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_counts_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="count"
).astype(int)
mask_wearing_respond_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="respondents"
).astype(int)
df_Reff = pd.read_csv(
"results/EpyReff/Reff_delta" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff["date"] = df_Reff.INFECTION_DATES
df_Reff["state"] = df_Reff.STATE
df_Reff_omicron = pd.read_csv(
"results/EpyReff/Reff_omicron" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff_omicron["date"] = df_Reff_omicron.INFECTION_DATES
df_Reff_omicron["state"] = df_Reff_omicron.STATE
# relabel some of the columns to avoid replication in the merged dataframe
col_names_replace = {
"mean": "mean_omicron",
"lower": "lower_omicron",
"upper": "upper_omicron",
"top": "top_omicron",
"bottom": "bottom_omicron",
"std": "std_omicron",
}
df_Reff_omicron.rename(col_names_replace, axis=1, inplace=True)
# read in NNDSS/linelist data
# If this errors it may be missing a leading zero on the date.
df_state = read_in_cases(
case_file_date=data_date.strftime("%d%b%Y"),
apply_delay_at_read=True,
apply_inc_at_read=True,
)
# save the case file for convenience
df_state.to_csv("results/cases_" + data_date.strftime("%Y-%m-%d") + ".csv")
df_Reff = df_Reff.merge(
df_state,
how="left",
left_on=["state", "date"],
right_on=["STATE", "date_inferred"],
) # how = left to use Reff days, NNDSS missing dates
# merge in the omicron stuff
df_Reff = df_Reff.merge(
df_Reff_omicron,
how="left",
left_on=["state", "date"],
right_on=["state", "date"],
)
df_Reff["rho_moving"] = df_Reff.groupby(["state"])["rho"].transform(
lambda x: x.rolling(7, 1).mean()
) # minimum number of 1
# some days have no cases, so need to fillna
df_Reff["rho_moving"] = df_Reff.rho_moving.fillna(method="bfill")
# counts are already aligned with infection date by subtracting a random incubation period
df_Reff["local"] = df_Reff.local.fillna(0)
df_Reff["imported"] = df_Reff.imported.fillna(0)
######### Read in Google mobility results #########
sys.path.insert(0, "../")
df_google = read_in_google(moving=True, moving_window=7)
# df_google = read_in_google(moving=False)
df = df_google.merge(df_Reff[[
"date",
"state",
"mean",
"lower",
"upper",
"top",
"bottom",
"std",
"mean_omicron",
"lower_omicron",
"upper_omicron",
"top_omicron",
"bottom_omicron",
"std_omicron",
"rho",
"rho_moving",
"local",
"imported",
]],
on=["date", "state"],
how="inner",
)
######### Create useable dataset #########
# ACT and NT not in original estimates, need to extrapolated sorting keeps consistent
# with sort in data_by_state
# Note that as we now consider the third wave for ACT, we include it in the third
# wave fitting only!
states_to_fit_all_waves = sorted(
["NSW", "VIC", "QLD", "SA", "WA", "TAS", "ACT", "NT"]
)
first_states = sorted(["NSW", "VIC", "QLD", "SA", "WA", "TAS"])
fit_post_March = True
ban = "2020-03-20"
first_end_date = "2020-03-31"
# data for the first wave
first_date_range = {
"NSW": pd.date_range(start="2020-03-01", end=first_end_date).values,
"QLD": pd.date_range(start="2020-03-01", end=first_end_date).values,
"SA": pd.date_range(start="2020-03-01", end=first_end_date).values,
"TAS": pd.date_range(start="2020-03-01", end=first_end_date).values,
"VIC": pd.date_range(start="2020-03-01", end=first_end_date).values,
"WA": pd.date_range(start="2020-03-01", end=first_end_date).values,
}
# Second wave inputs
sec_states = sorted([
"NSW",
# "VIC",
])
sec_start_date = "2020-06-01"
sec_end_date = "2021-01-19"
# choose dates for each state for sec wave
sec_date_range = {
"NSW": pd.date_range(start="2020-06-01", end="2021-01-19").values,
# "VIC": pd.date_range(start="2020-06-01", end="2020-10-28").values,
}
# Third wave inputs
third_states = sorted([
"NSW",
"VIC",
"ACT",
"QLD",
"SA",
"TAS",
# "NT",
"WA",
])
# Subtract the truncation days to avoid right truncation as we consider infection dates
# and not symptom onset dates
third_end_date = data_date - pd.Timedelta(days=truncation_days)
# choose dates for each state for third wave
# Note that as we now consider the third wave for ACT, we include it in
# the third wave fitting only!
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
# "NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
fit_mask = df.state.isin(first_states)
if fit_post_March:
fit_mask = (fit_mask) & (df.date >= start_date)
fit_mask = (fit_mask) & (df.date <= first_end_date)
second_wave_mask = df.state.isin(sec_states)
second_wave_mask = (second_wave_mask) & (df.date >= sec_start_date)
second_wave_mask = (second_wave_mask) & (df.date <= sec_end_date)
# Add third wave stuff here
third_wave_mask = df.state.isin(third_states)
third_wave_mask = (third_wave_mask) & (df.date >= third_start_date)
third_wave_mask = (third_wave_mask) & (df.date <= third_end_date)
predictors = mov_values.copy()
# predictors.extend(['driving_7days','transit_7days','walking_7days','pc'])
# remove residential to see if it improves fit
# predictors.remove("residential_7days")
df["post_policy"] = (df.date >= ban).astype(int)
dfX = df.loc[fit_mask].sort_values("date")
df2X = df.loc[second_wave_mask].sort_values("date")
df3X = df.loc[third_wave_mask].sort_values("date")
dfX["is_first_wave"] = 0
for state in first_states:
dfX.loc[dfX.state == state, "is_first_wave"] = (
dfX.loc[dfX.state == state]
.date.isin(first_date_range[state])
.astype(int)
.values
)
df2X["is_sec_wave"] = 0
for state in sec_states:
df2X.loc[df2X.state == state, "is_sec_wave"] = (
df2X.loc[df2X.state == state]
.date.isin(sec_date_range[state])
.astype(int)
.values
)
# used to index what dates are featured in omicron AND third wave
omicron_date_range = pd.date_range(start=omicron_start_date, end=third_end_date)
df3X["is_third_wave"] = 0
for state in third_states:
df3X.loc[df3X.state == state, "is_third_wave"] = (
df3X.loc[df3X.state == state]
.date.isin(third_date_range[state])
.astype(int)
.values
)
# condition on being in third wave AND omicron
df3X.loc[df3X.state == state, "is_omicron_wave"] = (
(
df3X.loc[df3X.state == state].date.isin(omicron_date_range)
* df3X.loc[df3X.state == state].date.isin(third_date_range[state])
)
.astype(int)
.values
)
data_by_state = {}
sec_data_by_state = {}
third_data_by_state = {}
for value in ["mean", "std", "local", "imported"]:
data_by_state[value] = pd.pivot(
dfX[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# account for dates pre pre second wave
if df2X.loc[df2X.state == sec_states[0]].shape[0] == 0:
print("making empty")
sec_data_by_state[value] = pd.DataFrame(columns=sec_states).astype(float)
else:
sec_data_by_state[value] = pd.pivot(
df2X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# account for dates pre pre third wave
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# now add in the summary stats for Omicron Reff
for value in ["mean_omicron", "std_omicron"]:
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# FIRST PHASE
mobility_by_state = []
mobility_std_by_state = []
count_by_state = []
respond_by_state = []
mask_wearing_count_by_state = []
mask_wearing_respond_by_state = []
include_in_first_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: dfX.date.values[-1]]
survey_counts = survey_counts_base.loc[: dfX.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: dfX.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: dfX.date.values[-1]]
for state in first_states:
mobility_by_state.append(dfX.loc[dfX.state == state, predictors].values / 100)
mobility_std_by_state.append(
dfX.loc[dfX.state == state, [val + "_std" for val in predictors]].values / 100
)
count_by_state.append(survey_counts.loc[start_date:first_end_date, state].values)
respond_by_state.append(survey_respond.loc[start_date:first_end_date, state].values)
mask_wearing_count_by_state.append(
mask_wearing_counts.loc[start_date:first_end_date, state].values
)
mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[start_date:first_end_date, state].values
)
include_in_first_wave.append(
dfX.loc[dfX.state == state, "is_first_wave"].values
)
# SECOND PHASE
sec_mobility_by_state = []
sec_mobility_std_by_state = []
sec_count_by_state = []
sec_respond_by_state = []
sec_mask_wearing_count_by_state = []
sec_mask_wearing_respond_by_state = []
include_in_sec_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: df2X.date.values[-1]]
survey_counts = survey_counts_base.loc[: df2X.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: df2X.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: df2X.date.values[-1]]
for state in sec_states:
sec_mobility_by_state.append(
df2X.loc[df2X.state == state, predictors].values / 100
)
sec_mobility_std_by_state.append(
df2X.loc[df2X.state == state, [val + "_std" for val in predictors]].values / 100
)
sec_count_by_state.append(
survey_counts.loc[sec_start_date:sec_end_date, state].values
)
sec_respond_by_state.append(
survey_respond.loc[sec_start_date:sec_end_date, state].values
)
sec_mask_wearing_count_by_state.append(
mask_wearing_counts.loc[sec_start_date:sec_end_date, state].values
)
sec_mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[sec_start_date:sec_end_date, state].values
)
include_in_sec_wave.append(df2X.loc[df2X.state == state, "is_sec_wave"].values)
# THIRD WAVE
third_mobility_by_state = []
third_mobility_std_by_state = []
third_count_by_state = []
third_respond_by_state = []
third_mask_wearing_count_by_state = []
third_mask_wearing_respond_by_state = []
include_in_third_wave = []
include_in_omicron_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: df3X.date.values[-1]]
survey_counts = survey_counts_base.loc[: df3X.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: df3X.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: df3X.date.values[-1]]
for state in third_states:
third_mobility_by_state.append(
df3X.loc[df3X.state == state, predictors].values / 100
)
third_mobility_std_by_state.append(
df3X.loc[df3X.state == state, [val + "_std" for val in predictors]].values / 100
)
third_count_by_state.append(
survey_counts.loc[third_start_date:third_end_date, state].values
)
third_respond_by_state.append(
survey_respond.loc[third_start_date:third_end_date, state].values
)
third_mask_wearing_count_by_state.append(
mask_wearing_counts.loc[third_start_date:third_end_date, state].values
)
third_mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[third_start_date:third_end_date, state].values
)
include_in_third_wave.append(
df3X.loc[df3X.state == state, "is_third_wave"].values
)
include_in_omicron_wave.append(
df3X.loc[df3X.state == state, "is_omicron_wave"].values
)
# policy boolean flag for after travel ban in each wave
policy = dfX.loc[
dfX.state == first_states[0], "post_policy"
] # this is the post ban policy
policy_sec_wave = [1] * df2X.loc[df2X.state == sec_states[0]].shape[0]
policy_third_wave = [1] * df3X.loc[df3X.state == third_states[0]].shape[0]
# read in the vaccination data
delta_vaccination_by_state_array = process_vax_data_array(
data_date=data_date,
third_states=third_states,
third_end_date=third_end_date,
variant="Delta",
print_latest_date_in_ts=True,
)
omicron_vaccination_by_state_array = process_vax_data_array(
data_date=data_date,
third_states=third_states,
third_end_date=third_end_date,
variant="Omicron",
)
# Make state by state arrays
state_index = {state: i + 1 for i, state in enumerate(states_to_fit_all_waves)}
# dates to apply alpha in the second wave (this won't allow for VIC to be added as
# the date_ranges are different)
apply_alpha_sec_wave = (
sec_date_range["NSW"] >= pd.to_datetime(alpha_start_date)
).astype(int)
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(third_start_date)
).days
omicron_only_day = (
pd.to_datetime(omicron_only_date) - pd.to_datetime(third_start_date)
).days
heterogeneity_start_day = (
pd.to_datetime("2021-08-20") - pd.to_datetime(third_start_date)
).days
# number of days we fit the average VE over
tau_vax_block_size = 3
# get pop size array
pop_size_array = []
for s in states_to_fit_all_waves:
pop_size_array.append(pop_sizes[s])
p_detect = get_all_p_detect_old(
states=third_states,
end_date=third_end_date,
num_days=df3X.loc[df3X.state == "NSW"].shape[0],
)
df_p_detect = pd.DataFrame(p_detect, columns=third_states)
df_p_detect["date"] = third_date_range["NSW"]
df_p_detect.to_csv("results/CA_" + data_date.strftime("%Y-%m-%d") + ".csv")
# p_detect = get_all_p_detect(
# end_date=third_end_date,
# num_days=df3X.loc[df3X.state == "NSW"].shape[0],
# )
# input data block for stan model
input_data = {
"j_total": len(states_to_fit_all_waves),
"N_first": dfX.loc[dfX.state == first_states[0]].shape[0],
"K": len(predictors),
"j_first": len(first_states),
"Reff": data_by_state["mean"].values,
"mob": mobility_by_state,
"mob_std": mobility_std_by_state,
"sigma2": data_by_state["std"].values ** 2,
"policy": policy.values,
"local": data_by_state["local"].values,
"imported": data_by_state["imported"].values,
"N_sec": df2X.loc[df2X.state == sec_states[0]].shape[0],
"j_sec": len(sec_states),
"Reff_sec": sec_data_by_state["mean"].values,
"mob_sec": sec_mobility_by_state,
"mob_sec_std": sec_mobility_std_by_state,
"sigma2_sec": sec_data_by_state["std"].values ** 2,
"policy_sec": policy_sec_wave,
"local_sec": sec_data_by_state["local"].values,
"imported_sec": sec_data_by_state["imported"].values,
"apply_alpha_sec": apply_alpha_sec_wave,
"N_third": df3X.loc[df3X.state == "NSW"].shape[0],
"j_third": len(third_states),
"Reff_third": third_data_by_state["mean"].values,
"Reff_omicron": third_data_by_state["mean_omicron"].values,
"mob_third": third_mobility_by_state,
"mob_third_std": third_mobility_std_by_state,
"sigma2_third": third_data_by_state["std"].values ** 2,
"sigma2_omicron": third_data_by_state["std_omicron"].values ** 2,
"policy_third": policy_third_wave,
"local_third": third_data_by_state["local"].values,
"imported_third": third_data_by_state["imported"].values,
"count_md": count_by_state,
"respond_md": respond_by_state,
"count_md_sec": sec_count_by_state,
"respond_md_sec": sec_respond_by_state,
"count_md_third": third_count_by_state,
"respond_md_third": third_respond_by_state,
"count_masks": mask_wearing_count_by_state,
"respond_masks": mask_wearing_respond_by_state,
"count_masks_sec": sec_mask_wearing_count_by_state,
"respond_masks_sec": sec_mask_wearing_respond_by_state,
"count_masks_third": third_mask_wearing_count_by_state,
"respond_masks_third": third_mask_wearing_respond_by_state,
"map_to_state_index_first": [state_index[state] for state in first_states],
"map_to_state_index_sec": [state_index[state] for state in sec_states],
"map_to_state_index_third": [state_index[state] for state in third_states],
"total_N_p_sec": sum([sum(x) for x in include_in_sec_wave]).item(),
"total_N_p_third": sum([sum(x) for x in include_in_third_wave]).item(),
"include_in_first": include_in_first_wave,
"include_in_sec": include_in_sec_wave,
"include_in_third": include_in_third_wave,
"pos_starts_sec": np.cumsum([sum(x) for x in include_in_sec_wave]).astype(int).tolist(),
"pos_starts_third": np.cumsum(
[sum(x) for x in include_in_third_wave]
).astype(int).tolist(),
"ve_delta_data": delta_vaccination_by_state_array,
"ve_omicron_data": omicron_vaccination_by_state_array,
"omicron_start_day": omicron_start_day,
"omicron_only_day": omicron_only_day,
"include_in_omicron": include_in_omicron_wave,
"total_N_p_third_omicron": int(sum([sum(x) for x in include_in_omicron_wave]).item()),
"pos_starts_third_omicron": np.cumsum(
[sum(x) for x in include_in_omicron_wave]
).astype(int).tolist(),
'tau_vax_block_size': tau_vax_block_size,
'total_N_p_third_blocks': int(
sum([int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_third_wave])
),
'pos_starts_third_blocks': np.cumsum(
[int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_third_wave]
).astype(int),
'total_N_p_third_omicron_blocks': int(
sum([int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_omicron_wave])
),
'pos_starts_third_omicron_blocks': np.cumsum(
[int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_omicron_wave]
).astype(int),
"pop_size_array": pop_size_array,
"heterogeneity_start_day": heterogeneity_start_day,
"p_detect": p_detect,
}
# dump the dictionary to a pickle file
with open("results/stan_input_data.pkl", "wb") as f:
pickle.dump(input_data, f)
return None
def run_stan(
data_date,
num_chains=4,
num_samples=1000,
num_warmup_samples=500,
max_treedepth=12,
):
"""
Read results/stan_input_data.pkl in and run the Stan model.
"""
data_date = pd.to_datetime(data_date)
# read in the input data as a dictionary
with open("results/stan_input_data.pkl", "rb") as f:
input_data = pickle.load(f)
# make results and figs dir
figs_dir = (
"figs/stan_fit/stan_fit_"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
os.makedirs(figs_dir, exist_ok=True)
os.makedirs(results_dir, exist_ok=True)
# path to the stan model
# basic model with a switchover between Reffs
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover.stan"
# mixture model with basic susceptible depletion
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_gamma_mix.stan"
# model that has a switchover but incorporates a waning in infection acquired immunity
rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover_waning_infection.stan"
# model that incorporates a waning in infection acquired immunity but is coded as a mixture
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_gamma_mix_waning_infection.stan"
# model that has a switchover but incorporates a waning in infection acquired immunity
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover_waning_infection_single_md.stan"
# compile the stan model
model = CmdStanModel(stan_file=rho_model_gamma)
# obtain a posterior sample from the model conditioned on the data
fit = model.sample(
chains=num_chains,
iter_warmup=num_warmup_samples,
iter_sampling=num_samples,
data=input_data,
max_treedepth=max_treedepth,
refresh=10
)
# display convergence diagnostics for the current run
print("===========")
print(fit.diagnose())
print("===========")
# save output file to
fit.save_csvfiles(dir=results_dir)
df_fit = fit.draws_pd()
df_fit.to_csv(
results_dir
+ "posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# output a set of diagnostics
filename = (
figs_dir
+ "fit_summary_all_parameters"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# save a summary file for all parameters; this involves ESS and ESS/s as well as summary stats
fit_summary = fit.summary()
fit_summary.to_csv(filename)
# now save a small summary to easily view key parameters
pars_of_interest = ["bet[" + str(i + 1) + "]" for i in range(5)]
pars_of_interest = pars_of_interest + ["R_Li[" + str(i + 1) + "]" for i in range(8)]
pars_of_interest = pars_of_interest + [
"R_I",
"R_L",
"theta_md",
"theta_masks",
"sig",
"voc_effect_alpha",
"voc_effect_delta",
"voc_effect_omicron",
]
pars_of_interest = pars_of_interest + [
col for col in df_fit if "phi" in col and "simplex" not in col
]
# save a summary for ease of viewing
# output a set of diagnostics
filename = (
figs_dir
+ "fit_summary_main_parameters"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
fit_summary.loc[pars_of_interest].to_csv(filename)
return None
def plot_and_save_posterior_samples(data_date):
"""
Runs the full suite of plotting.
"""
data_date = pd.to_datetime(data_date) # Define data date
figs_dir = (
"figs/stan_fit/stan_fit_"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# read in the posterior sample
samples_mov_gamma = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# * Note: 2020-09-09 won't work (for some reason)
######### Read in microdistancing (md) surveys #########
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
import pandas as pd
import networkx as nx
import numpy as np
import os
import random
'''
code main goal: build a graph with labels and a knowledge-graph over the classes.
~_~_~ Graph ~_~_~
Graph nodes: movies
Graph edges: given 2 movies, an edge is added if a cast member plays in both movies.
Label: the genre of the movie. We treat each multi-genre combination as a distinct label. For example: Drama-Comedy and Action-Comedy
are treated as different labels.
~_~_~ Knowledge-Graph ~_~_~
Knowledge-Graph nodes: classes represented by genre types.
Knowledge-Graph edges: Jaccard similarity (intersection over union) gives the weight of the edge between two classes.
For example: the intersection of Drama-Comedy and Action-Comedy is Comedy (size 1).
The union is Drama, Action, Comedy (size 3).
Thus, there is an edge with weight 1/3 between those classes.
'''
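# A minimal sketch (not part of the original pipeline) of the Jaccard weighting described
# above: the edge weight between two multi-genre labels is |intersection| / |union| of their
# genre sets. The helper name and the '-' separator are assumptions made for illustration.
def _genre_jaccard(label_a, label_b, sep='-'):
    """Jaccard similarity between two multi-genre labels, e.g. 'Drama-Comedy'."""
    a, b = set(label_a.split(sep)), set(label_b.split(sep))
    return len(a & b) / len(a | b)

# Example: _genre_jaccard('Drama-Comedy', 'Action-Comedy') == 1/3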
class DataCsvToGraph(object):
"""
Class that reads and cleans the data.
For the IMDb data set we download 2 csv files:
IMDb movies.csv includes 81273 movies with attributes: title, year, genre, etc.
IMDb title_principals.csv includes 38800 movies and 175715 cast names that play among the movies.
"""
def __init__(self, data_paths):
self.data_paths = data_paths
@staticmethod
def drop_columns(df, arr):
for column in arr:
df = df.drop(column, axis=1)
return df
def clean_data_cast(self: None) -> object:
"""
Clean 'IMDb title_principals.csv' data.
:return: Data-Frame with cast ('imdb_name_id') and the movies ('imdb_title_id') they play.
"""
if os.path.exists('pkl_e2v/data_cast_movie.pkl'):
data = pd.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.drop_columns(data, clean_column)
data = data.sort_values('imdb_name_id')
data = pd.DataFrame.dropna(data)
keys = data
keys = keys.drop('imdb_name_id', axis=1)
data = pd.read_pickle('pkl_e2v/data_cast_movie.pkl')
data['tmp'] = keys['imdb_title_id']
else:
data = pd.read_csv(self.data_paths['cast'])
import streamlit as st
import requests
from bs4 import BeautifulSoup as bs
import time
import pandas as pd
import random
import re
import urllib.request
from PIL import Image
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as mpim
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.image as mpimg
from io import BytesIO
import urllib.request
#%matplotlib inline
def app():
st.markdown("""
This app performs simple web scraping of NFL football player passing stats and builds a radar chart that serves as a common metric for visualizing each team's passing performance!
* **Python libraries:** base64, pandas, streamlit, numpy, matplotlib, seaborn
* **Data source:** [pro-football-reference.com](https://www.pro-football-reference.com/).
Data is from 1932 to 2005.
""")
# most recent season available for this (pre-2006, passer-rating-era) scrape; used as the exclusive upper bound of the year selector
last_passer_rating_season = 2006
st.sidebar.header('User Customization')
selected_year = st.sidebar.selectbox('Year', list(reversed(range(1932,last_passer_rating_season))))
@st.cache
def scraping_QB_Stats(selected_year):
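# Scrape the season passing table from pro-football-reference and build one row per
# quarterback; cached by Streamlit so re-running the app with the same year does not
# re-fetch the page.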
players = []
url = 'https://www.pro-football-reference.com/years/'+ str(selected_year) + '/passing.htm'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0'}
page = requests.get(url,headers=headers, timeout=2, allow_redirects = True )
soup = bs(page.content, 'html.parser')
href = soup.find('table', {'id': 'passing'})
href_th = soup.find_all('th',{'class':'right'})
href_tbody = soup.find_all('tbody')
href_tr = soup.find_all('tr')
for i in href_tbody:
href_tr_data = i.find_all('tr')
for i in href_tr_data:
while True:
try:
'''Rank of Player Collected'''
ranking_search = i.find('th', {'data-stat':'ranker'})
ranking = ranking_search['csk']
'''Name of Player Collected'''
names_search = i.find('td', {'data-stat':'player'})
#names = names_search['csk']
names_text = names_search.find('a')
names = names_text.text
'''Team of PLayer Collected'''
team_search = i.find('td', {'data-stat':'team'})
team_name = team_search.find('a')
team = team_name['title']
'''Age of Player Collected '''
age_search = i.find('td',{'data-stat':'age'})
age = age_search.text
'''Games and Games played by Player Collected'''
games_search = i.find('td',{'data-stat':'g'})
games = games_search.text
games_played_search = i.find('td',{'data-stat':'gs'})
games_played = games_played_search.text
'''QB Record and Percentage Wins for Players Collected'''
#qbRec_search = i.find('td',{'data-stat':'qb_rec'})
#bRec_percentage = qbRec_search['csk']
#qbRec = qbRec_search.text
'''Passes Completed of Player Collected'''
passes_completed_search = i.find('td',{'data-stat':'pass_cmp'})
passes_completed = passes_completed_search.text
'''Passes Attempted of Player Collected'''
passes_attempted_search = i.find('td',{'data-stat':'pass_att'})
passes_attempted = passes_attempted_search.text
'''Completion Percentage of Player Collected'''
completion_percentage_search = i.find('td',{'data-stat':'pass_cmp_perc'})
completion_percentage = completion_percentage_search.text
'''Passing Yards of Player Collected'''
passing_yards_search = i.find('td',{'data-stat':'pass_yds'})
passing_yards = passing_yards_search.text
'''Passing Touchdowns of Player Collected'''
passing_touchdowns_search = i.find('td',{'data-stat':'pass_td'})
passing_touchdowns = passing_touchdowns_search.text
'''Touchdown Percentage of Player Collected'''
touchdown_percentage_search = i.find('td',{'data-stat':'pass_td_perc'})
touchdown_percentage = touchdown_percentage_search.text
'''Interceptions of Player Collected'''
interceptions_search = i.find('td',{'data-stat':'pass_int'})
interceptions = interceptions_search.text
'''Interception Percentage of Player Collected'''
interception_percentage_search = i.find('td',{'data-stat':'pass_int_perc'})
interception_percentage = interception_percentage_search.text
'''First Downs of Player Collected'''
#firstdowns_search = i.find('td',{'data-stat':'pass_first_down'})
#firstdowns = firstdowns_search.text
'''Longest Pass of Player Collected'''
pass_long_search = i.find('td',{'data-stat':'pass_long'})
pass_long = pass_long_search.text
'''Yards per Attempt of Player Collected'''
yards_per_attempt_search = i.find('td',{'data-stat':'pass_yds_per_att'})
yards_per_attempt = yards_per_attempt_search.text
'''Adjusted Yards per Attempt of Players Collected'''
adj_yards_per_attempt_search = i.find('td',{'data-stat':'pass_adj_yds_per_att'})
adj_yards_per_attempt = adj_yards_per_attempt_search.text
'''Yards per Completion of Players Collected'''
yards_per_completion_search = i.find('td',{'data-stat':'pass_yds_per_cmp'})
yards_per_completion = yards_per_completion_search.text
'''Yards per Game'''
yards_per_game_search = i.find('td',{'data-stat':'pass_yds_per_g'})
yards_per_game = yards_per_game_search.text
'''Rating'''
passer_rating_search = i.find('td',{'data-stat':'pass_rating'})
passer_rating = passer_rating_search.text
#Formatting Data Collected
player = { "Player": names, "Team": team, "Age": age, "Games Played": games, "Games Started": games_played,
"Passes Completed": passes_completed, "Passes Attempted": passes_attempted, "Completion Percentage": completion_percentage, "Passing Yards": passing_yards, "Passing Touchdowns": passing_touchdowns,
"Touchdown Percentage": touchdown_percentage, "Interceptions": interceptions, "Interceptions Percentage": interception_percentage, "Longest Pass": pass_long,
"Yards Per Attempt": yards_per_attempt, "Adjusted Yards Per Attempt": adj_yards_per_attempt, "Yards per Completion": yards_per_completion, "Yards Per Game": yards_per_game,
"Passer Rating": passer_rating}
#Appending Each player to Players List
players.append(player)
#print(ranking, names, team, age, games, games_played)
break
except:
break
df = pd.DataFrame(players)
#df.to_csv("NFL_Player_QB_Search_Passer_Rating_Era.csv")
#print(df)
return df
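# --- Not part of the original snippet: the standard NFL passer rating formula,
# shown as a reference so the scraped "Passer Rating" column can be sanity-checked
# (the site already supplies the computed value).
def passer_rating(cmp, att, yds, td, interceptions):
    def clamp(component):
        # every component of the formula is bounded to the 0-2.375 range
        return max(0.0, min(component, 2.375))
    a = clamp(((cmp / att) - 0.3) * 5)
    b = clamp(((yds / att) - 3) * 0.25)
    c = clamp((td / att) * 20)
    d = clamp(2.375 - ((interceptions / att) * 25))
    return round((a + b + c + d) / 6 * 100, 1)
# example: passer_rating(cmp=385, att=570, yds=4110, td=26, interceptions=9)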
df = scraping_QB_Stats(selected_year)
#########################################################################################
# Player Image Scraper Starts Here
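# load_data() follows each player's profile link and records the headshot URL found in the page's media-item block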
def load_data(selected_year):
player_images = []
url = 'https://www.pro-football-reference.com/years/' + str(selected_year) + '/passing.htm'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0'}
page = requests.get(url, headers=headers, timeout=2, allow_redirects=True)
soup = bs(page.content, 'html.parser')
href = soup.find('table', {'id': 'passing'})
href_th = soup.find_all('th',{'class':'right'})
href_tbody = soup.find_all('tbody')
href_tr = soup.find_all('tr')
for i in href_tbody:
href_tr_data = i.find_all('tr')
for i in href_tr_data:
while True:
try:
names_search = i.find('td', {'data-stat':'player'})
#names = names_search['csk']
names_text = names_search.find('a')
names = names_text.text
for link in names_search.find_all('a', href=True):
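# this issues one extra HTTP request per player, so the image pass is noticeably slower than the stats scrape above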
player_link = link['href']
base = 'https://www.pro-football-reference.com'
url = base + str(player_link)
page = requests.get(url, headers=headers, timeout=2, allow_redirects=True)
soup = bs(page.content, 'html.parser')
player_img = soup.find('div', {'class': 'media-item'})
img = player_img.find('img')
img_src = img['src']
#print(img_src)
player_image = {
"Player": names,
"Player_Image": img_src
}
player_images.append(player_image)
break
except:
break
df3 = pd.DataFrame(player_images)
return df3
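# --- Not part of the original snippet: a minimal sketch of the radar chart the
# app description promises, assuming it is fed the DataFrame returned by
# scraping_QB_Stats(); the metric list and the min-max scaling are illustrative
# assumptions rather than the author's choices.
def plot_passing_radar(stats_df, player_name):
    metrics = ["Completion Percentage", "Passing Yards", "Passing Touchdowns",
               "Yards Per Attempt", "Passer Rating"]
    numeric = stats_df[metrics].apply(pd.to_numeric, errors="coerce")
    # min-max scale each metric so all axes share a 0-1 range
    scaled = (numeric - numeric.min()) / (numeric.max() - numeric.min())
    row = scaled[stats_df["Player"] == player_name].iloc[0]
    angles = np.linspace(0, 2 * np.pi, len(metrics), endpoint=False).tolist()
    values = row.tolist()
    angles += angles[:1]  # close the polygon
    values += values[:1]
    fig, ax = plt.subplots(subplot_kw={"polar": True})
    ax.plot(angles, values)
    ax.fill(angles, values, alpha=0.25)
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(metrics, fontsize=8)
    ax.set_title(player_name)
    return fig
# usage inside app(), for example: st.pyplot(plot_passing_radar(df, df["Player"].iloc[0]))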
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from evalml.pipelines.components import LabelEncoder
def test_label_encoder_init():
encoder = LabelEncoder()
assert encoder.parameters == {"positive_label": None}
assert encoder.random_seed == 0
def test_label_encoder_fit_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])