| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import abc
import sys
import copy
import time
import datetime
import importlib
from abc import ABC
from pathlib import Path
from typing import Iterable, Type
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import fire
import requests
import numpy as np
import pandas as pd
from tqdm import tqdm
from loguru import logger
from yahooquery import Ticker
from dateutil.tz import tzlocal
from qlib.utils import code_to_fname, fname_to_code
CUR_DIR = Path(__file__).resolve().parent
sys.path.append(str(CUR_DIR.parent.parent))
from data_collector.utils import get_calendar_list, get_hs_stock_symbols, get_us_stock_symbols
INDEX_BENCH_URL = "http://push2his.eastmoney.com/api/qt/stock/kline/get?secid=1.{index_code}&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58&klt=101&fqt=0&beg={begin}&end={end}"
REGION_CN = "CN"
REGION_US = "US"
class YahooData:
START_DATETIME = pd.Timestamp("2000-01-01")
HIGH_FREQ_START_DATETIME = pd.Timestamp(datetime.datetime.now() - pd.Timedelta(days=5 * 6))
END_DATETIME = pd.Timestamp(datetime.datetime.now() + pd.Timedelta(days=1))
INTERVAL_1min = "1min"
INTERVAL_1d = "1d"
def __init__(
self,
timezone: str = None,
start=None,
end=None,
interval="1d",
delay=0,
show_1min_logging: bool = False,
):
"""
Parameters
----------
timezone: str
The timezone where the data is located
delay: float
time.sleep(delay), default 0
interval: str
freq, value from [1min, 1d], default 1d
start: str
start datetime, default None
end: str
end datetime, default None
show_1min_logging: bool
show 1min logging, by default False; if True, there may be many warning logs
"""
self._timezone = tzlocal() if timezone is None else timezone
self._delay = delay
self._interval = interval
self._show_1min_logging = show_1min_logging
self.start_datetime = pd.Timestamp(str(start)) if start else self.START_DATETIME
self.end_datetime = min(pd.Timestamp(str(end)) if end else self.END_DATETIME, self.END_DATETIME)
if self._interval == self.INTERVAL_1min:
self.start_datetime = max(self.start_datetime, self.HIGH_FREQ_START_DATETIME)
elif self._interval == self.INTERVAL_1d:
pass
else:
raise ValueError(f"interval error: {self._interval}")
# using for 1min
self._next_datetime = self.convert_datetime(self.start_datetime.date() + pd.Timedelta(days=1), self._timezone)
self._latest_datetime = self.convert_datetime(self.end_datetime.date(), self._timezone)
self.start_datetime = self.convert_datetime(self.start_datetime, self._timezone)
self.end_datetime = self.convert_datetime(self.end_datetime, self._timezone)
@staticmethod
def convert_datetime(dt: [pd.Timestamp, datetime.date, str], timezone):
try:
dt = pd.Timestamp(dt, tz=timezone).timestamp()
dt = pd.Timestamp(dt, tz=tzlocal(), unit="s")
except ValueError as e:
pass
return dt
def _sleep(self):
time.sleep(self._delay)
@staticmethod
def get_data_from_remote(symbol, interval, start, end, show_1min_logging: bool = False):
error_msg = f"{symbol}-{interval}-{start}-{end}"
def _show_logging_func():
if interval == YahooData.INTERVAL_1min and show_1min_logging:
logger.warning(f"{error_msg}:{_resp}")
interval = "1m" if interval in ["1m", "1min"] else interval
try:
_resp = Ticker(symbol, asynchronous=False).history(interval=interval, start=start, end=end)
if isinstance(_resp, pd.DataFrame):
return _resp.reset_index()
elif isinstance(_resp, dict):
_temp_data = _resp.get(symbol, {})
if isinstance(_temp_data, str) or (
isinstance(_resp, dict) and _temp_data.get("indicators", {}).get("quote", None) is None
):
_show_logging_func()
else:
_show_logging_func()
except Exception as e:
logger.warning(f"{error_msg}:{e}")
def get_data(self, symbol: str) -> [pd.DataFrame]:
def _get_simple(start_, end_):
self._sleep()
_remote_interval = "1m" if self._interval == self.INTERVAL_1min else self._interval
return self.get_data_from_remote(
symbol,
interval=_remote_interval,
start=start_,
end=end_,
show_1min_logging=self._show_1min_logging,
)
_result = None
if self._interval == self.INTERVAL_1d:
_result = _get_simple(self.start_datetime, self.end_datetime)
elif self._interval == self.INTERVAL_1min:
if self._next_datetime >= self._latest_datetime:
_result = _get_simple(self.start_datetime, self.end_datetime)
else:
_res = []
def _get_multi(start_, end_):
_resp = _get_simple(start_, end_)
if _resp is not None and not _resp.empty:
_res.append(_resp)
for _s, _e in (
(self.start_datetime, self._next_datetime),
(self._latest_datetime, self.end_datetime),
):
_get_multi(_s, _e)
for _start in pd.date_range(self._next_datetime, self._latest_datetime, closed="left"):
_end = _start + pd.Timedelta(days=1)
_get_multi(_start, _end)
if _res:
_result = pd.concat(_res, sort=False)
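# Hedged usage sketch, inferred only from the constructor docstring and the
# get_data signature shown above (the class body is truncated here, so the
# exact return value of get_data is an assumption): fetch daily bars for one ticker.
collector = YahooData(start="2020-01-01", end="2020-02-01", interval="1d", delay=0.5)
data = collector.get_data("AAPL")        # example ticker
if data is not None and not data.empty:
    print(data.head())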
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from pandas.plotting import autocorrelation_plot
from tensorflow.keras import Sequential
from tensorflow.keras.layers import LSTM
df = pd.read_csv(r'C:\Users\Michael\Desktop\pwrball_rand\pwr_ball - Copy.csv')
"""
Provide classes to perform the groupby aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
from __future__ import annotations
import collections
import functools
from typing import (
Generic,
Hashable,
Iterator,
Sequence,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
import pandas._libs.groupby as libgroupby
import pandas._libs.reduction as libreduction
from pandas._typing import (
ArrayLike,
DtypeObj,
F,
FrameOrSeries,
Shape,
final,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
maybe_cast_pointwise_result,
maybe_cast_result_dtype,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_period_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCCategoricalIndex
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import ExtensionArray
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import (
base,
grouper,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
ensure_index,
)
from pandas.core.internals import ArrayManager
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_flattened_list,
get_group_index,
get_group_index_sorter,
get_indexer_dict,
)
class WrappedCythonOp:
"""
Dispatch logic for functions defined in _libs.groupby
"""
def __init__(self, kind: str, how: str):
self.kind = kind
self.how = how
_CYTHON_FUNCTIONS = {
"aggregate": {
"add": "group_add",
"prod": "group_prod",
"min": "group_min",
"max": "group_max",
"mean": "group_mean",
"median": "group_median",
"var": "group_var",
"first": "group_nth",
"last": "group_last",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod",
"cumsum": "group_cumsum",
"cummin": "group_cummin",
"cummax": "group_cummax",
"rank": "group_rank",
},
}
_cython_arity = {"ohlc": 4} # OHLC
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.lru_cache(maxsize=None)
def _get_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, ftype)
if is_numeric:
return f
elif dtype == object:
if "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
"""
Find the appropriate cython function, casting if necessary.
Parameters
----------
values : np.ndarray
is_numeric : bool
Returns
-------
func : callable
values : np.ndarray
"""
how = self.how
kind = self.kind
if how in ["median", "cumprod"]:
# these two only have float64 implementations
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{values.dtype.name}]"
)
func = getattr(libgroupby, f"group_{how}_float64")
return func, values
func = self._get_cython_function(kind, how, values.dtype, is_numeric)
if values.dtype.kind in ["i", "u"]:
if how in ["add", "var", "prod", "mean", "ohlc"]:
# result may still include NaN, so we have to cast
values = ensure_float64(values)
return func, values
def disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
"""
Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython.
"""
how = self.how
if is_numeric:
# never an invalid op for those dtypes, so return early as fastpath
return
if is_categorical_dtype(dtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
# are not setup for dim transforming
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_datetime64_any_dtype(dtype):
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
raise TypeError(f"timedelta64 type does not support {how} operations")
def get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.get(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, 4)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
def get_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
else:
if is_numeric_dtype(dtype):
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
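# A small, self-contained sketch (not pandas internals) of the dispatch pattern
# WrappedCythonOp uses above: (kind, how) selects an implementation name, and
# the output shape depends on kind plus the op's arity (ohlc returns 4 columns).
_FUNCS = {"aggregate": {"add": "group_add", "ohlc": "group_ohlc"},
          "transform": {"cumsum": "group_cumsum"}}

def out_shape(kind, how, ngroups, values_shape):
    if how == "ohlc":
        return (ngroups, 4)                  # arity 4: open/high/low/close
    if kind == "transform":
        return values_shape                  # transforms keep the input shape
    return (ngroups,) + values_shape[1:]     # aggregations collapse to ngroups

print(_FUNCS["aggregate"]["add"])                    # group_add
print(out_shape("aggregate", "ohlc", 3, (10,)))      # (3, 4)
print(out_shape("transform", "cumsum", 3, (2, 10)))  # (2, 10)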
class BaseGrouper:
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
all the grouping instances to handle in this grouper
for example, when grouping by a list of groupers, the corresponding list of Grouping instances is passed here
sort : bool, default True
whether this grouper will give sorted result or not
group_keys : bool, default True
mutated : bool, default False
indexer : intp array, optional
the indexer created by Grouper
some groupers (e.g. TimeGrouper) sort their axis, and their
group_info is sorted as well, so the indexer is needed to restore the original order
"""
def __init__(
self,
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
indexer: np.ndarray | None = None,
dropna: bool = True,
):
assert isinstance(axis, Index), axis
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
self.dropna = dropna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
@property
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self) -> int:
return len(self.groupings)
def get_iterator(
self, data: FrameOrSeries, axis: int = 0
) -> Iterator[tuple[Hashable, FrameOrSeries]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, group in zip(keys, splitter):
yield key, group.__finalize__(data, method="groupby")
@final
def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
__finalize__ has not been called for the subsetted objects returned.
"""
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self.groupings[0].grouper
@final
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_list(comp_ids, ngroups, self.levels, self.codes)
@final
def apply(self, f: F, data: FrameOrSeries, axis: int = 0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
result_values = None
if data.ndim == 2 and any(
isinstance(x, ExtensionArray) for x in data._iter_column_arrays()
):
# calling splitter.fast_apply will raise TypeError via apply_frame_axis0
# if we pass EA instead of ndarray
# TODO: can we have a workaround for EAs backed by ndarray?
pass
elif isinstance(data._mgr, ArrayManager):
# TODO(ArrayManager) don't use fast_apply / libreduction.apply_frame_axis0
# for now -> relies on BlockManager internals
pass
elif (
com.get_callable_name(f) not in base.plotting_methods
and isinstance(splitter, FrameSplitter)
and axis == 0
# fast_apply/libreduction doesn't allow non-numpy backed indexes
and not data.index._has_complex_internals
):
try:
sdata = splitter.sorted_data
result_values, mutated = splitter.fast_apply(f, sdata, group_keys)
except IndexError:
# This is a rare case in which re-running in python-space may
# make a difference, see test_apply_mutate.test_mutate_groups
pass
else:
# If the fast apply path could be used we can return here.
# Otherwise we need to fall back to the slow implementation.
if len(result_values) == len(group_keys):
return group_keys, result_values, mutated
if result_values is None:
# result_values is None if fast apply path wasn't taken
# or fast apply aborted with an unexpected exception.
# In either case, initialize the result list and perform
# the slow iteration.
result_values = []
skip_first = False
else:
# If result_values is not None we're in the case that the
# fast apply loop was broken prematurely but we have
# already the result for the first group which we can reuse.
skip_first = True
# This calls DataSplitter.__iter__
zipped = zip(group_keys, splitter)
if skip_first:
# pop the first item from the front of the iterator
next(zipped)
for key, group in zipped:
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1 and isinstance(
self.result_index, ABCCategoricalIndex
):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys)
@property
def codes(self) -> list[np.ndarray]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
return [ping.group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Series:
"""
Compute group sizes.
"""
ids, _, ngroup = self.group_info
if ngroup:
out = np.bincount(ids[ids != -1], minlength=ngroup)
else:
out = []
return Series(out, index=self.result_index, dtype="int64")
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = zip(*(ping.grouper for ping in self.groupings))
index = Index(to_groupby)
return self.axis.groupby(index)
@final
@cache_readonly
def is_monotonic(self) -> bool:
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_codes()
ngroups = len(obs_group_ids)
comp_ids = ensure_platform_int(comp_ids)
return comp_ids, obs_group_ids, ngroups
@final
@cache_readonly
def codes_info(self) -> np.ndarray:
# return the codes of items in original grouped axis
codes, _, _ = self.group_info
if self.indexer is not None:
sorter = np.lexsort((codes, self.indexer))
codes = codes[sorter]
return codes
@final
def _get_compressed_codes(self) -> tuple[np.ndarray, np.ndarray]:
all_codes = self.codes
if len(all_codes) > 1:
group_index = get_group_index(all_codes, self.shape, sort=True, xnull=True)
return compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.codes, np.arange(len(ping.group_index))
@final
@cache_readonly
def ngroups(self) -> int:
return len(self.result_index)
@property
def reconstructed_codes(self) -> list[np.ndarray]:
codes = self.codes
comp_ids, obs_ids, _ = self.group_info
return decons_obs_group_ids(comp_ids, obs_ids, self.shape, codes, xnull=True)
@cache_readonly
def result_index(self) -> Index:
if not self.compressed and len(self.groupings) == 1:
return self.groupings[0].result_index.rename(self.names[0])
codes = self.reconstructed_codes
levels = [ping.result_index for ping in self.groupings]
return MultiIndex(
levels=levels, codes=codes, verify_integrity=False, names=self.names
)
@final
def get_group_levels(self) -> list[Index]:
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].result_index]
name_list = []
for ping, codes in zip(self.groupings, self.reconstructed_codes):
codes = ensure_platform_int(codes)
levels = ping.result_index.take(codes)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
@final
def _ea_wrap_cython_operation(
self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs
) -> ArrayLike:
"""
If we have an ExtensionArray, unwrap, call _cython_operation, and
re-wrap if appropriate.
"""
# TODO: general case implementation overridable by EAs.
orig_values = values
if is_datetime64tz_dtype(values.dtype) or is_period_dtype(values.dtype):
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalents
values = values.view("M8[ns]")
res_values = self._cython_operation(
kind, values, how, axis, min_count, **kwargs
)
if how in ["rank"]:
# preserve float64 dtype
return res_values
res_values = res_values.astype("i8", copy=False)
result = type(orig_values)(res_values, dtype=orig_values.dtype)
return result
elif is_integer_dtype(values.dtype) or is_bool_dtype(values.dtype):
# IntegerArray or BooleanArray
values = values.to_numpy("float64", na_value=np.nan)
res_values = self._cython_operation(
kind, values, how, axis, min_count, **kwargs
)
dtype = maybe_cast_result_dtype(orig_values.dtype, how)
if isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
return cls._from_sequence(res_values, dtype=dtype)
return res_values
elif is_float_dtype(values.dtype):
# FloatingArray
values = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)
res_values = self._cython_operation(
kind, values, how, axis, min_count, **kwargs
)
result = type(orig_values)._from_sequence(res_values)
return result
raise NotImplementedError(
f"function is not implemented for this dtype: {values.dtype}"
)
@final
def _cython_operation(
self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs
) -> ArrayLike:
"""
Returns the values of a cython operation.
"""
orig_values = values
assert kind in ["transform", "aggregate"]
if values.ndim > 2:
raise NotImplementedError("number of dimensions is currently limited to 2")
elif values.ndim == 2:
# Note: it is *not* the case that axis is always 0 for 1-dim values,
# as we can have 1D ExtensionArrays that we need to treat as 2D
assert axis == 1, axis
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
cy_op = WrappedCythonOp(kind=kind, how=how)
# can we do this operation with our cython functions
# if not raise NotImplementedError
cy_op.disallow_invalid_ops(dtype, is_numeric)
if is_extension_array_dtype(dtype):
return self._ea_wrap_cython_operation(
kind, values, how, axis, min_count, **kwargs
)
elif values.ndim == 1:
# expand to 2d, dispatch, then squeeze if appropriate
values2d = values[None, :]
res = self._cython_operation(
kind=kind,
values=values2d,
how=how,
axis=1,
min_count=min_count,
**kwargs,
)
if res.shape[0] == 1:
return res[0]
# otherwise we have OHLC
return res.T
is_datetimelike = needs_i8_conversion(dtype)
if is_datetimelike:
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(dtype):
values = values.astype("int64")
elif is_integer_dtype(dtype):
# e.g. uint8 -> uint64, int16 -> int64
dtype = dtype.kind + "8"
values = values.astype(dtype, copy=False)
elif is_numeric:
if not is_complex_dtype(dtype):
values = ensure_float64(values)
ngroups = self.ngroups
comp_ids, _, _ = self.group_info
assert axis == 1
values = values.T
out_shape = cy_op.get_output_shape(ngroups, values)
func, values = cy_op.get_cython_func_and_vals(values, is_numeric)
out_dtype = cy_op.get_out_dtype(values.dtype)
result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
if kind == "aggregate":
counts = np.zeros(ngroups, dtype=np.int64)
if how in ["min", "max"]:
func(
result,
counts,
values,
comp_ids,
min_count,
is_datetimelike=is_datetimelike,
)
else:
func(result, counts, values, comp_ids, min_count)
elif kind == "transform":
# TODO: min_count
func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)
if kind == "aggregate":
# i.e. counts is defined. Locations where count<min_count
# need to have the result set to np.nan, which may require casting,
# see GH#40767
if is_integer_dtype(result.dtype) and not is_datetimelike:
cutoff = max(1, min_count)
empty_groups = counts < cutoff
if empty_groups.any():
# Note: this conversion could be lossy, see GH#40767
result = result.astype("float64")
result[empty_groups] = np.nan
if self._filter_empty_groups and not counts.all():
assert result.ndim != 2
result = result[counts > 0]
result = result.T
if how not in base.cython_cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cython_cast_blocklist we get here
dtype = maybe_cast_result_dtype(orig_values.dtype, how)
op_result = maybe_downcast_to_dtype(result, dtype)
else:
op_result = result
return op_result
def agg_series(self, obj: Series, func: F):
# Caller is responsible for checking ngroups != 0
assert self.ngroups != 0
if len(obj) == 0:
# SeriesGrouper would raise if we were to call _aggregate_series_fast
return self._aggregate_series_pure_python(obj, func)
elif is_extension_array_dtype(obj.dtype):
# _aggregate_series_fast would raise TypeError when
# calling libreduction.Slider
# In the datetime64tz case it would incorrectly cast to tz-naive
# TODO: can we get a performant workaround for EAs backed by ndarray?
return self._aggregate_series_pure_python(obj, func)
elif obj.index._has_complex_internals:
# Preempt TypeError in _aggregate_series_fast
return self._aggregate_series_pure_python(obj, func)
try:
return self._aggregate_series_fast(obj, func)
except ValueError as err:
if "Must produce aggregated value" in str(err):
# raised in libreduction
pass
else:
raise
return self._aggregate_series_pure_python(obj, func)
@final
def _aggregate_series_fast(self, obj: Series, func: F):
# At this point we have already checked that
# - obj.index is not a MultiIndex
# - obj is backed by an ndarray, not ExtensionArray
# - len(obj) > 0
# - ngroups != 0
func = com.is_builtin_func(func)
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
indexer = get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer)
group_index = group_index.take(indexer)
grouper = libreduction.SeriesGrouper(obj, func, group_index, ngroups)
result, counts = grouper.get_result()
return result, counts
@final
def _aggregate_series_pure_python(self, obj: Series, func: F):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = np.empty(ngroups, dtype="O")
initialized = False
splitter = get_splitter(obj, group_index, ngroups, axis=0)
for label, group in enumerate(splitter):
# Each step of this loop corresponds to
# libreduction._BaseGrouper._apply_to_group
res = func(group)
res = libreduction.extract_result(res)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(res, 0)
initialized = True
counts[label] = group.shape[0]
result[label] = res
out = lib.maybe_convert_objects(result, try_float=False)
out = maybe_cast_pointwise_result(out, obj.dtype, numeric_only=True)
return out, counts
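# Hedged exploratory sketch: the public GroupBy attributes below surface what
# the internal BaseGrouper tracks (number of groups, group labels, positional
# indices). Assumes a pandas version of roughly this vintage.
import pandas as pd
_demo = pd.DataFrame({"key": ["a", "b", "a", "b"], "val": [1, 2, 3, 4]})
_gb = _demo.groupby("key")
print(_gb.ngroups)    # 2: one group per distinct key
print(_gb.groups)     # dict: group name -> axis labels of its rows
print(_gb.indices)    # dict: group name -> positional indices of its rows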
class BinGrouper(BaseGrouper):
"""
This is an internal Grouper class
Parameters
----------
bins : the split index of binlabels to group the item of axis
binlabels : the label list
filter_empty : bool, default False
mutated : bool, default False
indexer : np.ndarray[np.intp]
Examples
--------
bins: [2, 4, 6, 8, 10]
binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
'2005-01-05', '2005-01-07', '2005-01-09'],
dtype='datetime64[ns]', freq='2D')
the group_info, which contains the label of each item in the grouped
axis, the index of each label in the label list, and the group count, is
(array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)
meaning that the grouped axis has 10 items which fall into 5
labels: the first and second items belong to the first label, the
third and fourth items belong to the second label, and so on
"""
def __init__(
self,
bins,
binlabels,
filter_empty: bool = False,
mutated: bool = False,
indexer=None,
):
self.bins = ensure_int64(bins)
self.binlabels = ensure_index(binlabels)
self._filter_empty_groups = filter_empty
self.mutated = mutated
self.indexer = indexer
# These lengths must match, otherwise we could call agg_series
# with empty self.bins, which would raise in libreduction.
assert len(self.binlabels) == len(self.bins)
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {
key: value
for key, value in zip(self.binlabels, self.bins)
if key is not NaT
}
return result
@property
def nkeys(self) -> int:
return 1
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self
def get_iterator(self, data: FrameOrSeries, axis: int = 0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if axis == 0:
slicer = lambda start, edge: data.iloc[start:edge]
else:
slicer = lambda start, edge: data.iloc[:, start:edge]
length = len(data.axes[axis])
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
rep = ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return (
ensure_platform_int(comp_ids),
obs_group_ids.astype("int64", copy=False),
ngroups,
)
@cache_readonly
def reconstructed_codes(self) -> list[np.ndarray]:
# get unique result indices, and prepend 0 as groupby starts from the first
return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isna(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self) -> list[Index]:
return [self.binlabels]
@property
def names(self) -> list[Hashable]:
return [self.binlabels.name]
@property
def groupings(self) -> list[grouper.Grouping]:
return [
grouper.Grouping(lvl, lvl, in_axis=False, level=None, name=name)
for lvl, name in zip(self.levels, self.names)
]
def agg_series(self, obj: Series, func: F):
# Caller is responsible for checking ngroups != 0
assert self.ngroups != 0
assert len(self.bins) > 0 # otherwise we'd get IndexError in get_result
if is_extension_array_dtype(obj.dtype):
# preempt SeriesBinGrouper from raising TypeError
return self._aggregate_series_pure_python(obj, func)
grouper = libreduction.SeriesBinGrouper(obj, func, self.bins)
return grouper.get_result()
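# Hedged illustration: reproducing the group_info from the BinGrouper docstring
# above with plain numpy, mirroring the logic in BinGrouper.group_info
# (bins hold cumulative edge positions along the grouped axis).
import numpy as np
bins = np.array([2, 4, 6, 8, 10])
ngroups = len(bins)                    # every bin label is kept in this example
rep = np.diff(np.r_[0, bins])          # items per bin -> [2 2 2 2 2]
comp_ids = np.repeat(np.arange(ngroups), rep)
print(comp_ids)                        # [0 0 1 1 2 2 3 3 4 4]
print(np.arange(ngroups), ngroups)     # obs_group_ids -> [0 1 2 3 4], 5 groups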
def _is_indexed_like(obj, axes, axis: int) -> bool:
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.axes[axis].equals(axes[axis])
elif isinstance(obj, DataFrame):
return obj.axes[axis].equals(axes[axis])
return False
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(Generic[FrameOrSeries]):
def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int = 0):
self.data = data
self.labels = ensure_platform_int(labels) # _should_ already be np.intp
self.ngroups = ngroups
self.axis = axis
assert isinstance(axis, int), axis
@cache_readonly
def slabels(self) -> np.ndarray: # np.ndarray[np.intp]
# Sorted labels
return self.labels.take(self._sort_idx)
@cache_readonly
def _sort_idx(self) -> np.ndarray: # np.ndarray[np.intp]
# Counting sort indexer
return get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self.sorted_data
if self.ngroups == 0:
# we are inside a generator; rather than raise StopIteration
# we merely return to signal the end
return
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for start, end in zip(starts, ends):
yield self._chop(sdata, slice(start, end))
@cache_readonly
def sorted_data(self) -> FrameOrSeries:
return self.data.take(self._sort_idx, axis=self.axis)
def _chop(self, sdata, slice_obj: slice) -> NDFrame:
raise AbstractMethodError(self)
class SeriesSplitter(DataSplitter):
def _chop(self, sdata: Series, slice_obj: slice) -> Series:
# fastpath equivalent to `sdata.iloc[slice_obj]`
mgr = sdata._mgr.get_slice(slice_obj)
# __finalize__ not called here, must be applied by caller if applicable
# fastpath equivalent to:
# `return sdata._constructor(mgr, name=sdata.name, fastpath=True)`
obj = type(sdata)._from_mgr(mgr)
object.__setattr__(obj, "_flags", sdata._flags)
object.__setattr__(obj, "_name", sdata._name)
return obj
class FrameSplitter(DataSplitter):
def fast_apply(self, f: F, sdata: FrameOrSeries, names):
# must return keys::list, values::list, mutated::bool
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
return libreduction.apply_frame_axis0(sdata, f, names, starts, ends)
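# A standalone sketch (plain numpy, no pandas internals) of the splitting
# strategy above: stable-sort the group labels once, then every group is a
# contiguous slice of the sorted data (what _sort_idx/slabels/generate_slices do).
import numpy as np
labels = np.array([1, 0, 2, 0, 1])
sort_idx = np.argsort(labels, kind="stable")     # stand-in for the counting sort
slabels = labels[sort_idx]                       # [0 0 1 1 2]
edges = np.arange(slabels.max() + 1)
starts = np.searchsorted(slabels, edges, side="left")
ends = np.searchsorted(slabels, edges, side="right")
for g, (s, e) in enumerate(zip(starts, ends)):
    print(g, sort_idx[s:e])                      # 0 -> [1 3], 1 -> [0 4], 2 -> [2]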
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
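# Hedged illustration of the helper above on a toy frame: the result is
# re-expanded to the full cartesian product of the groupers' categories, so
# unobserved combinations appear with the fill value.
_cat_a = Categorical(["x", "x"], categories=["x", "y"])
_ints = [1, 2]
_res = DataFrame(
    {"values": [10, 20]},
    index=MultiIndex.from_arrays([_cat_a, _ints], names=["A", "B"]),
)
_full = cartesian_product_for_groupers(_res, [_cat_a, _ints], ["A", "B"], fill_value=0)
# _full now has 4 rows: ("x", 1) and ("x", 2) keep 10 and 20,
# while ("y", 1) and ("y", 2) are filled with 0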
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
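# Small hedged demo of the mapping above: with observed=False, the unobserved
# category "c" still appears in the result, with count 0, sum 0 and mean NaN.
from pandas import Categorical, DataFrame
_demo = DataFrame({"key": Categorical(["a", "b"], categories=["a", "b", "c"]),
                   "val": [1, 2]})
_g = _demo.groupby("key", observed=False)["val"]
print(_g.count()["c"])   # 0
print(_g.sum()["c"])     # 0
print(_g.mean()["c"])    # nan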
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
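# Hedged sketch (not the rest of the original test) of the failure mode that
# test_bins_unequal_len targets (GH3011): grouping by a key whose length does
# not match the axis raises a ValueError.
import numpy as np
import pandas as pd
_s = pd.Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
_bins = pd.cut(_s.dropna().values, 4)      # only 8 labels for a 10-row Series
try:
    _s.groupby(_bins).mean()
except ValueError as err:
    print(err)                             # mismatched grouper and axis lengths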
import hashlib
import json
import logging
import random
import os
import signal
import numpy as np
import torch
from requests.exceptions import ConnectionError
from torch import multiprocessing as mp
import mlflow
from copy import deepcopy
import pandas as pd
from tqdm import tqdm
from farm.visual.ascii.images import WELCOME_BARN, WORKER_M, WORKER_F, WORKER_X
logger = logging.getLogger(__name__)
def set_all_seeds(seed, deterministic_cudnn=False):
"""
Setting multiple seeds to make runs reproducible.
Important: Enabling `deterministic_cudnn` gives you full reproducibility with CUDA,
but might slow down your training (see https://pytorch.org/docs/stable/notes/randomness.html#cudnn) !
:param seed: number to use as seed
:type seed: int
:param deterministic_cudnn: Enable for full reproducibility when using CUDA. Caution: might slow down training.
:type deterministic_cudnn: bool
:return: None
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.cuda.manual_seed_all(seed)
if deterministic_cudnn:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
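# Minimal usage sketch: with the same seed, the same random draws come back.
set_all_seeds(seed=42)
_first = np.random.rand(3)
set_all_seeds(seed=42)
_second = np.random.rand(3)
assert np.allclose(_first, _second)   # reseeding reproduces the draws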
def calc_chunksize(num_dicts, min_chunksize=4, max_chunksize=2000, max_processes=128):
num_cpus = min(mp.cpu_count() - 1 or 1, max_processes) # -1 to keep a CPU core free for the main process
dicts_per_cpu = np.ceil(num_dicts / num_cpus)
# automatic adjustment of multiprocessing chunksize
# for small files (containing few dicts) we want small chunksize to utilize all available cores but never less
# than 2, because we need it to sample another random sentence in LM finetuning
# for large files we want to minimize processor spawning without giving too much data to one process, so we
# clip it at 5k
multiprocessing_chunk_size = int(np.clip((np.ceil(dicts_per_cpu / 5)), a_min=min_chunksize, a_max=max_chunksize))
# This lets us avoid cases in lm_finetuning where a chunk only has a single doc and hence cannot pick
# a valid next sentence substitute from another document
if num_dicts != 1:
while num_dicts % multiprocessing_chunk_size == 1:
multiprocessing_chunk_size -= -1
dict_batches_to_process = int(num_dicts / multiprocessing_chunk_size)
num_processes = min(num_cpus, dict_batches_to_process) or 1
return multiprocessing_chunk_size, num_processes
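# Worked example of the arithmetic above (hedged: assumes a machine where
# mp.cpu_count() == 9, i.e. num_cpus == 8, and num_dicts == 100_000):
#   dicts_per_cpu = ceil(100_000 / 8)               = 12_500
#   chunksize     = clip(ceil(12_500 / 5), 4, 2000) = 2_000
#   100_000 % 2_000 == 0 (not 1), so no adjustment is needed
#   num_processes = min(8, 100_000 // 2_000)        = min(8, 50) = 8
# so calc_chunksize(100_000) would return (2000, 8) on such a machine.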
def initialize_device_settings(use_cuda, local_rank=-1, use_amp=None):
if not use_cuda:
device = torch.device("cpu")
n_gpu = 0
elif local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not torch.cuda.is_available():
n_gpu = 0
else:
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
logger.info(
"device: {} n_gpu: {}, distributed training: {}, automatic mixed precision training: {}".format(
device, n_gpu, bool(local_rank != -1), use_amp
)
)
return device, n_gpu
class BaseMLLogger:
"""
Base class for tracking experiments.
This class can be extended to implement custom logging backends like MLFlow, Tensorboard, or Sacred.
"""
def __init__(self, tracking_uri, **kwargs):
self.tracking_uri = tracking_uri
print(WELCOME_BARN)
def init_experiment(self, tracking_uri):
raise NotImplementedError()
@classmethod
def log_metrics(cls, metrics, step):
raise NotImplementedError()
@classmethod
def log_artifacts(cls, self):
raise NotImplementedError()
@classmethod
def log_params(cls, params):
raise NotImplementedError()
class MLFlowLogger(BaseMLLogger):
"""
Logger for MLFlow experiment tracking.
"""
def init_experiment(self, experiment_name, run_name=None, nested=True):
try:
mlflow.set_tracking_uri(self.tracking_uri)
mlflow.set_experiment(experiment_name)
mlflow.start_run(run_name=run_name, nested=nested)
except ConnectionError:
raise Exception(
f"MLFlow cannot connect to the remote server at {self.tracking_uri}.\n"
f"MLFlow also supports logging runs locally to files. Set the MLFlowLogger "
f"tracking_uri to an empty string to use that."
)
@classmethod
def log_metrics(cls, metrics, step):
try:
mlflow.log_metrics(metrics, step=step)
except ConnectionError:
logger.warning(f"ConnectionError in logging metrics to MLFlow.")
except Exception as e:
logger.warning(f"Failed to log metrics: {e}")
@classmethod
def log_params(cls, params):
try:
mlflow.log_params(params)
except ConnectionError:
logger.warning("ConnectionError in logging params to MLFlow")
except Exception as e:
logger.warning(f"Failed to log params: {e}")
@classmethod
def log_artifacts(cls, dir_path, artifact_path=None):
try:
mlflow.log_artifacts(dir_path, artifact_path)
except ConnectionError:
logger.warning(f"ConnectionError in logging artifacts to MLFlow")
except Exception as e:
logger.warning(f"Failed to log artifacts: {e}")
@classmethod
def end_run(cls):
mlflow.end_run()
class TensorBoardLogger(BaseMLLogger):
"""
PyTorch TensorBoard Logger
"""
def __init__(self, **kwargs):
from tensorboardX import SummaryWriter
TensorBoardLogger.summary_writer = SummaryWriter()
super().__init__(**kwargs)
@classmethod
def log_metrics(cls, metrics, step):
for key, value in metrics.items():
TensorBoardLogger.summary_writer.add_scalar(
tag=key, scalar_value=value, global_step=step
)
@classmethod
def log_params(cls, params):
for key, value in params.items():
TensorBoardLogger.summary_writer.add_text(tag=key, text_string=str(value))
def to_numpy(container):
try:
return container.cpu().numpy()
except AttributeError:
return container
def convert_iob_to_simple_tags(preds, spans):
contains_named_entity = len([x for x in preds if "B-" in x]) != 0
simple_tags = []
merged_spans = []
open_tag = False
for pred, span in zip(preds, spans):
# no entity
if not ("B-" in pred or "I-" in pred):
if open_tag:
# end of one tag
merged_spans.append(cur_span)
simple_tags.append(cur_tag)
open_tag = False
continue
# new span starting
elif "B-" in pred:
if open_tag:
# end of one tag
merged_spans.append(cur_span)
simple_tags.append(cur_tag)
cur_tag = pred.replace("B-", "")
cur_span = span
open_tag = True
elif "I-" in pred:
this_tag = pred.replace("I-", "")
if open_tag and this_tag == cur_tag:
cur_span["end"] = span["end"]
elif open_tag:
# end of one tag
merged_spans.append(cur_span)
simple_tags.append(cur_tag)
open_tag = False
if open_tag:
merged_spans.append(cur_span)
simple_tags.append(cur_tag)
open_tag = False
if contains_named_entity and len(simple_tags) == 0:
raise Exception("Predicted Named Entities lost when converting from IOB to simple tags. Please check the format"
"of the training data adheres to either adheres to IOB2 format or is converted when "
"read_ner_file() is called.")
return simple_tags, merged_spans
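# Illustrative sketch (not part of the original module): how convert_iob_to_simple_tags merges
# IOB predictions into plain tags and merged spans. The offsets below are made up.
def _example_convert_iob():
    preds = ["O", "B-PER", "I-PER", "B-LOC"]
    spans = [{"start": 0, "end": 4}, {"start": 5, "end": 10},
             {"start": 11, "end": 17}, {"start": 18, "end": 24}]
    tags, merged = convert_iob_to_simple_tags(preds, spans)
    # tags   -> ["PER", "LOC"]
    # merged -> [{"start": 5, "end": 17}, {"start": 18, "end": 24}]
    return tags, merged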
def flatten_list(nested_list):
"""Flatten an arbitrarily nested list, without recursion (to avoid
stack overflows). Returns a new list, the original list is unchanged.
>> list(flatten_list([1, 2, 3, [4], [], [[[[[[[[[5]]]]]]]]]]))
[1, 2, 3, 4, 5]
>> list(flatten_list([[1, 2], 3]))
[1, 2, 3]
"""
nested_list = deepcopy(nested_list)
while nested_list:
sublist = nested_list.pop(0)
if isinstance(sublist, list):
nested_list = sublist + nested_list
else:
yield sublist
def log_ascii_workers(n, logger):
m_worker_lines = WORKER_M.split("\n")
f_worker_lines = WORKER_F.split("\n")
x_worker_lines = WORKER_X.split("\n")
all_worker_lines = []
for i in range(n):
rand = np.random.randint(low=0,high=3)
if(rand % 3 == 0):
all_worker_lines.append(f_worker_lines)
elif(rand % 3 == 1):
all_worker_lines.append(m_worker_lines)
else:
all_worker_lines.append(x_worker_lines)
zipped = zip(*all_worker_lines)
for z in zipped:
logger.info(" ".join(z))
def format_log(ascii, logger):
ascii_lines = ascii.split("\n")
for l in ascii_lines:
logger.info(l)
def get_dict_checksum(payload_dict):
"""
Get MD5 checksum for a dict.
"""
checksum = hashlib.md5(json.dumps(payload_dict, sort_keys=True).encode("utf-8")).hexdigest()
return checksum
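# Illustrative sketch (not part of the original module): the checksum is independent of key
# order because json.dumps is called with sort_keys=True.
def _example_dict_checksum():
    a = get_dict_checksum({"x": 1, "y": 2})
    b = get_dict_checksum({"y": 2, "x": 1})
    return a == b  # True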
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
def reformat_msmarco_train(filename, output_filename):
"""
Given a df of structure [query, pos_passage, neg_passage], this function converts it to [query, passage, label]
"""
print("Reformatting MSMarco train data...")
df = pd.read_csv(filename, header=None, sep="\t")
samples = []
for i, row in tqdm(df.iterrows()):
query = row[0]
pos = row[1]
neg = row[2]
samples.append([query, pos, 1])
samples.append([query, neg, 0])
with open(output_filename, "w") as f:
f.write("text\ttext_b\tlabel\n")
for (query, passage, label) in samples:
f.write(f"{query}\t{passage}\t{label}\n")
print(f"MSMarco train data saved at {output_filename}")
def reformat_msmarco_dev(queries_filename, passages_filename, qrels_filename, top1000_filename, output_filename):
print("Reformatting MSMarco dev data...")
top1000_file = open(top1000_filename)
qrels_file = open(qrels_filename)
queries_file = open(queries_filename)
passages_file = open(passages_filename)
# Generate a top1000 dict
top1000 = dict()
for l in tqdm(top1000_file):
qid, pid, _, _ = l.split("\t")
if qid not in top1000:
top1000[qid] = []
top1000[qid].append(pid)
# Generate a qrels dict
qrels = dict()
for l in qrels_file:
qid, _, pid, _ = l.split("\t")
if qid not in qrels:
qrels[qid] = []
qrels[qid].append(pid)
# Generate a queries dict
queries = dict()
for l in queries_file:
qid, query = l.split("\t")
queries[qid] = query[:-1]
# Generate a passages dict
passages = dict()
for l in tqdm(passages_file):
pid, passage = l.split("\t")
passages[pid] = passage[:-1]
# Generate dict with all needed info
final = dict()
for qid in tqdm(top1000):
if qid not in final:
final[qid] = []
query = queries[qid]
curr_qrel = qrels[qid]
curr_top1000 = top1000[qid]
for ct in curr_top1000:
is_relevant = int(ct in curr_qrel)
passage = passages[ct]
quad = list([query, ct, passage, is_relevant])
final[qid].append(quad)
# Flatten the structure of final and convert to df
records = []
for k, v in tqdm(final.items()):
for x in v:
records.append([k] + x)
    df = pd.DataFrame(records, columns=["qid", "text", "pid", "text_b", "label"])
import sbatch_prepare as sp
import path_manipulate as pm
import os
import time
import traceback
from pandas import DataFrame as df
from pandas import Series
from mpi4py.futures import MPIPoolExecutor
res_columns = ['obj','std',
'k1.value','k1.grad','k1.std',
'k2.value','k2.grad','k2.std',
'k3.value','k3.grad','k3.std',
'k4.value','k4.grad','k4.std',
'k5.value','k5.grad','k5.std']
result_filename = 'sGC_parameter_estimation_homo.csv'
#generate result file path
def get_result_file_path(p,r,o):
prefix = '~/projects/slurm-copasi/homogeneity/task3/'
file = '{0}_process/{1}_repeats/results/\
two_step_sGC_activation.cps.dat{2}'.format(p,r,o)
return prefix + file
def skip_lines(f, num):
for i in range(num):
next(f)
#extract fitting results
def get_entry(filepath):
start = time.perf_counter()
with open(filepath, 'r') as f:
try:
entry = Series(dtype=float)
while next(f) != 'Parameter Estimation Result:\n':
continue
next(f)
entry['obj'] = next(f).split(':',1)[1].strip()
entry['std'] = next(f).split(':',1)[1].strip()
skip_lines(f, 5)
for i in range(1,6):
current_line = next(f).split(':',1)[1].strip().split()
entry['k{}.value'.format(i)] = current_line[0]
entry['k{}.grad'.format(i)] = current_line[1]
entry['k{}.std'.format(i)] = current_line[2]
except Exception as e:
traceback.print_exc()
print('Error processing file {}'.format(filepath))
end = time.perf_counter()
return entry, end-start
end = time.perf_counter()
return entry, end-start
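# Illustrative sketch (not part of the original script): parsing a single COPASI result file.
# The process count, repeat count and file offset below are placeholders. Note that
# get_result_file_path returns a '~'-prefixed path, which open() does not expand, so
# os.path.expanduser may be needed before calling get_entry.
def _example_get_entry():
    path = os.path.expanduser(get_result_file_path(4, 8, 1))
    entry, elapsed = get_entry(path)
    return entry['obj'], entry['k1.value'], elapsed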
#store fitting results
if __name__ == '__main__':
sp.num_repeat_group = 8
repeats = sp.get_repeat_as_per_process(sp.list_process)
    res = df(columns=res_columns)
from __future__ import print_function, division
# MIMIC IIIv14 on postgres 9.4
import os, psycopg2, re, sys, time, numpy as np, pandas as pd
from sklearn import metrics
from datetime import datetime
from datetime import timedelta
from os.path import isfile, isdir, splitext
import argparse
import pickle as cPickle
import numpy.random as npr
import spacy
# TODO(mmd): Upgrade to python 3 and use scispacy (requires python 3.6)
import scispacy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datapackage_io_util import (
load_datapackage_schema,
load_sanitized_df_from_csv,
save_sanitized_df_to_csv,
sanitize_df,
)
from heuristic_sentence_splitter import sent_tokenize_rules
from mimic_querier import *
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SQL_DIR = os.path.join(CURRENT_DIR, 'SQL_Queries')
STATICS_QUERY_PATH = os.path.join(SQL_DIR, 'statics.sql')
CODES_QUERY_PATH = os.path.join(SQL_DIR, 'codes.sql')
NOTES_QUERY_PATH = os.path.join(SQL_DIR, 'notes.sql')
# Output filenames
static_filename = 'static_data.csv'
static_columns_filename = 'static_colnames.txt'
dynamic_filename = 'vitals_hourly_data.csv'
columns_filename = 'vitals_colnames.txt'
subjects_filename = 'subjects.npy'
times_filename = 'fenceposts.npy'
dynamic_hd5_filename = 'vitals_hourly_data.h5'
dynamic_hd5_filt_filename = 'all_hourly_data.h5'
codes_hd5_filename = 'C.h5'
notes_hd5_filename = 'notes.hdf' # N.h5
idx_hd5_filename = 'C_idx.h5'
outcome_filename = 'outcomes_hourly_data.csv'
outcome_hd5_filename = 'outcomes_hourly_data.h5'
outcome_columns_filename = 'outcomes_colnames.txt'
# SQL command params
ID_COLS = ['subject_id', 'hadm_id', 'icustay_id']
ITEM_COLS = ['itemid', 'label', 'LEVEL1', 'LEVEL2']
def add_outcome_indicators(out_gb):
subject_id = out_gb['subject_id'].unique()[0]
hadm_id = out_gb['hadm_id'].unique()[0]
icustay_id = out_gb['icustay_id'].unique()[0]
max_hrs = out_gb['max_hours'].unique()[0]
on_hrs = set()
for index, row in out_gb.iterrows():
on_hrs.update(range(row['starttime'], row['endtime'] + 1))
off_hrs = set(range(max_hrs + 1)) - on_hrs
on_vals = [0]*len(off_hrs) + [1]*len(on_hrs)
hours = list(off_hrs) + list(on_hrs)
return pd.DataFrame({'subject_id': subject_id, 'hadm_id':hadm_id,
'hours_in':hours, 'on':on_vals}) #icustay_id': icustay_id})
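# Illustrative sketch (not part of the original script): hourly on/off indicators produced for
# one ICU stay group. The IDs and treatment windows below are made up.
def _example_add_outcome_indicators():
    grp = pd.DataFrame({'subject_id': [1, 1], 'hadm_id': [10, 10], 'icustay_id': [100, 100],
                        'max_hours': [5, 5], 'starttime': [1, 4], 'endtime': [2, 4]})
    return add_outcome_indicators(grp)
    # 'on' == 1 for hours_in 1, 2 and 4, and 0 for hours_in 0, 3 and 5.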
def add_blank_indicators(out_gb):
subject_id = out_gb['subject_id'].unique()[0]
hadm_id = out_gb['hadm_id'].unique()[0]
#icustay_id = out_gb['icustay_id'].unique()[0]
max_hrs = out_gb['max_hours'].unique()[0]
hrs = range(max_hrs + 1)
vals = list([0]*len(hrs))
return pd.DataFrame({'subject_id': subject_id, 'hadm_id':hadm_id,
'hours_in':hrs, 'on':vals})#'icustay_id': icustay_id,
def continuous_outcome_processing(out_data, data, icustay_timediff):
"""
Args
----
out_data : pd.DataFrame
index=None
Contains subset of icustay_id corresp to specific sessions where outcome observed.
data : pd.DataFrame
index=icustay_id
Contains full population of static demographic data
Returns
-------
out_data : pd.DataFrame
"""
out_data['intime'] = out_data['icustay_id'].map(data['intime'].to_dict())
out_data['outtime'] = out_data['icustay_id'].map(data['outtime'].to_dict())
out_data['max_hours'] = out_data['icustay_id'].map(icustay_timediff)
out_data['starttime'] = out_data['starttime'] - out_data['intime']
out_data['starttime'] = out_data.starttime.apply(lambda x: x.days*24 + x.seconds//3600)
out_data['endtime'] = out_data['endtime'] - out_data['intime']
out_data['endtime'] = out_data.endtime.apply(lambda x: x.days*24 + x.seconds//3600)
out_data = out_data.groupby(['icustay_id'])
return out_data
#
def fill_missing_times(df_by_sid_hid_itemid):
    max_hour = df_by_sid_hid_itemid.index.get_level_values('max_hours')[0]
missing_hours = list(set(range(max_hour+1)) - set(df_by_sid_hid_itemid['hours_in'].unique()))
# Add rows
sid = df_by_sid_hid_itemid.subject_id.unique()[0]
hid = df_by_sid_hid_itemid.hadm_id.unique()[0]
icustay_id = df_by_sid_hid_itemid.icustay_id.unique()[0]
itemid = df_by_sid_hid_itemid.itemid.unique()[0]
filler = pd.DataFrame({'subject_id':[sid]*len(missing_hours),
'hadm_id':[hid]*len(missing_hours),
'icustay_id':[icustay_id]*len(missing_hours),
'itemid':[itemid]*len(missing_hours),
'hours_in':missing_hours,
'value':[np.nan]*len(missing_hours),
'max_hours': [max_hour]*len(missing_hours)})
return pd.concat([df_by_sid_hid_itemid, filler], axis=0)
def save_pop(
data_df, outPath, static_filename, pop_size_int,
static_data_schema, host=None
):
# Connect to local postgres version of mimic
# Serialize to disk
csv_fpath = os.path.join(outPath, static_filename)
save_sanitized_df_to_csv(csv_fpath, data_df, static_data_schema)
return data_df
# From Dave's approach!
def get_variable_mapping(mimic_mapping_filename):
# Read in the second level mapping of the itemids
var_map = pd.read_csv(mimic_mapping_filename, index_col=None)
var_map = var_map.ix[(var_map['LEVEL2'] != '') & (var_map['COUNT']>0)]
var_map = var_map.ix[(var_map['STATUS'] == 'ready')]
var_map['ITEMID'] = var_map['ITEMID'].astype(int)
return var_map
def get_variable_ranges(range_filename):
# Read in the second level mapping of the itemid, and take those values out
columns = [ 'LEVEL2', 'OUTLIER LOW', 'VALID LOW', 'IMPUTE', 'VALID HIGH', 'OUTLIER HIGH' ]
to_rename = dict(zip(columns, [ c.replace(' ', '_') for c in columns ]))
to_rename['LEVEL2'] = 'VARIABLE'
var_ranges = pd.read_csv(range_filename, index_col=None)
var_ranges = var_ranges[columns]
var_ranges.rename(columns=to_rename, inplace=True)
var_ranges = var_ranges.drop_duplicates(subset='VARIABLE', keep='first')
var_ranges['VARIABLE'] = var_ranges['VARIABLE'].str.lower()
var_ranges.set_index('VARIABLE', inplace=True)
var_ranges = var_ranges.loc[var_ranges.notnull().all(axis=1)]
return var_ranges
UNIT_CONVERSIONS = [
('weight', 'oz', None, lambda x: x/16.*0.45359237),
('weight', 'lbs', None, lambda x: x*0.45359237),
('fraction inspired oxygen', None, lambda x: x > 1, lambda x: x/100.),
('oxygen saturation', None, lambda x: x <= 1, lambda x: x*100.),
('temperature', 'f', lambda x: x > 79, lambda x: (x - 32) * 5./9),
('height', 'in', None, lambda x: x*2.54),
]
def standardize_units(X, name_col='itemid', unit_col='valueuom', value_col='value', inplace=True):
if not inplace: X = X.copy()
name_col_vals = get_values_by_name_from_df_column_or_index(X, name_col)
unit_col_vals = get_values_by_name_from_df_column_or_index(X, unit_col)
try:
name_col_vals = name_col_vals.str
unit_col_vals = unit_col_vals.str
except:
print("Can't call *.str")
print(name_col_vals)
print(unit_col_vals)
raise
#name_filter, unit_filter = [
# (lambda n: col.contains(n, case=False, na=False)) for col in (name_col_vals, unit_col_vals)
#]
# TODO(mmd): Why does the above not work, but the below does?
name_filter = lambda n: name_col_vals.contains(n, case=False, na=False)
unit_filter = lambda n: unit_col_vals.contains(n, case=False, na=False)
for name, unit, rng_check_fn, convert_fn in UNIT_CONVERSIONS:
name_filter_idx = name_filter(name)
needs_conversion_filter_idx = name_filter_idx & False
if unit is not None: needs_conversion_filter_idx |= name_filter(unit) | unit_filter(unit)
if rng_check_fn is not None: needs_conversion_filter_idx |= rng_check_fn(X[value_col])
idx = name_filter_idx & needs_conversion_filter_idx
X.loc[idx, value_col] = convert_fn(X[value_col][idx])
return X
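# Illustrative sketch (not part of the original script): standardize_units converts rows whose
# name/unit/range matches an entry of UNIT_CONVERSIONS. The values below are made up.
def _example_standardize_units():
    demo = pd.DataFrame({'itemid': ['weight', 'temperature'],
                         'valueuom': ['lbs', 'f'],
                         'value': [150.0, 98.6]})
    return standardize_units(demo, inplace=False)
    # weight: 150 lbs -> ~68.04 (kg); temperature: 98.6 F -> 37.0 (C)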
def range_unnest(df, col, out_col_name=None, reset_index=False):
assert len(df.index.names) == 1, "Does not support multi-index."
if out_col_name is None: out_col_name = col
col_flat = pd.DataFrame(
[[i, x] for i, y in df[col].iteritems() for x in range(y+1)],
columns=[df.index.names[0], out_col_name]
)
if not reset_index: col_flat = col_flat.set_index(df.index.names[0])
return col_flat
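# Illustrative sketch (not part of the original script): range_unnest expands a per-stay
# 'max_hours' value into one row per hour. The index values below are made up.
def _example_range_unnest():
    demo = pd.DataFrame({'max_hours': [2, 1]},
                        index=pd.Index([101, 102], name='icustay_id'))
    return range_unnest(demo, 'max_hours', out_col_name='hours_in', reset_index=True)
    # -> rows (101, 0), (101, 1), (101, 2), (102, 0), (102, 1)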
# TODO(mmd): improve args
def save_numerics(
data, X, I, var_map, var_ranges, outPath, dynamic_filename, columns_filename, subjects_filename,
times_filename, dynamic_hd5_filename, group_by_level2, apply_var_limit, min_percent
):
assert len(data) > 0 and len(X) > 0, "Must provide some input data to process."
var_map = var_map[
['LEVEL2', 'ITEMID', 'LEVEL1']
].rename_axis(
{'LEVEL2': 'LEVEL2', 'LEVEL1': 'LEVEL1', 'ITEMID': 'itemid'}, axis=1
).set_index('itemid')
X['value'] = pd.to_numeric(X['value'], 'coerce')
    X = X.astype({k: int for k in ID_COLS})  # astype has no inplace option; reassign instead
to_hours = lambda x: max(0, x.days*24 + x.seconds // 3600)
X = X.set_index('icustay_id').join(data[['intime']])
X['hours_in'] = (X['charttime'] - X['intime']).apply(to_hours)
X.drop(columns=['charttime', 'intime'], inplace=True)
X.set_index('itemid', append=True, inplace=True)
# Pandas has a bug with the below for small X
#X = X.join([var_map, I]).set_index(['label', 'LEVEL1', 'LEVEL2'], append=True)
X = X.join(var_map).join(I).set_index(['label', 'LEVEL1', 'LEVEL2'], append=True)
standardize_units(X, name_col='LEVEL1', inplace=True)
if apply_var_limit > 0:
X = apply_variable_limits(X, var_ranges, 'LEVEL2')
group_item_cols = ['LEVEL2'] if group_by_level2 else ITEM_COLS
X = X.groupby(ID_COLS + group_item_cols + ['hours_in']).agg(['mean', 'std', 'count'])
X.columns = X.columns.droplevel(0)
X.columns.names = ['Aggregation Function']
data['max_hours'] = (data['outtime'] - data['intime']).apply(to_hours)
# TODO(mmd): Maybe can just create the index directly?
missing_hours_fill = range_unnest(data, 'max_hours', out_col_name='hours_in', reset_index=True)
missing_hours_fill['tmp'] = np.NaN
# TODO(mmd): The below is a bit wasteful.
#itemids = var_map.join(I['label']).reset_index()[group_item_cols].drop_duplicates()
#itemids['tmp'] = np.NaN
#missing_hours_fill = missing_hours_fill.merge(itemids, on='tmp', how='outer')
fill_df = data.reset_index()[ID_COLS].join(missing_hours_fill.set_index('icustay_id'), on='icustay_id')
fill_df.set_index(ID_COLS + ['hours_in'], inplace=True)
    # Pivot table drops NaN columns, so you lose any column that is uniformly NaN.
X = X.unstack(level = group_item_cols)
X.columns = X.columns.reorder_levels(order=group_item_cols + ['Aggregation Function'])
#X = X.reset_index().pivot_table(index=ID_COLS + ['hours_in'], columns=group_item_cols, values=X.columns)
X = X.reindex(fill_df.index)
#X.columns = X.columns.droplevel(0).reorder_levels(order=[1, 0])
#if group_by_level2:
# X.columns.names = ['LEVEL2', 'Aggregation Function'] # Won't work with ungrouped!
#else:
# X.columns.names = ['itemid', 'Aggregation Function']
# X.columms = X.MultiIndex.from_frame(X[ITEM_COLS])
X = X.sort_index(axis=0).sort_index(axis=1)
print("Shape of X : ", X.shape)
# Turn back into columns
if columns_filename is not None:
col_names = [str(x) for x in X.columns.values]
with open(os.path.join(outPath, columns_filename), 'w') as f: f.write('\n'.join(col_names))
# Get the max time for each of the subjects so we can reconstruct!
if subjects_filename is not None:
np.save(os.path.join(outPath, subjects_filename), data['subject_id'].as_matrix())
if times_filename is not None:
np.save(os.path.join(outPath, times_filename), data['max_hours'].as_matrix())
#fix nan in count to be zero
idx = pd.IndexSlice
if group_by_level2:
X.loc[:, idx[:, 'count']] = X.loc[:, idx[:, 'count']].fillna(0)
else:
X.loc[:, idx[:,:,:,:, 'count']] = X.loc[:, idx[:,:,:,:, 'count']].fillna(0)
# Drop columns that have very few recordings
n = round((1-min_percent/100.0)*X.shape[0])
drop_col = []
for k in X.columns:
if k[-1] == 'mean':
if X[k].isnull().sum() > n:
drop_col.append(k[:-1])
X = X.drop(columns = drop_col)
########
if dynamic_filename is not None: np.save(os.path.join(outPath, dynamic_filename), X.as_matrix())
if dynamic_hd5_filename is not None: X.to_hdf(os.path.join(outPath, dynamic_hd5_filename), 'X')
return X
def save_notes(notes, outPath=None, notes_h5_filename=None):
notes_id_cols = list(set(ID_COLS).intersection(notes.columns))# + ['row_id'] TODO: what is row_id?
notes_metadata_cols = ['chartdate', 'charttime', 'category', 'description']
notes.set_index(notes_id_cols + notes_metadata_cols, inplace=True)
# preprocessing!!
# TODO(Scispacy)
# TODO(improve)
# TODO(spell checking)
# TODO(CUIs)
# TODO This takes forever. At the very least add a progress bar.
def sbd_component(doc):
for i, token in enumerate(doc[:-2]):
# define sentence start if period + titlecase token
if token.text == '.' and doc[i+1].is_title:
doc[i+1].sent_start = True
if token.text == '-' and doc[i+1].text != '-':
doc[i+1].sent_start = True
return doc
#convert de-identification text into one token
def fix_deid_tokens(text, processed_text):
deid_regex = r"\[\*\*.{0,15}.*?\*\*\]"
indexes = [m.span() for m in re.finditer(deid_regex,text,flags=re.IGNORECASE)]
for start,end in indexes:
processed_text.merge(start_idx=start,end_idx=end)
return processed_text
nlp = spacy.load('en_core_web_sm') # Maybe try lg model?
nlp.add_pipe(sbd_component, before='parser') # insert before the parser
disabled = nlp.disable_pipes('ner')
def process_sections_helper(section, note, processed_sections):
processed_section = nlp(section['sections'])
processed_section = fix_deid_tokens(section['sections'], processed_section)
processed_sections.append(processed_section)
def process_note_willie_spacy(note):
note_sections = sent_tokenize_rules(note)
processed_sections = []
section_frame = pd.DataFrame({'sections':note_sections})
section_frame.apply(process_sections_helper, args=(note,processed_sections,), axis=1)
return processed_sections
def text_process(sent, note):
sent_text = sent['sents'].text
if len(sent_text) > 0 and sent_text.strip() != '\n':
if '\n'in sent_text:
sent_text = sent_text.replace('\n', ' ')
note['text'] += sent_text + '\n'
def get_sentences(processed_section, note):
sent_frame = pd.DataFrame({'sents': list(processed_section['sections'].sents)})
sent_frame.apply(text_process, args=(note,), axis=1)
def process_frame_text(note):
try:
note_text = str(note['text'])
note['text'] = ''
processed_sections = process_note_willie_spacy(note_text)
ps = {'sections': processed_sections}
ps = pd.DataFrame(ps)
ps.apply(get_sentences, args=(note,), axis=1)
return note
except Exception as e:
print('error', e)
#raise e
notes = notes.apply(process_frame_text, axis=1)
if outPath is not None and notes_h5_filename is not None:
notes.to_hdf(os.path.join(outPath, notes_h5_filename), 'notes')
return notes
def save_icd9_codes(codes, outPath, codes_h5_filename):
codes.set_index(ID_COLS, inplace=True)
codes.to_hdf(os.path.join(outPath, codes_h5_filename), 'C')
return codes
def save_outcome(
data, querier, outPath, outcome_filename, outcome_hd5_filename,
outcome_columns_filename, outcome_schema, host=None
):
""" Retrieve outcomes from DB and save to disk
Vent and vaso are both there already - so pull the start and stop times from there! :)
Returns
-------
Y : Pandas dataframe
Obeys the outcomes data spec
"""
icuids_to_keep = get_values_by_name_from_df_column_or_index(data, 'icustay_id')
icuids_to_keep = set([str(s) for s in icuids_to_keep])
# Add a new column called intime so that we can easily subtract it off
data = data.reset_index()
data = data.set_index('icustay_id')
data['intime'] = pd.to_datetime(data['intime']) #, format="%m/%d/%Y"))
data['outtime'] = pd.to_datetime(data['outtime'])
icustay_timediff_tmp = data['outtime'] - data['intime']
icustay_timediff = pd.Series([timediff.days*24 + timediff.seconds//3600
for timediff in icustay_timediff_tmp], index=data.index.values)
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.ventnum, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN ventilation_durations v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
"""
old_template_vars = querier.exclusion_criteria_template_vars
querier.exclusion_criteria_template_vars = dict(icuids=','.join(icuids_to_keep))
vent_data = querier.query(query_string=query)
vent_data = continuous_outcome_processing(vent_data, data, icustay_timediff)
vent_data = vent_data.apply(add_outcome_indicators)
vent_data.rename(columns = {'on':'vent'}, inplace=True)
vent_data = vent_data.reset_index()
# Get the patients without the intervention in there too so that we
ids_with = vent_data['icustay_id']
ids_with = set(map(int, ids_with))
ids_all = set(map(int, icuids_to_keep))
ids_without = (ids_all - ids_with)
#ids_without = map(int, ids_without)
# Create a new fake dataframe with blanks on all vent entries
out_data = data.copy(deep=True)
out_data = out_data.reset_index()
out_data = out_data.set_index('icustay_id')
out_data = out_data.iloc[out_data.index.isin(ids_without)]
out_data = out_data.reset_index()
out_data = out_data[['subject_id', 'hadm_id', 'icustay_id']]
out_data['max_hours'] = out_data['icustay_id'].map(icustay_timediff)
# Create all 0 column for vent
out_data = out_data.groupby('icustay_id')
out_data = out_data.apply(add_blank_indicators)
out_data.rename(columns = {'on':'vent'}, inplace=True)
out_data = out_data.reset_index()
# Concatenate all the data vertically
Y = pd.concat([vent_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent']],
out_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent']]],
axis=0)
# Start merging all other interventions
table_names = [
'vasopressor_durations',
'adenosine_durations',
'dobutamine_durations',
'dopamine_durations',
'epinephrine_durations',
'isuprel_durations',
'milrinone_durations',
'norepinephrine_durations',
'phenylephrine_durations',
'vasopressin_durations'
]
column_names = ['vaso', 'adenosine', 'dobutamine', 'dopamine', 'epinephrine', 'isuprel',
'milrinone', 'norepinephrine', 'phenylephrine', 'vasopressin']
# TODO(mmd): This section doesn't work. What is its purpose?
for t, c in zip(table_names, column_names):
# TOTAL VASOPRESSOR DATA
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.vasonum, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
"""
new_data = querier.query(query_string=query, extra_template_vars=dict(table=t))
new_data = continuous_outcome_processing(new_data, data, icustay_timediff)
new_data = new_data.apply(add_outcome_indicators)
new_data.rename(columns={'on': c}, inplace=True)
new_data = new_data.reset_index()
        # c may not be in new_data if we are only extracting a subset of the population,
        # in which case c was never performed.
if not c in new_data:
print("Column ", c, " not in data.")
continue
Y = Y.merge(
new_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', c]],
on=['subject_id', 'hadm_id', 'icustay_id', 'hours_in'],
how='left'
)
# Sort the values
Y.fillna(0, inplace=True)
Y[c] = Y[c].astype(int)
#Y = Y.sort_values(['subject_id', 'icustay_id', 'hours_in']) #.merge(df3,on='name')
Y = Y.reset_index(drop=True)
print('Extracted ' + c + ' from ' + t)
tasks=["colloid_bolus", "crystalloid_bolus", "nivdurations"]
for task in tasks:
if task=='nivdurations':
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
"""
else:
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.charttime AS starttime,
v.charttime AS endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.charttime between intime and outtime
"""
new_data = querier.query(query_string=query, extra_template_vars=dict(table=task))
if new_data.shape[0] == 0: continue
new_data = continuous_outcome_processing(new_data, data, icustay_timediff)
new_data = new_data.apply(add_outcome_indicators)
new_data.rename(columns = {'on':task}, inplace=True)
new_data = new_data.reset_index()
Y = Y.merge(
new_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', task]],
on=['subject_id', 'hadm_id', 'icustay_id', 'hours_in'],
how='left'
)
# Sort the values
Y.fillna(0, inplace=True)
Y[task] = Y[task].astype(int)
Y = Y.reset_index(drop=True)
print('Extracted ' + task)
# TODO: ADD THE RBC/PLT/PLASMA DATA
# TODO: ADD DIALYSIS DATA
# TODO: ADD INFECTION DATA
# TODO: Move queries to files
querier.exclusion_criteria_template_vars = old_template_vars
Y = Y.filter(items=['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent'] + column_names + tasks)
Y.subject_id = Y.subject_id.astype(int)
Y.icustay_id = Y.icustay_id.astype(int)
Y.hours_in = Y.hours_in.astype(int)
Y.vent = Y.vent.astype(int)
Y.vaso = Y.vaso.astype(int)
y_id_cols = ID_COLS + ['hours_in']
Y = Y.sort_values(y_id_cols)
Y.set_index(y_id_cols, inplace=True)
print('Shape of Y : ', Y.shape)
# SAVE AS NUMPY ARRAYS AND TEXT FILES
#np_Y = Y.as_matrix()
#np.save(os.path.join(outPath, outcome_filename), np_Y)
# Turn back into columns
df = Y.reset_index()
df = sanitize_df(df, outcome_schema)
csv_fpath = os.path.join(outPath, outcome_filename)
save_sanitized_df_to_csv(csv_fpath, df, outcome_schema)
col_names = list(df.columns.values)
col_names = col_names[3:]
with open(os.path.join(outPath, outcome_columns_filename), 'w') as f:
f.write('\n'.join(col_names))
# TODO(mmd): Why does df have the index? Is sanitize making multiindex?
# SAVE THE DATA AS A PANDAS OBJECT
# TODO(<NAME>): Why writing out Y after you've separately sanitized df?
Y.to_hdf(os.path.join(outPath, outcome_hd5_filename), 'Y')
return df
# Apply the variable limits to remove things
# TODO(mmd): controlled printing.
def apply_variable_limits(df, var_ranges, var_names_index_col='LEVEL2'):
idx_vals = df.index.get_level_values(var_names_index_col)
non_null_idx = ~df.value.isnull()
var_names = set(idx_vals)
var_range_names = set(var_ranges.index.values)
for var_name in var_names:
var_name_lower = var_name.lower()
if var_name_lower not in var_range_names:
print("No known ranges for %s" % var_name)
continue
outlier_low_val, outlier_high_val, valid_low_val, valid_high_val = [
var_ranges.loc[var_name_lower, x] for x in ('OUTLIER_LOW','OUTLIER_HIGH','VALID_LOW','VALID_HIGH')
]
running_idx = non_null_idx & (idx_vals == var_name)
outlier_low_idx = (df.value < outlier_low_val)
outlier_high_idx = (df.value > outlier_high_val)
valid_low_idx = ~outlier_low_idx & (df.value < valid_low_val)
valid_high_idx = ~outlier_high_idx & (df.value > valid_high_val)
var_outlier_idx = running_idx & (outlier_low_idx | outlier_high_idx)
var_valid_low_idx = running_idx & valid_low_idx
var_valid_high_idx = running_idx & valid_high_idx
df.loc[var_outlier_idx, 'value'] = np.nan
df.loc[var_valid_low_idx, 'value'] = valid_low_val
df.loc[var_valid_high_idx, 'value'] = valid_high_val
n_outlier = sum(var_outlier_idx)
n_valid_low = sum(var_valid_low_idx)
n_valid_high = sum(var_valid_high_idx)
if n_outlier + n_valid_low + n_valid_high > 0:
print(
"%s had %d / %d rows cleaned:\n"
" %d rows were strict outliers, set to np.nan\n"
" %d rows were low valid outliers, set to %.2f\n"
" %d rows were high valid outliers, set to %.2f\n"
"" % (
var_name,
n_outlier + n_valid_low + n_valid_high, sum(running_idx),
n_outlier, n_valid_low, valid_low_val, n_valid_high, valid_high_val
)
)
return df
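# Illustrative sketch (not part of the original script): values outside the OUTLIER bounds become
# NaN, values outside the VALID bounds are clipped. The ranges below are made up, not clinical.
def _example_apply_variable_limits():
    idx = pd.MultiIndex.from_tuples([('heart rate',), ('heart rate',)], names=['LEVEL2'])
    demo = pd.DataFrame({'value': [300.0, 80.0]}, index=idx)
    ranges = pd.DataFrame({'OUTLIER_LOW': [0.0], 'OUTLIER_HIGH': [250.0],
                           'VALID_LOW': [20.0], 'VALID_HIGH': [220.0]},
                          index=pd.Index(['heart rate'], name='VARIABLE'))
    return apply_variable_limits(demo, ranges, 'LEVEL2')
    # 300.0 exceeds OUTLIER_HIGH and becomes np.nan; 80.0 is left unchanged.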
def plot_variable_histograms(col_names, df):
# Plot some of the data, just to make sure it looks ok
for c, vals in df.iteritems():
n = vals.dropna().count()
if n < 2: continue
# get median, variance, skewness
med = vals.dropna().median()
var = vals.dropna().var()
skew = vals.dropna().skew()
# plot
        fig = plt.figure(figsize=(13, 6))  # single figure; a second plt.subplots() call would leave this one blank
vals.dropna().plot.hist(bins=100, label='HIST (n={})'.format(n))
# fake plots for KS test, median, etc
plt.plot([], label=' ',color='lightgray')
plt.plot([], label='Median: {}'.format(format(med,'.2f')),
color='lightgray')
plt.plot([], label='Variance: {}'.format(format(var,'.2f')),
color='lightgray')
plt.plot([], label='Skew: {}'.format(format(skew,'.2f')),
                 color='lightgray')
# add title, labels etc.
plt.title('{} measurements in ICU '.format(str(c)))
plt.xlabel(str(c))
plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=12)
plt.xlim(0, vals.quantile(0.99))
fig.savefig(os.path.join(outPath, (str(c) + '_HIST_.png')), bbox_inches='tight')
# Main, where you can call what makes sense.
if __name__ == '__main__':
print("Running!")
# Construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('--out_path', type=str, default= '/scratch/{}/phys_acuity_modelling/data'.format(os.environ['USER']),
help='Enter the path you want the output')
ap.add_argument('--resource_path',
type=str,
default=os.path.expandvars("$MIMIC_EXTRACT_CODE_DIR/resources/"))
ap.add_argument('--queries_path',
type=str,
default=os.path.expandvars("$MIMIC_EXTRACT_CODE_DIR/SQL_Queries/"))
ap.add_argument('--extract_pop', type=int, default=1,
help='Whether or not to extract population data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_numerics', type=int, default=1,
help='Whether or not to extract numerics data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_outcomes', type=int, default=1,
help='Whether or not to extract outcome data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_codes', type=int, default=1,
help='Whether or not to extract ICD9 codes: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_notes', type=int, default=1,
help='Whether or not to extract notes: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--pop_size', type=int, default=0,
help='Size of population to extract')
ap.add_argument('--exit_after_loading', type=int, default=0)
ap.add_argument('--var_limits', type=int, default=1,
help='Whether to create a version of the data with variable limits included. ' +
'1 - apply variable limits, 0 - do not apply variable limits')
ap.add_argument('--plot_hist', type=int, default=1,
help='Whether to plot the histograms of the data')
ap.add_argument('--psql_host', type=str, default=None,
help='Postgres host. Try "/var/run/postgresql/" for Unix domain socket errors.')
ap.add_argument('--psql_dbname', type=str, default='mimic',
help='Postgres database name.')
ap.add_argument('--psql_schema_name', type=str, default='mimiciii',
                    help='Postgres schema name.')
ap.add_argument('--psql_user', type=str, default=None,
help='Postgres user.')
ap.add_argument('--psql_password', type=str, default=None,
help='Postgres password.')
ap.add_argument('--no_group_by_level2', action='store_false', dest='group_by_level2', default=True,
help="Don't group by level2.")
ap.add_argument('--min_percent', type=float, default=0.0,
help='Minimum percentage of row numbers need to be observations for each numeric column. ' +
'min_percent = 1 means columns with more than 99 percent of nan will be removed. ' +
'Note that as our code does not split the data into train/test sets, ' +
                    'removing columns in this way prior to train/test splitting yields a (very minor) ' +
                    'form of leakage across the train/test split, as the overall missingness measures used ' +
                    'are based on both the train and test sets, rather than just the train set.')
ap.add_argument('--min_age', type=int, default=15,
help='Minimum age of patients to be included')
ap.add_argument('--min_duration', type=int, default=12,
help='Minimum hours of stay to be included')
ap.add_argument('--max_duration', type=int, default=240,
help='Maximum hours of stay to be included')
#############
# Parse args
args = vars(ap.parse_args())
for key in sorted(args.keys()):
print(key, args[key])
if not isdir(args['resource_path']):
raise ValueError("Invalid resource_path: %s" % args['resource_path'])
mimic_mapping_filename = os.path.join(args['resource_path'], 'itemid_to_variable_map.csv')
range_filename = os.path.join(args['resource_path'], 'variable_ranges.csv')
# Load specs for output tables
static_data_schema = load_datapackage_schema(
os.path.join(args['resource_path'], 'static_data_spec.json'))
outcome_data_schema = load_datapackage_schema(
os.path.join(args['resource_path'], 'outcome_data_spec.json'))
if not isdir(args['out_path']):
print('ERROR: OUTPATH %s DOES NOT EXIST' % args['out_path'])
sys.exit()
else:
outPath = args['out_path']
# Modify the filenames
if args['pop_size'] > 0:
pop_size = str(args['pop_size'])
static_filename = splitext(static_filename)[0] + '_' + pop_size + splitext(static_filename)[1]
dynamic_filename = splitext(dynamic_filename)[0] + '_' + pop_size + splitext(dynamic_filename)[1]
#columns_filename = splitext(columns_filename)[0] + '_' + pop_size + splitext(columns_filename)[1]
subjects_filename = splitext(subjects_filename)[0] + '_' + pop_size + splitext(subjects_filename)[1]
times_filename = splitext(times_filename)[0] + '_' + pop_size + splitext(times_filename)[1]
dynamic_hd5_filename = splitext(dynamic_hd5_filename)[0] + '_' + pop_size + splitext(dynamic_hd5_filename)[1]
outcome_filename = splitext(outcome_filename)[0] + '_' + pop_size + splitext(outcome_filename)[1]
dynamic_hd5_filt_filename = splitext(dynamic_hd5_filt_filename)[0] + '_' + pop_size + splitext(dynamic_hd5_filt_filename)[1]
outcome_hd5_filename = splitext(outcome_hd5_filename)[0] + '_' + pop_size + splitext(outcome_hd5_filename)[1]
#outcome_columns_filename = splitext(outcome_columns_filename)[0] + '_' + pop_size + splitext(outcome_columns_filename)[1]
codes_hd5_filename = splitext(codes_hd5_filename)[0] + '_' + pop_size + splitext(codes_hd5_filename)[1]
notes_hd5_filename = splitext(notes_hd5_filename)[0] + '_' + pop_size + splitext(notes_hd5_filename)[1]
idx_hd5_filename = splitext(idx_hd5_filename)[0] + '_' + pop_size + splitext(idx_hd5_filename)[1]
dbname = args['psql_dbname']
schema_name = args['psql_schema_name']
query_args = {'dbname': dbname}
if args['psql_host'] is not None: query_args['host'] = args['psql_host']
if args['psql_user'] is not None: query_args['user'] = args['psql_user']
if args['psql_password'] is not None: query_args['password'] = args['psql_password']
querier = MIMIC_Querier(query_args=query_args, schema_name=schema_name)
#############
# Population extraction
data = None
    if (args['extract_pop'] == 0 or args['extract_pop'] == 1) and isfile(os.path.join(outPath, static_filename)):
print("Reloading data from %s" % os.path.join(outPath, static_filename))
data = pd.read_csv(os.path.join(outPath, static_filename))
data = sanitize_df(data, static_data_schema)
    elif (args['extract_pop'] == 1 and not isfile(os.path.join(outPath, static_filename))) or (args['extract_pop'] == 2):
print("Building data from scratch.")
pop_size_string = ''
if args['pop_size'] > 0:
pop_size_string = 'LIMIT ' + str(args['pop_size'])
min_age_string = str(args['min_age'])
min_dur_string = str(args['min_duration'])
max_dur_string = str(args['max_duration'])
min_day_string = str(float(args['min_duration'])/24)
template_vars = dict(
limit=pop_size_string, min_age=min_age_string, min_dur=min_dur_string, max_dur=max_dur_string,
min_day=min_day_string
)
data_df = querier.query(query_file=STATICS_QUERY_PATH, extra_template_vars=template_vars)
data_df = sanitize_df(data_df, static_data_schema)
print("Storing data @ %s" % os.path.join(outPath, static_filename))
data = save_pop(data_df, outPath, static_filename, args['pop_size'], static_data_schema)
if data is None: print('SKIPPED static_data')
else:
# So all subsequent queries will limit to just that already extracted in data_df.
querier.add_exclusion_criteria_from_df(data, columns=['hadm_id', 'subject_id'])
print("loaded static_data")
#############
# If there is numerics extraction
X = None
    if (args['extract_numerics'] == 0 or args['extract_numerics'] == 1) and isfile(os.path.join(outPath, dynamic_hd5_filename)):
print("Reloading X from %s" % os.path.join(outPath, dynamic_hd5_filename))
X = pd.read_hdf(os.path.join(outPath, dynamic_hd5_filename))
    elif (args['extract_numerics'] == 1 and not isfile(os.path.join(outPath, dynamic_hd5_filename))) or (args['extract_numerics'] == 2):
print("Extracting vitals data...")
start_time = time.time()
########
# Step 1) Get the set of variables we want for the patients we've identified!
icuids_to_keep = get_values_by_name_from_df_column_or_index(data, 'icustay_id')
icuids_to_keep = set([str(s) for s in icuids_to_keep])
data = data.copy(deep=True).reset_index().set_index('icustay_id')
# Select out SID, TIME, ITEMID, VALUE form each of the sources!
var_map = get_variable_mapping(mimic_mapping_filename)
var_ranges = get_variable_ranges(range_filename)
chartitems_to_keep = var_map.loc[var_map['LINKSTO'] == 'chartevents'].ITEMID
chartitems_to_keep = set([ str(i) for i in chartitems_to_keep ])
labitems_to_keep = var_map.loc[var_map['LINKSTO'] == 'labevents'].ITEMID
labitems_to_keep = set([ str(i) for i in labitems_to_keep ])
# TODO(mmd): Use querier, move to file
con = psycopg2.connect(**query_args)
cur = con.cursor()
print(" starting db query with %d subjects..." % (len(icuids_to_keep)))
cur.execute('SET search_path to ' + schema_name)
query = \
"""
select c.subject_id, i.hadm_id, c.icustay_id, c.charttime, c.itemid, c.value, valueuom
FROM icustay_detail i
INNER JOIN chartevents c ON i.icustay_id = c.icustay_id
where c.icustay_id in ({icuids})
and c.itemid in ({chitem})
and c.charttime between intime and outtime
and c.error is distinct from 1
and c.valuenum is not null
UNION ALL
select distinct i.subject_id, i.hadm_id, i.icustay_id, l.charttime, l.itemid, l.value, valueuom
FROM icustay_detail i
INNER JOIN labevents l ON i.hadm_id = l.hadm_id
where i.icustay_id in ({icuids})
and l.itemid in ({lbitem})
and l.charttime between (intime - interval '6' hour) and outtime
and l.valuenum > 0 -- lab values cannot be 0 and cannot be negative
;
""".format(icuids=','.join(icuids_to_keep), chitem=','.join(chartitems_to_keep), lbitem=','.join(labitems_to_keep))
    X = pd.read_sql_query(query, con)
import os
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from scipy.optimize import curve_fit
import shutil
from . import C_preprocessing as preproc
class ODE_POSTPROC:
'''
In this class the output of the ODE is post-processed and the output is written as required by optiSMOKE++
a. Read the Output\Output.OUT file and extract the columns => generate t,W
b. Once extracted: generate the files corresponding to the profile of every species and write them
c. Write "path_to_Exp_Datasets.txt" indicating the experimental dataset considered
d. Copy input_OS.dic in the new folder
'''
def __init__(self,cwd):
'''
Indicates the path where to store the output.
It can be called either at the beginning of the main or at the first iteration
'''
self.cwd = cwd
# preallocate the list of pathways to the experimental datasets you will generate.
# the paths to the OS input correspond to the datasets.
self.path_to_Exp_Datasets = []
self.path_to_OS_inputs = []
def MAKE_BRANCHINGFOLDERS(self,jobtype,REACLUMPED,PRODSLUMPED,T_VECT):
'''
Makes the subfolder "BF_OUTPUT" to store the data of the branching of the lumped products
It also allocates the dictionary where to store the branchings, which will be written at the end of each single-P simulation
NB this function is to be called only if PRODSLUMPED contains arrays. if not: raise exception
'''
self.lumped_branching = {}
# allocate the composition of the lumped reactant
if isinstance(REACLUMPED[0],np.ndarray):
# generate empty dataframe:
self.lumped_branching_reac = pd.DataFrame(index=T_VECT,columns=REACLUMPED[0])
# allocate only the lumped products with arrays
for PRi_L in PRODSLUMPED.index:
# the indexes are the names of the lumped species
if isinstance(PRODSLUMPED[PRi_L],np.ndarray):
# generate empty dataframe
df_PRi_L = pd.DataFrame(index=T_VECT,columns=PRODSLUMPED[PRi_L])
# allocate it in dictionary
self.lumped_branching[PRi_L] = df_PRi_L
# make the folder "Branchings" if not present already
self.branchingpath = os.path.join(self.cwd,jobtype,'BF_OUTPUT',REACLUMPED.index[0])
if os.path.exists(self.branchingpath) == False:
os.makedirs(self.branchingpath)
def MAKE_FOLDERS(self,fld,P,T,REACLUMPED):
'''
Makes the subfolders at the selected conditions of reactant, temperature and pressure
'''
# extract the name of the reactant
REAC = REACLUMPED.index[0]
self.fld = fld
self.dir_PT = os.path.join(fld,str(P) + 'atm',str(T) + 'K')
if os.path.exists(self.dir_PT) == False:
os.makedirs(self.dir_PT)
# Allocate T,P,REAC to "self" for successive use
self.T = T
self.P = P
self.REACNAME = REAC
self.REAC = REACLUMPED[0]
# NB self.REAC is the array of reactants, self.REACNAME is the name (single reactant or R_L)
def EXTRACT_PROFILES(self,SPECIES,i_REAC,N_INIT_REAC,SPECIES_BIMOL_SERIES,ISOM_EQUIL,CUTOFF):
'''
Read the profiles obtained by OpenSMOKE++ and store them into arrays
'''
# if the reaction is bimolecular: read also the xi of the second species.
# for bimolecular lumped species: set i_REAC as the first species
if isinstance(i_REAC,np.ndarray):
i_REAC_0 = i_REAC[0]
else:
i_REAC_0 = i_REAC
if SPECIES_BIMOL_SERIES.iloc[i_REAC_0] != '' and SPECIES_BIMOL_SERIES.iloc[i_REAC_0] != SPECIES[i_REAC_0]:
# if the second reactant has all same fragment: 1 extracolumn
extracol = 1
else:
extracol = 0
# read the output file
filename = os.path.join(self.cwd,'Output','Output.out')
if os.path.isfile(filename):
cols_species = np.arange(9,9+len(SPECIES)+extracol)
n_cols = np.insert(cols_species,0,[0]) # 0 IS THE INDEX, AND [0] IS THE VALUE TO INSERT
data = np.genfromtxt(filename,dtype=float,skip_header=1,usecols=(n_cols))
# extract also PV (needed for the total number of moles)
cols_PV = np.array([5,6],dtype=int)
data_PV = np.genfromtxt(filename,dtype=float,skip_header=1,usecols=(cols_PV))
######## lumped reactant: sum the profiles, redefine a new reactant index and name, new species and species bimol series
if isinstance(i_REAC,np.ndarray):
# 0: for the calculation of the derivatives: compute the absolute variation of each isomer
dreac_i_dt = abs((-data[2:,i_REAC+1]+data[1:-1,i_REAC+1])/(data[2:,0]-data[1:-1,0]).reshape(len(data[2:,0]),1))
dreac_dt = np.sum(dreac_i_dt,axis=1)
# 1: sum the profiles of all the reactants; save the reactant composition for later
Wreac_new = np.sum(data[:,i_REAC+1],axis=1)
Wreac_new = Wreac_new[np.newaxis,:]
self.Wreac_composition = data[:,i_REAC+1]
# 2: delete all the columns corresponding to the reactant and add the reactant in the first column
# do the same for the names of the species and the bimolecular species
data = np.delete(data,i_REAC+1,axis=1)
data = np.insert(data,1,Wreac_new,axis=1)
SPECIES = np.delete(SPECIES,i_REAC)
bim_reac = pd.Series(SPECIES_BIMOL_SERIES[i_REAC_0],index=[self.REACNAME])
SPECIES_BIMOL_SERIES = SPECIES_BIMOL_SERIES[SPECIES]
SPECIES = np.insert(SPECIES,0,self.REACNAME)
SPECIES_BIMOL_SERIES = pd.concat([bim_reac,SPECIES_BIMOL_SERIES])
# 3: assign new indices
i_REAC = 0 # now the reactant is in the first position
reaclumped = 'YES'
else:
# compute the derivative of the reactant consumption
dreac_dt = (-data[2:,i_REAC+1]+data[1:-1,i_REAC+1])/(data[2:,0]-data[1:-1,0])
            # save variables for later
reaclumped = 'NO'
# cut the profiles where needed
i_in = np.where(data[:,i_REAC+1] <= (1-CUTOFF[0])*N_INIT_REAC)
i_fin = np.where(data[:,i_REAC+1] <= (1-CUTOFF[1])*N_INIT_REAC)
# if the reactant does not reach the minimum consumption (possible for lumped reactants): set the initial value as 0
if len(i_in[0]) == 0:
i_in = 0
else:
i_in = i_in[0][0]
# impose to cut when the DERIVATIVE of the reactant (consumption) reaches a small value
if len(i_fin[0]) == 0:
#i_fin = np.where(dreac_dt[3:] < dreac_dt[3]*1e-4)
# remove infinite values and keep only positive values
dreac2_dt = dreac_dt[(np.isinf(dreac_dt)==False) & (dreac_dt > 0)]
if len(dreac2_dt) <= 1:
# include also length = 1 otherwise it means that i_in and i_fin will be the same
i_in = 0
i_fin = len(dreac_dt)
else:
maxderiv = max(dreac2_dt)
minderiv = min(dreac2_dt)
i_in = np.where(dreac_dt==maxderiv)[0][0]
if minderiv <= maxderiv*1e-4 :
cutoff_deriv = dreac2_dt[dreac2_dt < maxderiv*1e-4][0]
i_fin = np.where(dreac_dt==cutoff_deriv)[0][0]
elif minderiv > maxderiv*1e-4 :
i_fin = np.where(dreac_dt==minderiv)[0][0]
else:
i_fin = i_fin[0][0]
# check that i_fin > i_in, otherwise set i_in to 0
if i_fin < i_in:
i_in = 0
# save data in the appropriate range of consumption of the reactant
data = data[i_in:i_fin,:]
data_PV = data_PV[i_in:i_fin,:]
t = data[:,0]
t = t[:,np.newaxis]
# W will have the mole fractions multiplied by PV/RT (CtotV = Ntot)
# IF YOU HAVE AN EXTRA SPECIES (NAMELY: BIMOLECULAR REACTANT), MULTIPLY THE COLUMN OF THE REACTANT BY THAT
W = data[:,1:]
if extracol == 1:
# multiply by the number of the second fragments s.t. you reach the total fraction of N_ABU
W_reac2 = W[:,-1]
W = W[:,:-1]
# self.W will be used to write the profiles to the optimizer, so no modification should be done
self.W = pd.DataFrame(W,columns=SPECIES)
# after this, multiply W[reac] by the bimolecular reactant to obtain xi^2
W[:,i_REAC] = W[:,i_REAC]*W_reac2
else:
self.W = pd.DataFrame(W,columns=SPECIES)
self.t = t
tW_DF = np.concatenate((t,W),axis=1)
tW_DF = pd.DataFrame(tW_DF,columns=np.insert(SPECIES,0,'t'))
# FOR LUMPED REACTANTS: SAVE THE BRANCHING FRACTIONS FOR LATER
if reaclumped == 'YES':
# reactant composition
# print(self.Wreac_composition,i_in,i_fin)
self.Wreac_composition = self.Wreac_composition[i_in:i_fin,:]
# if ISOM_EQUIL is active: take only the last BF
if ISOM_EQUIL == 1:
self.lumped_branching_reac.loc[self.T,self.REAC] = self.Wreac_composition[-1,:]/np.sum(self.Wreac_composition[-1,:])
elif ISOM_EQUIL == 0:
Wreac_tot = np.sum(self.Wreac_composition[1:],axis=1)
Wreac_tot = Wreac_tot[:,np.newaxis]
dtweight = ((self.t[1:]-self.t[:-1])/self.t[-1])
br_weighted = self.Wreac_composition[1:,:]/Wreac_tot*dtweight
self.lumped_branching_reac.loc[self.T,self.REAC] = np.sum(br_weighted,axis=0)
# save the reactant composition separately for plotting
else:
raise ValueError('OS output file not found')
self.tW_DF = tW_DF
# save species and bimol series for the following steps
self.i_REAC = i_REAC
self.SPECIES = SPECIES
self.SPECIES_BIMOL_SERIES = SPECIES_BIMOL_SERIES
return tW_DF,data_PV
def PROFILES_REAC_COMPOSITION(self):
'''
Method to be called only in presence of a lumped reactant.
It returns the profile of lumped set of reactants in case it is needed for later use
'''
try:
tW_DF_reac = np.concatenate((self.t,self.Wreac_composition),axis=1)
tW_DF_reac = pd.DataFrame(tW_DF_reac,columns=np.insert(self.REAC,0,'t'))
return tW_DF_reac
except ValueError as e:
print(str(e))
def LUMP_PROFILES(self,PRODS,PRODSLUMPED):
'''
IN THIS METHOD:
- TAKE THE PROFILES OF self.W and sum those of the lumped products
- redefine self.PRODS as the names of the lumped products and rewrite self.W and tW_DF
NB also include the species that are not part of PRODSLUMPED or reac.
- Allocate the branchings within each lumped products to the appropriate dictionary
- for later processing: define new
REAC_L
i_REAC_L
SPECIES_SERIES_L
SPECIES_BIMOL_SERIES_L
PRODS_L
these have the same format as the initial ones.
'''
self.PRODS = PRODS
self.PRODSLUMPED = PRODSLUMPED
# non-lumped products: return all the values the same as before
if len(self.PRODS) == len(PRODSLUMPED) :
tW_DF = self.tW_DF
i_REAC_L = self.i_REAC
SPECIES_L = self.SPECIES
SPECIES_SERIES_L = pd.Series(np.arange(0,len(self.SPECIES)),index = self.SPECIES)
SPECIES_BIMOL_SERIES_L = self.SPECIES_BIMOL_SERIES
PRODS_L = self.PRODS
else:
# redefine the product names
self.PRODS = np.array(PRODSLUMPED.index, dtype='<U16')
# empty dataframe for the products
W_prods_L = pd.DataFrame(columns=self.PRODS)
# empty series for bimolecular species
PRODS_L_BIMOL = pd.Series(index=self.PRODS)
# lumped products: go over the lumped products and generate W_prods
# delete the corresponding columns in the dataframe
for PRi_L in self.PRODS:
# PRODSLUMPED[PRi_L] will be the value (either string or array, depending on whether the product is lumped or not)
PRi_L_value = PRODSLUMPED[PRi_L]
# if the product is just a string (single product): just save the corresponding line and delete it
if isinstance(PRi_L_value,str):
# NB here we have PRi_L_value==PRi_L, so using one or the other makes no difference
# delete the column from self.W and move it to W_prods_L
W_prods_L[PRi_L] = self.W[PRi_L]
# now delete the column from self.W
self.W = self.W.drop(columns=PRi_L)
# reconstruct the series of bimolecular species
PRODS_L_BIMOL[PRi_L] = self.SPECIES_BIMOL_SERIES[PRi_L]
elif isinstance(PRi_L_value,np.ndarray):
# for each of the products: sum the columns
W_prods_L[PRi_L] = np.sum(self.W[PRi_L_value],axis=1)
# compute the weighted average branching within each product and save them to dataframe
Wtot = W_prods_L[PRi_L].values[1:, np.newaxis]
dtweight = ((self.t[1:]-self.t[:-1])/self.t[-1])
BR_PRi_L = self.W.loc[1:,PRi_L_value]/Wtot*dtweight
self.lumped_branching[PRi_L].loc[self.T,PRi_L_value] = np.sum(BR_PRi_L,axis=0)
# delete the corresponding columns
self.W = self.W.drop(columns=PRi_L_value)
# reconstruct the series of bimolecular species: take the corresponding species of the first lumped species
PRODS_L_BIMOL[PRi_L] = self.SPECIES_BIMOL_SERIES[PRi_L_value[0]]
# now you deleted from self.W all the product profiles. Put them back at the end by concatenating the dataframes
W_noprods = self.W # save it for later
self.W = pd.concat([self.W,W_prods_L],axis=1)
# new dataframe
timeseries = pd.DataFrame(self.t,columns=['t'])
tW_DF = pd.concat([timeseries,self.W],axis=1)
# new names
SPECIES_L = self.W.columns
SPECIES_SERIES_L = pd.Series(np.arange(0,len(SPECIES_L)),index = SPECIES_L)
i_REAC_L = SPECIES_SERIES_L[self.REACNAME]
PRODS_L = self.PRODS
# bimolecular species: # first select the non-product species
SPECIES_BIMOL_L = self.SPECIES_BIMOL_SERIES[W_noprods.columns]
SPECIES_BIMOL_SERIES_L = pd.Series(SPECIES_BIMOL_L,index=W_noprods.columns)
# now concatenate this series with that of the products
SPECIES_BIMOL_SERIES_L = pd.concat([SPECIES_BIMOL_SERIES_L,PRODS_L_BIMOL])
# UPDATE THE VALUE FOR THE FOLLOWING STEPS
self.SPECIES_BIMOL_SERIES = SPECIES_BIMOL_SERIES_L
return tW_DF,self.REACNAME,i_REAC_L,SPECIES_L,SPECIES_SERIES_L,SPECIES_BIMOL_SERIES_L,PRODS_L
def WRITE_PROFILES(self, PRODS, verbose=None):
'''
        Writes a single file in Output_to_optiSMOKE/P_reac/T
        with the profiles of the reactant and the lumped products.
        Finally, records the dataset path so Path_to_Exp_Datasets.txt can list the generated files.
'''
PRODS = np.array(PRODS,dtype='<U20')
self.PRODS = PRODS
# indices of the reactant and of the products
indices_R_prods = np.insert(self.PRODS,0,self.REACNAME)
# empty matrix for the results
exp_dataset = np.zeros((np.shape(self.t)[0],3*len(indices_R_prods)))
exp_dataset[:,0::3] = self.t # first column with the time
# if you have only 1 species to write:
if verbose:
print(self.W)
print(indices_R_prods)
exp_dataset[:,1::3] = self.W[indices_R_prods]
exp_dataset[:,2::3] = 0.1*np.ones(self.t.shape) # third column with the error
# Write the profiles ONLY FOR THE REACTANT AND THE LUMPED PRODUCTS
header = np.array(['Batch','m_SP',len(self.PRODS)+1],dtype=str)
header = np.insert(indices_R_prods,0,header)
if 3*(len(indices_R_prods))-(3+len(indices_R_prods)) > 0:
emptycols = np.zeros(3*(len(indices_R_prods))-(3+len(indices_R_prods)),dtype='<U20') # empty spaces corresponding to the other columns with no header
header = np.insert(emptycols,0,header)
else:
# for 1 species: the header must have only 3 elements
header = np.array(['Batch m_SP',len(self.PRODS)+1],dtype=str)
header = np.insert(indices_R_prods,0,header)
header = header[np.newaxis,:]
exp_towrite = np.concatenate((header,exp_dataset),axis=0)
        np.savetxt(os.path.join(self.dir_PT, str(self.T) + '.txt'), exp_towrite, delimiter='\t', fmt='%s')
self.path_to_Exp_Datasets.append(os.path.join(str(self.P) + 'atm', str(self.T) + 'K', str(self.T) + '.txt'))
self.path_to_OS_inputs.append(os.path.join(str(self.P) + 'atm', str(self.T) + 'K', 'input_OS.dic'))
def WRITE_BRANCHINGS_PRODS(self, PRODS):
'''
This method writes the profiles of the lumped products in the folder "Branchings"
'''
# products:
if len(self.PRODSLUMPED) != len(PRODS):
for PRi_L in self.lumped_branching:
# make corresponding subfolder if it does not exist
if os.path.isdir(os.path.join(self.branchingpath,PRi_L)) == False:
os.makedirs(os.path.join(self.branchingpath,PRi_L))
fld = os.path.join(self.branchingpath,PRi_L,str(self.P)+'atm.txt')
# concatenate 2 dataframes: write also the values of the temperature
# NB for concatenation along rows, the same index is needed!
T_DF = pd.DataFrame(self.lumped_branching[PRi_L].index,index=self.lumped_branching[PRi_L].index,columns=['T[K]'])
BRall = pd.concat([T_DF,self.lumped_branching[PRi_L]],axis=1)
formats = pd.Series(index=BRall.columns,dtype=str)
formats[self.lumped_branching[PRi_L].columns] = '%1.5f'
formats['T[K]'] = '%d'
formats_list = list(formats.values)
head = '\t'.join(BRall.columns)
np.savetxt(fld,BRall,fmt=formats_list,header=head,comments='\t')
def WRITE_BRANCHINGS_REACS(self):
# lumped reactant:
if isinstance(self.REAC,np.ndarray):
# folder
if os.path.isdir(os.path.join(self.branchingpath,self.REACNAME)) == False:
os.makedirs(os.path.join(self.branchingpath,self.REACNAME))
fld = os.path.join(self.branchingpath,self.REACNAME,str(self.P)+'atm.txt')
#fld = self.branchingpath + '/' + self.REACNAME + '_from' + self.REACNAME + '_' + str(self.P) + 'atm.txt'
# concatenation
T_DF = pd.DataFrame(self.lumped_branching_reac.index,index=self.lumped_branching_reac.index,columns=['T[K]'])
BRall = pd.concat([T_DF,self.lumped_branching_reac],axis=1)
formats = pd.Series(index=BRall.columns,dtype=str)
formats[self.REAC] = '%1.5f'
formats['T[K]'] = '%d'
formats_list = list(formats.values)
head = '\t'.join(BRall.columns)
np.savetxt(fld,BRall,fmt=formats_list,header=head,comments='\t')
return BRall
def WRITE_NEW_OSINPUT(self,N_INIT):
'''
This method writes the new OS input in the selected subfolders
'''
# with the new indices, the reactants and products are lumped
new_indices = np.insert(self.PRODS,0,self.REACNAME)
os.mkdir(os.path.join(self.dir_PT,'inp'))
# Copy the input to OS simulations and substitute the values of interest
shutil.copyfile(os.path.join(self.cwd,'inp','input_OS_template.dic'),os.path.join(self.dir_PT,'inp','input_OS_template.dic'))
# write new input in the selected folder
        write_OS_new = preproc.WRITE_OS_INPUT(self.dir_PT, self.T, self.P, new_indices, pd.Series(self.REACNAME, index=[self.REACNAME]))
#!/usr/bin/env python3
import requests
import json
import pandas as pd
import numpy as np
import os
import sys
import time
from datetime import datetime, date
from strava_logging import logger
from db_connection import connect, sql
from location_data import lookup_location
class Athlete:
def __init__(self, **kwargs):
self.conn = self.create_connection()
if kwargs:
self.cond = next(iter(kwargs.keys()))
self.val = next(iter(kwargs.values()))
else:
self.cond = None
self.val = None
self.df = self.return_df()
self.ath_info = self.athlete_info()
@staticmethod
def create_connection():
return connect()
def create_new_athlete(self, athlete_id: int, client_id: int, client_secret: str, refresh_token: str,
firstname: str, lastname: str):
"""
Creates a new athlete in the database.
:param athlete_id: Identifier of the athlete in Strava
:param client_id: ID provided to access the athlete's data in the API
:param client_secret: Secret code for this API user.
:param refresh_token: Token used to refresh the API connection.
:param firstname: First name of the athlete.
:param lastname: Last name of the athlete.
:return:
"""
new_athlete_info = {
'athlete_id': athlete_id,
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
'firstname': firstname,
'lastname': lastname
}
df_new = pd.DataFrame([new_athlete_info])  # wrap in a list so a one-row frame is built from scalar values
conn = self.create_connection()
df_new.to_sql('athletes', conn, if_exists='append', index=False)
conn.close()
def return_df(self) -> pd.DataFrame:
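# Reads the full athletes table; if a condition/value pair was passed to the constructor, only matching rows are kept.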
df = pd.read_sql(sql="""SELECT * FROM athletes""", con=self.conn)
self.close_conn()
if self.cond is not None and self.val is not None and self.cond in df.columns:
df = df.loc[df[self.cond] == self.val]
return df
def athlete_info(self) -> dict:
"""
Returns the athlete's data which will be used in the Activities class.
:return:
"""
return self.df.to_dict(orient='records')[0]
def close_conn(self):
self.conn.close()
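# Usage sketch (assumes the 'athletes' table is already populated; the id below is
# a made-up example):
#
#   ath = Athlete(athlete_id=12345)       # filters the athletes table on athlete_id
#   info = ath.ath_info                   # dict with client_id, client_secret, refresh_token, ...
#   acts = Activities(athlete_info=info)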
class Activities:
def __init__(self, athlete_info: dict):
self.athlete_info = athlete_info
assert self.athlete_info is not None, f"Please provide athlete info. " \
f"Client_id, client_secret and refresh_token required."
self.athlete_id = self.athlete_info['athlete_id']
self.base_url = 'https://www.strava.com/api/v3'
self.refresh_data = self.refresh_api_connection()
self.access_token = self.refresh_data['access_token']
self.headers = {'Authorization': f"Bearer {self.access_token}"}
self.token_expires = self.refresh_data['expires_at']
self.conn = connect()
self.latest_activity = self.get_latest_activity()
self.earliest_activity = self.get_earliest_activity()
self.existing_locations = self.get_existing_locations()
self.existing_gear = self.get_existing_gear()
self.df = pd.DataFrame()
self.df_details = pd.DataFrame()
self.df_km = pd.DataFrame()
self.df_miles = pd.DataFrame()
def refresh_api_connection(self):
"""
Retrieves a new access token from the API. The access token will be used in the headers for later API calls.
:return:
"""
refresh_params = {
'client_id': self.athlete_info.get('client_id'),
'client_secret': self.athlete_info.get('client_secret'),
'refresh_token': self.athlete_info.get('refresh_token'),
'grant_type': 'refresh_token',
}
refresh_response = requests.post(url='https://www.strava.com/oauth/token', params=refresh_params)
assert refresh_response.ok, f"{refresh_response.status_code}, {refresh_response.text}"
refresh_data = json.loads(refresh_response.text)
refresh_data['expires_at_str'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(refresh_data['expires_at']))
logger.info(f"Access token will expire at {refresh_data['expires_at_str']}.")
return refresh_data
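# The refresh response is expected to contain at least 'access_token' and
# 'expires_at' (epoch seconds); 'expires_at_str' is added above for logging.
# Rough shape (values are illustrative only):
#
#   {'access_token': '...', 'expires_at': 1700000000,
#    'expires_at_str': '2023-11-14 22:13:20', ...}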
def check_expiry(self):
"""
This method checks if the token has expired. If the token has expired, it refreshes the token.
:return:
"""
if time.time() > self.token_expires:
self.refresh_data = self.refresh_api_connection()
self.access_token = self.refresh_data['access_token']
self.headers = {'Authorization': f"Bearer {self.access_token}"}
self.token_expires = self.refresh_data['expires_at']
def get_latest_activity(self):
"""
Returns the athlete's latest activity. To be used with the after parameter of get_activities.
:return:
"""
query = sql.text("SELECT MAX(start_date) FROM activities WHERE athlete_id =:ath_id")
return self.conn.execute(query, {'ath_id': self.athlete_info.get('athlete_id')}).fetchone()[0]
def get_earliest_activity(self):
"""
Returns the athlete's earliest activity. To be used with the before parameter of get_activities.
:return:
"""
query = sql.text("SELECT MIN(start_date) FROM activities WHERE athlete_id =:ath_id")
return self.conn.execute(query, {'ath_id': self.athlete_info.get('athlete_id')}).fetchone()[0]
def get_existing_locations(self) -> list:
"""
Returns a list of locations that have already been saved in the database.
:return: A list of tuples with latitude and longitude.
"""
df_l = pd.read_sql('locations', self.conn)
return list(zip(df_l['latitude'], df_l['longitude']))
def get_existing_gear(self) -> list:
"""
Returns a list of gear that is already saved in the database.
:return: List of gear IDs
"""
df_g = pd.read_sql('gear', self.conn)
return df_g['gear_id'].to_list()
def get_activities(self, save_json: bool = False, **kwargs):
"""
Returns data for multiple activities that meet the parameters provided.
The main use case is to retrieve all activities after the athlete's latest activity in the database,
so the default 'after' value is the latest start_date. If there are no activities for the athlete,
the 'after' value will be None.
The results are concatenated onto the main dataframe with activities.
:param save_json: Option to save API response data as a json file, defaults to False
:param kwargs:
after - return activities after this date provided as datetime, date, or str in 'yyyy-mm-dd' format,
before - return activities before this date provided as datetime, date, or str in 'yyyy-mm-dd' format,
per_page - number of activities per page (default and max are 200 to minimize API calls),
page - starting page number
:return:
"""
after = kwargs.get('after', self.latest_activity)
before = kwargs.get('before', None)
if after is not None:
if isinstance(after, str):
after = datetime.timestamp(datetime.strptime(after, '%Y-%m-%d'))
elif isinstance(after, datetime):
after = datetime.timestamp(after)
elif isinstance(after, date):
after = datetime.timestamp(datetime.combine(after, datetime.min.time()))
if before is not None:
if isinstance(before, str):
before = datetime.timestamp(datetime.strptime(before, '%Y-%m-%d'))
elif isinstance(before, datetime):
before = datetime.timestamp(before)
elif isinstance(before, date):
before = datetime.timestamp(datetime.combine(before, datetime.min.time()))
per_page = kwargs.get('per_page', 200)
page = kwargs.get('page', 1)
response = requests.get(url=f"{self.base_url}/athlete/activities",
headers=self.headers,
params={'after': after, 'before': before, 'per_page': per_page, 'page': page})
assert response.ok, f"{response.status_code}, {response.text}"
response_data = json.loads(response.text)
if save_json:
data_file = os.path.join('Activities', 'activity_lists',
f"activity_list {datetime.now().strftime('%Y-%m-%d %H%M%S')}.json")
with open(data_file, 'w') as f:
json.dump(response_data, f)
if len(response_data) > 0:
self.df = pd.concat([self.df, pd.json_normalize(response_data)]).reset_index(drop=True)
time.sleep(2)
return self.get_activities(save_json=save_json, after=after, before=before, per_page=per_page, page=(page + 1))
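# Example calls (sketch; the dates are arbitrary):
#
#   acts.get_activities()                                    # everything after the latest stored activity
#   acts.get_activities(after='2022-01-01', save_json=True)  # explicit start date, keep raw JSON
#   acts.get_activities(before=date(2022, 6, 1), per_page=50)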
def get_activity_ids(self) -> list:
"""
Returns a list of all activity IDs to be used later in get_activity_details
:return: list of all activity IDs
"""
if 'id' in self.df.columns:
return self.df['id'].to_list()
elif 'activity_id' in self.df.columns:
return self.df['activity_id'].to_list()
def get_activity_detail(self, activity_id: int, relevant_fields: list = None, save_json: bool = False):
"""
There are certain items that are only available by calling the API for each activity ID, notably the split info,
the activity description, the perceived exertion, the device name and number of calories. Activity info will
later be joined to main activity data. Splits are saved in separate dataframes for splits_miles and splits_km.
:param activity_id: ID of the activity
:param relevant_fields: List of fields that I am interested in which are not available from activity lists.
Default items are ['id', 'description', 'perceived_exertion', 'device_name', 'calories']
:param save_json: Option to save API response data as a json file, defaults to False
:return:
"""
if relevant_fields is None:
relevant_fields = ['id', 'description', 'perceived_exertion', 'device_name', 'calories']
activity_response = requests.get(url=f"{self.base_url}/activities/{activity_id}", headers=self.headers)
assert activity_response.ok, f"{activity_response.status_code}, {activity_response.text}"
activity_data = json.loads(activity_response.text)
if save_json:
data_file = os.path.join('Activities', 'individual_activities', f"{activity_id}.json")
with open(data_file, 'w') as f:
json.dump(activity_data, f)
relevant_data = {x: activity_data[x] for x in activity_data.keys() if x in relevant_fields}
df_activity_details = pd.DataFrame(relevant_data, index=[0])
df_activity_details.rename(columns={'id': 'activity_id', 'description': 'activity_description'}, inplace=True)
self.df_details = pd.concat([self.df_details, df_activity_details]).reset_index(drop=True)
splits_km = pd.DataFrame(activity_data['splits_metric'])
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as it cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categoricals with the same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0    1\n1    2\n2    3\n3    4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect Categorical, as it doesn't care about
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all length
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import task_submit
from task_submit import VGGTask,RESTask,RETask,DENTask,XCETask
import random
import kubernetes
import influxdb
import signal
#from TimeoutException import TimeoutError,Myhandler
import yaml
import requests
from multiprocessing import Process
import multiprocessing
import urllib
import urllib3
import time
import numpy as np
#from utils import Timer
#from sklearn.externals import joblib
#from sklearn.ensemble import GradientBoostingRegressor
#from sklearn.preprocessing import MinMaxScaler
np.set_printoptions(suppress=True)  # suppress scientific notation when printing numpy arrays
import os
import json
import math
import pandas as pd
import argparse
from pytz import UTC
from dateutil import parser
from datetime import datetime
import psutil
import socket
from max_heap import MaxHeap
import worker_queue
# from worker_queue import value_free_load,value_weight_load
from Global_client import Global_Influx
aToken = '<KEY>'
aTokenw = '<KEY>'
LOSSHOST = '192.168.128.21'
LOSSPORT = 12527
#from fps import vggfpmodel,resfpmodel,res2fpmodel,xcefpmodel,denfpmodel
def load_task(params_dict,template_id):
if template_id == 1:
try:
batch_size,flops,params = vggfpmodel.vggfp(**params_dict)
        except Exception as e:
            print(e)
elif template_id == 2:
try:
batch_size,flops,params = resfpmodel.resfp(**params_dict)
except Exception as e:
print(e)
elif template_id == 3:
try:
batch_size,flops,params = res2fpmodel.res2fp(**params_dict)
except Exception as e:
print(e)
else:
try:
batch_size,flops,params = xcefpmodel.xcefp(**params_dict)
except Exception as e:
print(e)
return batch_size,flops,params
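# Illustrative call only -- the parameter names below are hypothetical, and the fp*
# estimator modules come from the "from fps import ..." line that is commented out
# above, so that import has to be re-enabled before load_task can actually run:
#   batch_size, flops, params = load_task({"batch_size": 32, "num_layers": 16}, template_id=1)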
def parse():
parser = argparse.ArgumentParser(description="Node Monitor")
parser.add_argument('--save_path', default='/tfdata/nodedata', help='save path')
parser.add_argument('--database',default="NODEMESSAGE",help="save database")
parser.add_argument('--derivation',default=10,help='sampling rate')
parser.add_argument('--measurement',default="NODEMESSAGE",help="save measurement")
# parser.add_argument('--train_pg', action='store_true', help='whether train policy gradient')
# parser.add_argument('--train_dqn', action='store_true', help='whether train DQN')
# parser.add_argument('--test_pg', action='store_true', help='whether test policy gradient')
# parser.add_argument('--test_dqn', action='store_true', help='whether test DQN')
args = parser.parse_args()
return args
def update_token():
cacheData = os.popen(
"echo $(kubectl describe secret $(kubectl get secret -n kube-system | grep ^admin-user | awk '{print $1}') -n kube-system | grep -E '^token'| awk '{print $2}')").read()
cacheToken = cacheData[:-1]
newToken = str(cacheToken)
return newToken
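# The shell pipeline above asks kubectl for the kube-system "admin-user" ServiceAccount
# secret and extracts its bearer token; the trailing newline is stripped with [:-1].
# make_headers() then wraps the token into an "Authorization: Bearer <token>" header
# used by the metrics requests below.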
def make_headers(Token):
text = 'Bearer ' + Token
headers = {'Authorization': text}
return headers
def catch_message(url):
global aToken
aToken = update_token()
headers = make_headers(aToken)
response = requests.get(url,headers=headers,verify=False)
res_json = response.json()
return res_json
def database_create(databasename):
database_list = Global_Influx.Client_all.get_list_database()
creating = True
for db in database_list:
dbl = list(db.values())
if databasename in dbl:
creating = False
break
if creating:
Global_Influx.Client_all.create_database(databasename)
# Global_Influx.Client_all.create_database(databasename)
def match_cpu(raw_data):
cache = raw_data[:-1]
matched_data = math.ceil(int(cache)/1e6)
return matched_data
def match_memory(raw_data):
cache = raw_data[:-2]
matched_data = math.ceil(int(cache)/1024)
return matched_data
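# Worked examples, assuming kubelet-style units from the metrics API (CPU in
# nanocores with an "n" suffix, memory in KiB with a "Ki" suffix):
#   match_cpu("250000000n")   -> ceil(250000000 / 1e6) = 250   (millicores)
#   match_memory("1048576Ki") -> ceil(1048576 / 1024)  = 1024  (MiB)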
def match_timestamp(raw_data):
EPOCH = UTC.localize(datetime.utcfromtimestamp(0))
timestamp = parser.parse(raw_data)
if not timestamp.tzinfo:
        print("timestamp has no tzinfo, assuming UTC")
timestamp = UTC.localize(timestamp)
s = (timestamp - EPOCH).total_seconds()
return int(s)
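# Example: an RFC3339 timestamp is parsed, localized to UTC if naive, and converted
# to whole seconds since the Unix epoch, e.g.
#   match_timestamp("2021-03-01T08:00:00Z") -> 1614585600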
def generate_item(response,measurement):
node_cpu = {}
node_cpu['k8s-master'] = 32000
node_cpu['k8s-worker0'] = 24000
node_cpu['k8s-worker2'] = 24000
node_cpu['k8sworker1'] = 16000
node_cpu['k8s-worker3'] = 24000
node_cpu['k8s-worker4'] = 16000
node_cpu['k8s-worker5'] = 24000
node_memory = {}
node_memory['k8s-master'] = float(251 * 1024)
node_memory['k8s-worker0'] = float(94 * 1024)
node_memory['k8s-worker2'] = float(94 * 1024)
node_memory['k8sworker1'] = float(125 * 1024)
node_memory['k8s-worker3'] = float(94 * 1024)
node_memory['k8s-worker4'] = float(125 * 1024)
node_memory['k8s-worker5'] = float(94 * 1024)
points = []
# content = {}
timestamp = response['items'][0]['metadata']['creationTimestamp']
for item in response['items']:
content = {
'measurement': measurement,
'tags':{
"nodes": item['metadata']['name']
},
'fields': {
'cpu': match_cpu(item['usage']['cpu']),
'memory': match_memory(item['usage']['memory']),
'cpu_percent': float(match_cpu(item['usage']['cpu'])/node_cpu[item['metadata']['name']]),
'memory_percent': float(match_memory(item['usage']['memory']) / node_memory[item['metadata']['name']])
},
'time': match_timestamp(timestamp)
}
points.append(content)
return points
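# Each element of `points` is one InfluxDB point; a sketch of the shape for a single
# node (field values here are illustrative, 'NODEMESSAGE' is the default measurement):
#   {'measurement': 'NODEMESSAGE',
#    'tags':   {'nodes': 'k8s-worker0'},
#    'fields': {'cpu': 1350, 'memory': 2048,
#               'cpu_percent': 1350 / 24000, 'memory_percent': 2048 / (94 * 1024)},
#    'time':   1614585600}
# Every point reuses the creationTimestamp of the first item, and write_points()
# is later called with time precision 's' to match match_timestamp().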
def DeletefromDB(Client,DatabaseName):
databases = Client.get_list_database()
for Cn in databases:
if DatabaseName in Cn.values():
Client.drop_database(DatabaseName)
break
class Node_mess(multiprocessing.Process):
def __init__(self,url,args,tasks,v1):
multiprocessing.Process.__init__(self)
self.url = url
self.args = args
self.derivation = args.derivation
self.time_mess = {}
self.cpu_mess = {}
self.memory_mess = {}
self.cpu_per = {}
self.memory_per = {}
self.node_cpu = {}
self.node_cpu['k8s-master'] = 32000
self.node_cpu['k8s-worker0'] = 24000
self.node_cpu['k8s-worker2'] = 24000
self.node_cpu['k8sworker1'] = 16000
self.node_cpu['k8s-worker3'] = 24000
self.node_cpu['k8s-worker4'] = 16000
self.node_cpu['k8s-worker5'] = 24000
self.node_memory = {}
self.node_memory['k8s-master'] = float(251 * 1024)
self.node_memory['k8s-worker0'] = float(94 * 1024)
self.node_memory['k8s-worker2'] = float(94 * 1024)
self.node_memory['k8sworker1'] = float(125 * 1024)
self.node_memory['k8s-worker3'] = float(94 * 1024)
self.node_memory['k8s-worker4'] = float(125 * 1024)
self.node_memory['k8s-worker5'] = float(94 * 1024)
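        # Hard-coded per-node capacities: CPU in millicores, memory in MiB. They mirror
        # the tables in generate_item() and are only used to turn raw usage numbers into
        # the *_percent utilization fractions recorded below.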
# self.derivation = derivation
self.arg = args
self.tasks = tasks
self.v1 = v1
self.database = args.database
self.measurement = args.measurement
self.save_path = args.save_path
if not os.path.exists(self.arg.save_path):
os.makedirs(self.arg.save_path)
database_create(self.database)
self.client = influxdb.InfluxDBClient('192.168.128.10',port=8086,username='admin',password='<PASSWORD>',database=self.database)
#derivation
# def node_measurement(self,node_list):
# # Global_Influx.Client_all.get_list_measurements()
def run(self):
print(multiprocessing.current_process().pid)
print(os.getpid())
response = catch_message(self.url)
self.time_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.cpu_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.memory_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.cpu_per['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.memory_per['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
for item in response['items']:
self.time_mess[item['metadata']['name']] = [item['timestamp']]
self.cpu_mess[item['metadata']['name']] = [match_cpu(item['usage']['cpu'])]
self.memory_mess[item['metadata']['name']] = [match_memory(item['usage']['memory'])]
self.cpu_per[item['metadata']['name']] = [float(match_cpu(item['usage']['cpu'])/self.node_cpu[item['metadata']['name']])]
self.memory_per[item['metadata']['name']] = [float(match_memory(item['usage']['memory']) / self.node_memory[item['metadata']['name']])]
self.client.write_points(generate_item(response,self.measurement),'s',database=self.database)
time.sleep(self.derivation)
while True:
response = catch_message(self.url)
self.time_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.cpu_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.memory_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.cpu_per['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.memory_per['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
for item in response['items']:
self.time_mess[item['metadata']['name']].append(item['timestamp'])
self.cpu_mess[item['metadata']['name']].append(match_cpu(item['usage']['cpu']))
self.memory_mess[item['metadata']['name']].append(match_memory(item['usage']['memory']))
self.cpu_per[item['metadata']['name']].append(float(match_cpu(item['usage']['cpu'])/self.node_cpu[item['metadata']['name']]))
self.memory_per[item['metadata']['name']].append(float(match_memory(item['usage']['memory']) / self.node_memory[item['metadata']['name']]))
self.client.write_points(generate_item(response, self.measurement), 's', database=self.database)
if len(self.time_mess['creation'])%30==0 and len(self.time_mess['creation']) > 0:
data_frame = pd.DataFrame(self.time_mess)
data_frame.to_csv(self.save_path + '/' + 'struct.csv', mode='a+', index=False, sep=',')
print(self.cpu_mess)
print(len(self.cpu_mess))
for keyss in self.cpu_mess:
print(keyss+": "+str(len(self.cpu_mess[keyss])))
data_frame2 = pd.DataFrame(self.cpu_mess)
data_frame2.to_csv(self.save_path + '/' + 'node_cpu.csv', mode='a+', index=False, sep=',')
data_frame3 = pd.DataFrame(self.memory_mess)
data_frame3.to_csv(self.save_path + '/' + 'node_memory.csv', mode='a+', index=False, sep=',')
                data_frame4 = pd.DataFrame(self.cpu_per)
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
from pandas.core.indexes.timedeltas import timedelta_range
def test_asfreq_bug():
df = DataFrame(data=[1, 3], index=[timedelta(), timedelta(minutes=3)])
result = df.resample("1T").asfreq()
expected = DataFrame(
data=[1, np.nan, np.nan, 3],
index=timedelta_range("0 day", periods=4, freq="1T"),
)
tm.assert_frame_equal(result, expected)
def test_resample_with_nat():
# GH 13223
index = pd.to_timedelta(["0s", pd.NaT, "2s"])
result = DataFrame({"value": [2, 3, 5]}, index).resample("1s").mean()
expected = DataFrame(
{"value": [2.5, np.nan, 5.0]},
index=timedelta_range("0 day", periods=3, freq="1S"),
)
tm.assert_frame_equal(result, expected)
def test_resample_as_freq_with_subperiod():
# GH 13022
index = timedelta_range("00:00:00", "00:10:00", freq="5T")
df = DataFrame(data={"value": [1, 5, 10]}, index=index)
result = df.resample("2T").asfreq()
expected_data = {"value": [1, np.nan, np.nan, np.nan, np.nan, 10]}
expected = DataFrame(
        data=expected_data, index=timedelta_range("00:00:00", "00:10:00", freq="2T")
    )
    tm.assert_frame_equal(result, expected)
'''
Created on April 15, 2012
Last update on July 18, 2015
@author: <NAME>
@author: <NAME>
@author: <NAME>
'''
import pandas as pd
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
# def get(df, col):
# return(df[col])
# df['Close'] => get(df, COL.CLOSE)
# price=COL.CLOSE
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
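# Usage sketch (illustrative, not part of the original file). Each indicator below
# expects an OHLCV DataFrame whose column names match the Columns class, computes its
# series, and hands it to out(); with SETTINGS.join=True (the default) the indicator
# columns are appended to the input frame, otherwise only the result is returned.
#
#   prices = MA(prices, 20)        # prices now carries an 'MA_20' column
#   SETTINGS.join = False
#   rsi = RSI(prices, 14)          # returns just the 'RSI_14' Series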
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
result = pd.Series(pd.rolling_mean(df[price], n), name=name)
return out(SETTINGS, df, result)
def EMA(df, n, price='Close'):
"""
Exponential Moving Average
"""
result=pd.Series(pd.ewma(df[price], span=n, min_periods=n - 1), name='EMA_' + str(n))
return out(SETTINGS, df, result)
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
# for i, idx in enumerate(df.index)
# TR=max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
        TR = max(df['High'].iloc[i + 1], df['Close'].iloc[i]) - min(df['Low'].iloc[i + 1], df['Close'].iloc[i])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
result = pd.Series(pd.ewma(TR_s, span=n, min_periods=n), name='ATR_' + str(n))
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
MA = pd.Series(pd.rolling_mean(df[price], n))
MSD = pd.Series(pd.rolling_std(df[price], n))
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
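# Note: B1 above is the band width relative to the moving average ((upper - lower) / MA,
# i.e. 4*sigma/MA), and B2 is %b, the position of the price inside the +/-2*sigma bands
# ((price - lower) / (upper - lower)).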
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
result = pd.DataFrame([PP, R1, S1, R2, S2, R3, S3]).transpose()
return out(SETTINGS, df, result)
def STOK(df):
"""
Stochastic oscillator %K
"""
result = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return out(SETTINGS, df, result)
def STO(df, n):
"""
Stochastic oscillator %D
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
result = pd.Series(pd.ewma(SOk, span=n, min_periods=n - 1), name='SO%d_' + str(n))
return out(SETTINGS, df, result)
def SMA(df, timeperiod, key='Close'):
result = pd.Series(pd.rolling_mean(df[key], timeperiod, min_periods=timeperiod), name='SMA_' + str(timeperiod))
return out(SETTINGS, df, result)
def TRIX(df, n):
"""
Trix
"""
EX1 = pd.ewma(df['Close'], span=n, min_periods=n - 1)
EX2 = pd.ewma(EX1, span=n, min_periods=n - 1)
EX3 = pd.ewma(EX2, span=n, min_periods=n - 1)
i = 0
ROC_l = [0]
while i + 1 <= len(df) - 1: # df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
result = pd.Series(ROC_l, name='Trix_' + str(n))
return out(SETTINGS, df, result)
def ADX(df, n, n_ADX):
"""
Average Directional Movement Index
"""
i = 0
UpI = []
DoI = []
while i + 1 <= len(df) - 1: # df.index[-1]:
UpMove = df.get_value(i + 1, 'High') - df.get_value(i, 'High')
DoMove = df.get_value(i, 'Low') - df.get_value(i + 1, 'Low')
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(pd.ewma(TR_s, span=n, min_periods=n))
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1) / ATR,name='PosDI')
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1) / ATR,name='NegDI')
result = pd.Series(pd.ewma(abs(PosDI - NegDI) / (PosDI + NegDI), span=n_ADX, min_periods=n_ADX - 1), name='ADX_' + str(n) + '_' + str(n_ADX))
result = pd.concat([df,PosDI,NegDI,result], join='outer', axis=1,ignore_index=True)
result.columns=["High","Low","Close","PosDI","NegDI","ADX"]
return result
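# Note: unlike the other indicators, ADX() bypasses out() and returns a rebuilt frame;
# the hard-coded column renaming assumes the input df carries exactly High, Low, Close.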
def MACD(df, n_fast, n_slow, price='Close'):
"""
MACD, MACD Signal and MACD difference
"""
EMAfast = pd.Series(pd.ewma(df[price], span=n_fast, min_periods=n_slow - 1))
EMAslow = pd.Series(pd.ewma(df[price], span=n_slow, min_periods=n_slow - 1))
MACD = pd.Series(EMAfast - EMAslow, name='MACD_%d_%d' % (n_fast, n_slow))
MACDsign = pd.Series(pd.ewma(MACD, span=9, min_periods=8), name='MACDsign_%d_%d' % (n_fast, n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_%d_%d' % (n_fast, n_slow))
result = pd.DataFrame([MACD, MACDsign, MACDdiff]).transpose()
return out(SETTINGS, df, result)
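# Example (illustrative): the common parameterisation is MACD(df, 12, 26), producing
# MACD_12_26 (fast EMA minus slow EMA), MACDsign_12_26 (its 9-period EMA signal line)
# and MACDdiff_12_26 (the histogram, MACD minus signal).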
def MassI(df):
"""
Mass Index
"""
Range = df['High'] - df['Low']
EX1 = pd.ewma(Range, span=9, min_periods=8)
EX2 = pd.ewma(EX1, span=9, min_periods=8)
Mass = EX1 / EX2
result = pd.Series(pd.rolling_sum(Mass, 25), name='Mass Index')
return out(SETTINGS, df, result)
def Vortex(df, n):
"""
Vortex Indicator
"""
i = 0
TR = [0]
while i < len(df) - 1: # df.index[-1]:
Range = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < len(df) - 1: # df.index[-1]:
Range = abs(df.get_value(i + 1, 'High') - df.get_value(i, 'Low')) - abs(df.get_value(i + 1, 'Low') - df.get_value(i, 'High'))
VM.append(Range)
i = i + 1
result = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name='Vortex_' + str(n))
return out(SETTINGS, df, result)
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""
KST Oscillator
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
result = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))
return out(SETTINGS, df, result)
def RSI(df, n):
"""
Relative Strength Index
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= len(df) - 1: # df.index[-1]
UpMove = df.iloc[i + 1]['High'] - df.iloc[i]['High']
DoMove = df.iloc[i]['Low'] - df.iloc[i + 1]['Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1))
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1))
result = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
return out(SETTINGS, df, result)
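# Note: this RSI variant derives its up/down series from High/Low directional moves
# (as in +DM/-DM) rather than close-to-close changes, and returns a 0-1 ratio instead
# of the conventional 0-100 scale.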
def TSI(df, r, s):
"""
True Strength Index
"""
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(pd.ewma(M, span=r, min_periods=r - 1))
aEMA1 = pd.Series(pd.ewma(aM, span=r, min_periods=r - 1))
EMA2 = pd.Series(pd.ewma(EMA1, span=s, min_periods=s - 1))
aEMA2 = pd.Series(pd.ewma(aEMA1, span=s, min_periods=s - 1))
result = pd.Series(EMA2 / aEMA2, name='TSI_' + str(r) + '_' + str(s))
return out(SETTINGS, df, result)
def ACCDIST(df, n):
"""
Accumulation/Distribution
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
M = ad.diff(n - 1)
N = ad.shift(n - 1)
ROC = M / N
result = pd.Series(ROC, name='Acc/Dist_ROC_' + str(n))
return out(SETTINGS, df, result)
def Chaikin(df):
"""
Chaikin Oscillator
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
result = pd.Series(pd.ewma(ad, span=3, min_periods=2) - pd.ewma(ad, span=10, min_periods=9), name='Chaikin')
return out(SETTINGS, df, result)
def MFI(df, n):
"""
Money Flow Index and Ratio
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
i = 0
PosMF = [0]
while i < len(df) - 1: # df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.get_value(i + 1, 'Volume'))
else:
PosMF.append(0)
i=i + 1
PosMF = pd.Series(PosMF)
TotMF = PP * df['Volume']
MFR = pd.Series(PosMF / TotMF)
result = pd.Series(pd.rolling_mean(MFR, n), name='MFI_' + str(n))
return out(SETTINGS, df, result)
def OBV(df, n):
"""
On-balance Volume
"""
i = 0
OBV = [0]
while i < len(df) - 1: # df.index[-1]:
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') > 0:
OBV.append(df.get_value(i + 1, 'Volume'))
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') == 0:
OBV.append(0)
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') < 0:
OBV.append(-df.get_value(i + 1, 'Volume'))
i = i + 1
OBV = pd.Series(OBV)
result = pd.Series(pd.rolling_mean(OBV, n), name='OBV_' + str(n))
return out(SETTINGS, df, result)
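# Note: classic on-balance volume is a cumulative sum of signed volume; this variant
# returns an n-period rolling mean of the per-bar signed volume instead.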
def FORCE(df, n):
"""
Force Index
"""
result = pd.Series(df['Close'].diff(n) * df['Volume'].diff(n), name='Force_' + str(n))
return out(SETTINGS, df, result)
def EOM(df, n):
"""
Ease of Movement
"""
EoM = (df['High'].diff(1) + df['Low'].diff(1)) * (df['High'] - df['Low']) / (2 * df['Volume'])
    result = pd.Series(pd.rolling_mean(EoM, n), name='EoM_' + str(n))
    return out(SETTINGS, df, result)
#!/usr/bin/env python
r"""Test :py:class:`~solarwindpy.core.vector.Vector` and :py:class:`~solarwindpy.core.tensor.Tensor`.
"""
import pdb
# import re as re
import numpy as np
import pandas as pd
import unittest
import sys
import pandas.testing as pdt
from unittest import TestCase
from abc import ABC, abstractproperty
from scipy import constants
# import test_base as base
from solarwindpy.tests import test_base as base
from solarwindpy import vector
from solarwindpy import tensor
pd.set_option("mode.chained_assignment", "raise")
class QuantityTestBase(ABC):
def test_data(self):
data = self.data
if isinstance(data, pd.Series):
pdt.assert_series_equal(data, self.object_testing.data)
else:
pdt.assert_frame_equal(data, self.object_testing.data)
def test_eq(self):
print_inline_debug = False
object_testing = self.object_testing
# ID should be equal.
self.assertEqual(object_testing, object_testing)
# Data and type should be equal.
new_object = object_testing.__class__(object_testing.data)
if print_inline_debug:
print(
"<Test>",
"<object_testing>",
type(object_testing),
object_testing,
object_testing.data,
"<new_object>",
type(new_object),
new_object,
new_object.data,
"",
sep="\n",
)
self.assertEqual(object_testing, new_object)
def test_neq(self):
object_testing = self.object_testing
# Data isn't equal
self.assertNotEqual(
object_testing, object_testing.__class__(object_testing.data * 4)
)
# Type isn't equal
for other in (
[],
tuple(),
np.array([]),
pd.Series(dtype=np.float64),
pd.DataFrame(dtype=np.float64),
):
self.assertNotEqual(object_testing, other)
def test_empty_data_catch(self):
with self.assertRaisesRegex(
ValueError, "You can't set an object with empty data."
):
self.object_testing.__class__(pd.DataFrame())
#####
# Vectors
#####
class VectorTestBase(QuantityTestBase):
def test_components(self):
# print("test_components")
# print(self.data.iloc[:, :7], flush=True)
v = self.data
# print(v, file=sys.stdout)
pdt.assert_series_equal(v.x, self.object_testing.x)
pdt.assert_series_equal(v.y, self.object_testing.y)
pdt.assert_series_equal(v.z, self.object_testing.z)
def test_mag(self):
# print("test_mag")
# print(self.data.iloc[:, :7], flush=True)
x = self.data.x
y = self.data.y
z = self.data.z
# print(v, file=sys.stdout)
mag = np.sqrt(x.pow(2) + y.pow(2) + z.pow(2))
# mag = self.data.loc[:, ["x", "y", "z"]].pow(2).sum(axis=1).pipe(np.sqrt)
mag.name = "mag"
# print("", self.data, mag, self.object_testing.mag, sep="\n")
pdt.assert_series_equal(mag, self.object_testing.mag)
pdt.assert_series_equal(mag, self.object_testing.magnitude)
pdt.assert_series_equal(self.object_testing.mag, self.object_testing.magnitude)
def test_rho(self):
# print("test_rho")
x = self.data.x
y = self.data.y
rho = np.sqrt(x.pow(2) + y.pow(2))
rho.name = "rho"
pdt.assert_series_equal(rho, self.object_testing.rho)
def test_colat(self):
# print("test_colat")
x = self.data.x
y = self.data.y
z = self.data.z
colat = np.arctan2(z, np.sqrt(x.pow(2) + y.pow(2)))
colat = np.rad2deg(colat)
colat.name = "colat"
pdt.assert_series_equal(colat, self.object_testing.colat)
def test_longitude(self):
# print("test_longitude")
x = self.data.x
y = self.data.y
lon = np.arctan2(y, x)
lon = np.rad2deg(lon)
lon.name = "longitude"
pdt.assert_series_equal(lon, self.object_testing.lon)
pdt.assert_series_equal(lon, self.object_testing.longitude)
pdt.assert_series_equal(self.object_testing.lon, self.object_testing.longitude)
def test_r(self):
# print("test_r")
x = self.data.x
y = self.data.y
z = self.data.z
r = np.sqrt(x.pow(2) + y.pow(2) + z.pow(2))
r.name = "r"
pdt.assert_series_equal(r, self.object_testing.r)
pdt.assert_series_equal(r, self.object_testing.mag, check_names=False)
pdt.assert_series_equal(
self.object_testing.r, self.object_testing.mag, check_names=False
)
def test_cartesian(self):
v = self.data.loc[:, ["x", "y", "z"]]
pdt.assert_frame_equal(v, self.object_testing.cartesian)
def test_unit_vector(self):
v = self.data.loc[:, ["x", "y", "z"]]
mag = v.pow(2).sum(axis=1).pipe(np.sqrt)
uv = v.divide(mag, axis=0)
uv.name = "uv"
uv = vector.Vector(uv)
pdt.assert_frame_equal(uv.data, self.object_testing.unit_vector.data)
pdt.assert_frame_equal(uv.data, self.object_testing.uv.data)
pdt.assert_frame_equal(
self.object_testing.uv.data, self.object_testing.unit_vector.data
)
self.assertEqual(uv, self.object_testing.unit_vector)
self.assertEqual(uv, self.object_testing.uv)
self.assertEqual(self.object_testing.unit_vector, self.object_testing.uv)
def test_project(self):
b = (
base.TestData()
.plasma_data.xs("b", axis=1, level="M")
.xs("", axis=1, level="S")
.loc[:, ["x", "y", "z"]]
)
# b.setUpClass()
# b = (
# b.data.b.loc[:, ["x", "y", "z"]]
# .xs("", axis=1, level="S")
# .xs("", axis=1, level="N")
# )
bmag = b.pow(2).sum(axis=1).pipe(np.sqrt)
buv = b.divide(bmag, axis=0)
v = self.data.loc[:, ["x", "y", "z"]]
vmag = v.pow(2).sum(axis=1).pipe(np.sqrt)
# vuv = v.divide(vmag, axis=0)
par = v.multiply(buv, axis=1).sum(axis=1)
per = (
v.subtract(buv.multiply(par, axis=0), axis=1)
.pow(2)
.sum(axis=1)
.pipe(np.sqrt)
)
projected = pd.concat([par, per], axis=1, keys=["par", "per"], sort=True)
# print("",
# "<Test>",
# "<buv>", type(buv), buv,
# "<v>", type(v), v,
# "<vmag>", type(vmag), vmag,
# "<vuv>", type(vuv), vuv,
# "<projected>", type(projected), projected,
# "",
# sep="\n")
b = vector.Vector(b)
pdt.assert_frame_equal(projected, self.object_testing.project(b))
# Projecting a thing onto itself should return 1 for parallel
# and 0 for perp.
per = pd.Series(0.0, index=per.index)
projected = pd.concat([vmag, per], axis=1, keys=["par", "per"], sort=True)
pdt.assert_frame_equal(
projected, self.object_testing.project(self.object_testing)
)
msg = "`project` method needs"
with self.assertRaisesRegex(NotImplementedError, msg):
self.object_testing.project(b.data)
def test_cos_theta(self):
# b = base.TestData()
# b.setUpClass()
# b = (
# b.data.b.loc[:, ["x", "y", "z"]]
# .xs("", axis=1, level="S")
# .xs("", axis=1, level="N")
# )
b = (
base.TestData()
.plasma_data.xs("b", axis=1, level="M")
.xs("", axis=1, level="S")
.loc[:, ["x", "y", "z"]]
)
bmag = b.pow(2).sum(axis=1).pipe(np.sqrt)
buv = b.divide(bmag, axis=0)
v = self.data.loc[:, ["x", "y", "z"]]
vmag = v.pow(2).sum(axis=1).pipe(np.sqrt)
vuv = v.divide(vmag, axis=0)
cos_theta = vuv.multiply(buv, axis=1).sum(axis=1)
# print("",
# "<Test>",
# "<buv>", type(buv), buv,
# "<v>", type(v), v,
# "<vmag>", type(vmag), vmag,
# "<vuv>", type(vuv), vuv,
# "<cos_theta>", type(cos_theta), cos_theta,
# "",
# sep="\n")
b = vector.BField(b)
pdt.assert_series_equal(cos_theta, self.object_testing.cos_theta(b))
# Projecting a thing onto itself should return 1 for parallel
# and 0 for perp.
v = vector.Vector(v)
vuv = vector.Vector(vuv)
par = pd.Series(1.0, index=vmag.index)
pdt.assert_series_equal(par, self.object_testing.cos_theta(v))
pdt.assert_series_equal(par, self.object_testing.cos_theta(vuv))
msg = "`project` method needs"
with self.assertRaisesRegex(NotImplementedError, msg):
self.object_testing.project(b.data)
# class TestGSE(VectorTestBase, base.SWEData):
# @classmethod
# def set_object_testing(cls):
# # print("TestGSE.set_object_testing", flush=True)
# data = cls.data.gse.xs("", axis=1, level="S")
# gse = vector.Vector(data)
# cls.object_testing = gse
# cls.data = data
# # print("Done with TestGSE.set_object_testing", flush=True)
class TestBField(VectorTestBase, base.SWEData):
@classmethod
def set_object_testing(cls):
# print("BField.set_object_testing", flush=True)
data = cls.data.b.xs("", axis=1, level="S")
# b = vector.Vector(data)
b = vector.BField(data)
cls.object_testing = b
cls.data = data
# print("Done with BField.set_object_testing", flush=True)
def test_pressure(self):
print_inline_debug = False
bsq = self.data.loc[:, ["x", "y", "z"]].pow(2.0).sum(axis=1)
const = 1e-18 / (2.0 * constants.mu_0 * 1e-12) # ([b]**2 / 2.0 * \mu_0 * [p])
pb = bsq * const
pb.name = "pb"
# ot = self.object_testing
# pdb.set_trace()
if print_inline_debug:
print(
"",
"<Test>",
"<bsq>",
type(bsq),
bsq,
"<const>: %s" % const,
"<pb>",
type(pb),
pb,
sep="\n",
end="\n\n",
)
print(
"<Module>",
"<object testing>",
type(self.object_testing),
self.object_testing,
"<dir(ot)>",
*dir(self.object_testing),
sep="\n",
end="\n\n"
)
pdt.assert_series_equal(pb, self.object_testing.pressure)
pdt.assert_series_equal(pb, self.object_testing.pb)
pdt.assert_series_equal(self.object_testing.pressure, self.object_testing.pb)
class VelocityTestBase(VectorTestBase):
@classmethod
def set_object_testing(cls):
# print("VelocityTestBase.set_object_testing", flush=True)
data = cls.data.v.xs(cls().species, axis=1, level="S")
v = vector.Vector(data)
cls.object_testing = v
cls.data = data
# print("Done with VelocityTestBase.set_object_testing", flush=True)
@abstractproperty
def species(self):
pass
class TestVelocityAlpha(base.AlphaTest, VelocityTestBase, base.SWEData):
pass
class TestVelocityP1(base.P1Test, VelocityTestBase, base.SWEData):
pass
class TestVelocityP2(base.P2Test, VelocityTestBase, base.SWEData):
pass
#####
# Tensors
#####
class TensorTestBase(QuantityTestBase):
def test_components(self):
t = self.data
        pdt.assert_series_equal(t.par, self.object_testing.par)
#1. Load the data into the DB
#fetch the dataset from sklearn
from sklearn import datasets
boston = datasets.load_boston()
#convert the dataset to a pandas DataFrame
import pandas as pd
df = pd.DataFrame(boston['data'], columns=boston['feature_names'])
import bz2
import copy
from functools import partial
import gzip
import io
from inspect import signature
import json
from logging import getLogger, INFO
import lzma
import multiprocessing as mp
import pickle
import re
import sys
import traceback as trc
# to accept all typing.*
from typing import *
import warnings
import pandas as pd
from azure.identity import DefaultAzureCredential
from azure.core.exceptions import ResourceNotFoundError
from azfs.clients import AzfsClient, TextReader
from azfs.error import (
AzfsInputError,
AzfsDecoratorFileFormatError,
AzfsDecoratorSizeNotMatchedError,
AzfsDecoratorReturnTypeError
)
from azfs.utils import (
BlobPathDecoder,
ls_filter
)
__all__ = ["AzFileClient", "ExportDecorator", "export_decorator"]
logger = getLogger(__name__)
logger.setLevel(INFO)
class ExportDecorator:
def __init__(self):
self.functions = []
def register(self, _as: Optional[str] = None):
def _wrapper(func: callable):
func_name = func.__name__
self.functions.append(
{
"function_name": func_name,
"register_as": _as if _as is not None else func_name,
"function": func
}
)
return func
return _wrapper
__call__ = register
export_decorator = ExportDecorator()
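# Example (illustrative): registering a function under an alias with the shared
# decorator instance; the collected entries can later be attached elsewhere.
#
#   @export_decorator.register(_as="read_all")
#   def read_everything(path):
#       ...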
def _wrap_quick_load(inputs: dict):
"""
read wrapper function for multiprocessing.
Args:
        inputs: dict of keyword arguments forwarded to ``_quick_load``
    Returns:
        pd.DataFrame
"""
return _quick_load(**inputs)
def _quick_load(
path: str,
file_format: Optional[str] = None,
credential: Optional[str] = None,
apply_method: Optional[callable] = None) -> pd.DataFrame:
"""
read function for multiprocessing.
Args:
path: file-path to read
file_format: format of the file
        credential: storage account credential string; if None, DefaultAzureCredential is used
        apply_method: optional callable applied to the loaded DataFrame before it is returned
Returns:
pd.DataFrame
"""
if credential is None:
azc = AzFileClient()
else:
azc = AzFileClient(credential=credential)
# set file_format if None
if file_format is None:
if path.endswith(".csv"):
file_format = "csv"
elif path.endswith(".parquet"):
file_format = "parquet"
elif path.endswith(".pkl"):
file_format = "pickle"
else:
raise AzfsInputError("file_format is incorrect")
# read file as pandas DataFrame
if file_format == "csv":
df = azc.read_csv(path=path)
elif file_format == "parquet":
df = azc.read_parquet(path=path)
elif file_format == "pickle":
df = azc.read_pickle(path=path)
else:
raise AzfsInputError("file_format is incorrect")
# apply additional function
if apply_method is None:
return df
else:
return apply_method(df)
class DataFrameReader:
def __init__(
self,
_azc,
credential: Union[str, DefaultAzureCredential],
path: Union[str, List[str]] = None,
use_mp=False,
cpu_count: Optional[int] = None,
file_format: Optional[str] = None):
self._azc: AzFileClient = _azc
# DefaultCredential cannot be pickle (when use multiprocessing), so make it None
self._credential = credential if type(credential) is str else None
self.path: Optional[List[str]] = self._decode_path(path=path)
self.file_format = file_format
self.use_mp = use_mp
self.cpu_count = mp.cpu_count() if cpu_count is None else cpu_count
self._apply_method = None
def _decode_path(self, path: Optional[Union[str, List[str]]]) -> Optional[List[str]]:
"""
decode path to be read by azc
Args:
path: azure blob path
Returns:
"""
if path is None:
return None
elif type(path) is str:
if "*" in path:
decoded_path = self._azc.glob(pattern_path=path)
else:
decoded_path = [path]
elif type(path) is list:
decoded_path = path
else:
raise AzfsInputError("path must be `str` or `list`")
return decoded_path
def csv(self, path: Union[str, List[str]] = None, **kwargs) -> pd.DataFrame:
"""
read csv files in Azure Blob, like PySpark-method.
Args:
path: azure blob path
**kwargs: as same as pandas.read_csv
Returns:
pd.DataFrame
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> blob_path = "https://testazfs.blob.core.windows.net/test_container/test1.csv"
>>> df = azc.read().csv(blob_path)
# result is as same as azc.read_csv(blob_path)
>>> blob_path_list = [
... "https://testazfs.blob.core.windows.net/test_container/test1.csv",
... "https://testazfs.blob.core.windows.net/test_container/test2.csv"
... ]
>>> df = azc.read().csv(blob_path_list)
# result is as same as pd.concat([each data-frame])
# in addition, you can use `*`
>>> blob_path_pattern = "https://testazfs.blob.core.windows.net/test_container/test*.csv"
>>> df = azc.read().csv(blob_path_pattern)
# you can use multiprocessing with `use_mp` argument
>>> df = azc.read(use_mp=True).csv(blob_path_pattern)
# if you want to filter or apply some method, you can use your defined function as below
>>> def filter_function(_df: pd.DataFrame, _id: str) -> pd.DataFrame:
... return _df[_df['id'] == _id]
>>> df = azc.read(use_mp=True).apply(function=filter_function, _id="aaa").csv(blob_path_pattern)
"""
self.file_format = "csv"
if path is not None:
self.path = self._decode_path(path=path)
return self._load(**kwargs)
def parquet(self, path: Union[str, List[str]] = None) -> pd.DataFrame:
"""
read parquet files in Azure Blob, like PySpark-method.
Args:
path: azure blob path
Returns:
pd.DataFrame
"""
self.file_format = "parquet"
if path is not None:
self.path = self._decode_path(path=path)
return self._load()
def pickle(self, path: Union[str, List[str]] = None, compression: str = "gzip") -> pd.DataFrame:
"""
read pickle files in Azure Blob, like PySpark-method.
Args:
path: azure blob path
compression: acceptable keywords are: gzip, bz2, xz. gzip is default value.
Returns:
pd.DataFrame
"""
self.file_format = "pickle"
if path is not None:
self.path = self._decode_path(path=path)
return self._load(compression=compression)
def _load_function(self) -> callable:
"""
get read_* function according to the file_format
Returns:
"""
if self.file_format == "csv":
load_function = self._azc.read_csv
elif self.file_format == "parquet":
load_function = self._azc.read_parquet
elif self.file_format == "pickle":
load_function = self._azc.read_pickle
else:
raise AzfsInputError("file_format is incorrect")
return load_function
def apply(self, *, function: callable, **kwargs):
"""
to apply pandas DataFrame
Args:
function: first argument must pass pd.DataFrame
**kwargs: argument to pass the function
Returns:
self
"""
if kwargs:
self._apply_method = partial(function, **kwargs)
else:
self._apply_method = function
return self
def _load(self, **kwargs) -> Optional[pd.DataFrame]:
if self.path is None:
raise AzfsInputError("input azure blob path")
if self.use_mp:
params_list = []
for f in self.path:
_input = {
"path": f,
"file_format": self.file_format,
"credential": self._credential,
"apply_method": self._apply_method
}
_input.update(kwargs)
params_list.append(_input)
with mp.Pool(self.cpu_count) as pool:
df_list = pool.map(_wrap_quick_load, params_list)
pool.join()
else:
load_function = self._load_function()
if self._apply_method is None:
df_list = [load_function(f, **kwargs) for f in self.path]
else:
df_list = [self._apply_method(load_function(f, **kwargs)) for f in self.path]
if len(df_list) == 0:
return None
return pd.concat(df_list)
class AzFileClient:
"""
AzFileClient is
* list files in blob (also with wildcard ``*``),
* check if file exists,
* read csv as pd.DataFrame, and json as dict from blob,
* write pd.DataFrame as csv, and dict as json to blob,
Examples:
>>> import azfs
>>> from azure.identity import DefaultAzureCredential
credential is not required if your environment is on AAD
>>> azc = azfs.AzFileClient()
credential is required if your environment is not on AAD
>>> credential = "[your storage account credential]"
>>> azc = azfs.AzFileClient(credential=credential)
# or
>>> credential = DefaultAzureCredential()
>>> azc = azfs.AzFileClient(credential=credential)
    connection_string will also be accepted
>>> connection_string = "[your connection_string]"
>>> azc = azfs.AzFileClient(connection_string=connection_string)
"""
class AzContextManager:
"""
        AzContextManager provides an easy way to set new functions as attributes on another package such as pandas.
"""
def __init__(self):
self.register_list = []
def register(self, _as: str, _to: object):
"""
register decorated function to self.register_list.
Args:
_as: new method name
_to: assign to class or object
Returns:
decorated function
"""
def _register(function):
"""
append ``wrapper`` function
Args:
function:
Returns:
"""
def wrapper(class_instance):
"""
accept instance in kwargs as name of ``az_file_client_instance``
Args:
class_instance: always instance of AzFileClient
Returns:
"""
def new_function(*args, **kwargs):
"""
actual wrapped function
Args:
*args:
**kwargs:
Returns:
"""
target_function = getattr(class_instance, function.__name__)
df = args[0] if isinstance(args[0], pd.DataFrame) else None
if df is not None:
kwargs['df'] = args[0]
return target_function(*args[1:], **kwargs)
return target_function(*args, **kwargs)
return new_function
function_info = {
"assign_as": _as,
"assign_to": _to,
"function": wrapper
}
self.register_list.append(function_info)
return function
return _register
def attach(self, client: object):
"""
set new function as attribute based on self.register_list
Args:
client: set AzFileClient always
Returns:
None
"""
for f in self.register_list:
setattr(f['assign_to'], f['assign_as'], f['function'](class_instance=client))
def detach(self):
"""
set None based on self.register_list
Returns:
None
"""
for f in self.register_list:
setattr(f['assign_to'], f['assign_as'], None)
# instance for context manager
_az_context_manager = AzContextManager()
def __init__(
self,
credential: Optional[Union[str, DefaultAzureCredential]] = None,
connection_string: Optional[str] = None):
"""
if every argument is None, set credential as DefaultAzureCredential().
Args:
credential: if string, Blob Storage -> Access Keys -> Key
connection_string: connection_string
"""
if credential is None and connection_string is None:
credential = DefaultAzureCredential()
self._client = AzfsClient(credential=credential, connection_string=connection_string)
self._credential = credential
def __enter__(self):
"""
add some functions to pandas module based on AzContextManger()
Returns:
instance of AzFileClient
"""
self._az_context_manager.attach(client=self)
return self
def __exit__(self, exec_type, exec_value, traceback):
"""
remove some functions from pandas module based on AzContextManager()
Args:
exec_type:
exec_value:
traceback:
Returns:
None
"""
self._az_context_manager.detach()
def exists(self, path: str) -> bool:
"""
check if specified file exists or not.
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
Returns:
``True`` if files exists, otherwise ``False``
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> csv_path = "https://testazfs.blob.core.windows.net/test_container/test1.csv"
>>> azc.exists(path=csv_path)
True
>>> csv_path = "https://testazfs.blob.core.windows.net/test_container/not_exist_test1.csv"
>>> azc.exists(path=csv_path)
False
"""
try:
_ = self.info(path=path)
except ResourceNotFoundError:
return False
else:
return True
def ls(self, path: str, attach_prefix: bool = False) -> list:
"""
list blob file from blob or dfs.
Args:
path: Azure Blob path URL format, ex: https://testazfs.blob.core.windows.net/test_container
attach_prefix: return full_path if True, return only name
Returns:
list of azure blob files
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> csv_path = "https://testazfs.blob.core.windows.net/test_container"
>>> azc.ls(csv_path)
[
"test1.csv",
"test2.csv",
"test3.csv",
"directory_1",
"directory_2"
]
>>> azc.ls(path=path, attach_prefix=True)
[
"https://testazfs.blob.core.windows.net/test_container/test1.csv",
"https://testazfs.blob.core.windows.net/test_container/test2.csv",
"https://testazfs.blob.core.windows.net/test_container/test3.csv",
"https://testazfs.blob.core.windows.net/test_container/directory_1",
"https://testazfs.blob.core.windows.net/test_container/directory_2"
]
"""
_, account_kind, _, file_path = BlobPathDecoder(path).get_with_url()
file_list = self._client.get_client(account_kind=account_kind).ls(path=path, file_path=file_path)
if account_kind in ["dfs", "blob"]:
file_name_list = ls_filter(file_path_list=file_list, file_path=file_path)
if attach_prefix:
path = path if path.endswith("/") else f"{path}/"
file_full_path_list = [f"{path}{f}" for f in file_name_list]
return file_full_path_list
else:
return file_name_list
elif account_kind in ["queue"]:
return file_list
def cp(self, src_path: str, dst_path: str, overwrite=False) -> bool:
"""
copy the data from `src_path` to `dst_path`
Args:
src_path:
Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
dst_path:
Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test2.csv``
overwrite:
Returns:
"""
if src_path == dst_path:
raise AzfsInputError("src_path and dst_path must be different")
if (not overwrite) and self.exists(dst_path):
raise AzfsInputError(f"{dst_path} is already exists. Please set `overwrite=True`.")
data = self._get(path=src_path)
if type(data) is io.BytesIO:
self._put(path=dst_path, data=data.read())
elif type(data) is bytes:
self._put(path=dst_path, data=data)
return True
def rm(self, path: str) -> bool:
"""
delete the file in blob
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
Returns:
True if target file is correctly removed.
"""
_, account_kind, _, _ = BlobPathDecoder(path).get_with_url()
return self._client.get_client(account_kind=account_kind).rm(path=path)
def info(self, path: str) -> dict:
"""
get file properties, such as
``name``, ``creation_time``, ``last_modified_time``, ``size``, ``content_hash(md5)``.
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
Returns:
dict info of some file
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> csv_path = "https://testazfs.blob.core.windows.net/test_container/test1.csv"
>>> azc.info(path=csv_path)
{
"name": "test1.csv",
"size": "128KB",
"creation_time": "",
"last_modified": "",
"etag": "etag...",
"content_type": "",
"type": "file"
}
"""
_, account_kind, _, _ = BlobPathDecoder(path).get_with_url()
# get info from blob or data-lake storage
data = self._client.get_client(account_kind=account_kind).info(path=path)
# extract below to determine file or directory
content_settings = data.get("content_settings", {})
metadata = data.get("metadata", {})
data_type = ""
if "hdi_isfolder" in metadata:
# only data-lake storage has `hdi_isfolder`
data_type = "directory"
elif content_settings.get("content_type") is not None:
# blob and data-lake storage have `content_settings`,
# and its value of the `content_type` must not be None
data_type = "file"
return {
"name": data.get("name", ""),
"size": data.get("size", ""),
"creation_time": data.get("creation_time", ""),
"last_modified": data.get("last_modified", ""),
"etag": data.get("etag", ""),
"content_type": content_settings.get("content_type", ""),
"type": data_type
}
def checksum(self, path: str) -> str:
"""
Blob and DataLake storage have etag.
Args:
path:
Returns:
etag
Raises:
KeyError: if info has no etag
"""
return self.info(path=path)["etag"]
def size(self, path) -> Optional[Union[int, str]]:
"""
Size in bytes of file
Args:
path:
Returns:
"""
return self.info(path).get("size")
def isdir(self, path) -> bool:
"""
Is this entry directory-like?
Args:
path:
Returns:
"""
try:
return self.info(path)["type"] == "directory"
except IOError:
return False
def isfile(self, path) -> bool:
"""
Is this entry file-like?
Args:
path:
Returns:
"""
try:
return self.info(path)["type"] == "file"
except IOError:
return False
def glob(self, pattern_path: str) -> List[str]:
"""
Currently only support ``* (wildcard)`` .
By default, ``glob()`` lists specified files with formatted-URL.
Args:
pattern_path: ex: ``https://<storage_account_name>.blob.core.windows.net/<container>/*/*.csv``
Returns:
lists specified files filtered by wildcard
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> path = "https://testazfs.blob.core.windows.net/test_container/some_folder"
ls() lists all files in some folder like
>>> azc.ls(path)
[
"test1.csv",
"test2.csv",
"test3.csv",
"test1.json",
"test2.json",
"directory_1",
"directory_2"
]
glob() lists specified files according to the wildcard, and lists with formatted-URL by default
>>> csv_pattern_path = "https://testazfs.blob.core.windows.net/test_container/some_folder/*.csv"
>>> azc.glob(path=csv_pattern_path)
[
"https://testazfs.blob.core.windows.net/test_container/some_folder/test1.csv",
"https://testazfs.blob.core.windows.net/test_container/some_folder/test2.csv",
"https://testazfs.blob.core.windows.net/test_container/some_folder/test3.csv"
]
glob() can use any path
>>> csv_pattern_path = "https://testazfs.blob.core.windows.net/test_container/some_folder/test1.*"
>>> azc.glob(path=csv_pattern_path)
[
"https://testazfs.blob.core.windows.net/test_container/some_folder/test1.csv",
"https://testazfs.blob.core.windows.net/test_container/some_folder/test1.json"
]
also deeper folders
>>> csv_pattern_path = "https://testazfs.blob.core.windows.net/test_container/some_folder/*/*.csv"
>>> azc.glob(path=csv_pattern_path)
[
"https://testazfs.blob.core.windows.net/test_container/some_folder/directory_1/deeper_test1.csv",
"https://testazfs.blob.core.windows.net/test_container/some_folder/directory_2/deeper_test2.csv"
]
Raises:
            AzfsInputError: when ``*`` is used in the root_folder under a container.
"""
if "*" not in pattern_path:
raise AzfsInputError("no any `*` in the `pattern_path`")
url, account_kind, container_name, file_path = BlobPathDecoder(pattern_path).get_with_url()
acceptable_folder_pattern = r"(?P<root_folder>[^\*]+)/(?P<folders>.*)"
result = re.match(acceptable_folder_pattern, file_path)
if result:
result_dict = result.groupdict()
root_folder = result_dict['root_folder']
else:
raise AzfsInputError(
f"Cannot use `*` in root_folder under a container. Accepted format is {acceptable_folder_pattern}"
)
# get container root path
base_path = f"{url}/{container_name}/"
if account_kind in ["dfs", "blob"]:
file_list = self._client.get_client(account_kind=account_kind).ls(path=base_path, file_path=root_folder)
# to escape special chars for regular-expression
def _escape(input_str: str) -> str:
special_chars = ["(", ")", "[", "]"]
for c in special_chars:
input_str = input_str.replace(c, f"\\{c}")
return input_str
escaped_pattern_path = _escape(pattern_path)
# fix pattern_path, in order to avoid matching `/`
replace_pattern_path = escaped_pattern_path.replace('*', '([^/])*?')
pattern = re.compile(f"{replace_pattern_path}$")
file_full_path_list = [f"{base_path}{f}" for f in file_list]
# filter with pattern.match
matched_full_path_list = [f for f in file_full_path_list if pattern.match(f)]
return matched_full_path_list
elif account_kind in ["queue"]:
raise NotImplementedError
def read(
self,
*,
path: Union[str, List[str]] = None,
use_mp: bool = False,
cpu_count: Optional[int] = None,
file_format: str = "csv") -> DataFrameReader:
"""
read csv, parquet, picke files in Azure Blob, like PySpark-method.
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
use_mp: Default, False
cpu_count: Default, as same as mp.cpu_count()
file_format: determined by which function you call
Returns:
pd.DataFrame
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> blob_path = "https://testazfs.blob.core.windows.net/test_container/test1.csv"
>>> df = azc.read().csv(blob_path)
# result is as same as azc.read_csv(blob_path)
>>> blob_path_list = [
... "https://testazfs.blob.core.windows.net/test_container/test1.csv",
... "https://testazfs.blob.core.windows.net/test_container/test2.csv"
... ]
>>> df = azc.read().csv(blob_path_list)
# result is as same as pd.concat([each data-frame])
# in addition, you can use `*`
>>> blob_path_pattern = "https://testazfs.blob.core.windows.net/test_container/test*.csv"
>>> df = azc.read().csv(blob_path_pattern)
# you can use multiprocessing with `use_mp` argument
>>> df = azc.read(use_mp=True).csv(blob_path_pattern)
# if you want to filter or apply some method, you can use your defined function as below
>>> def filter_function(_df: pd.DataFrame, _id: str) -> pd.DataFrame:
... return _df[_df['id'] == _id]
>>> df = azc.read(use_mp=True).apply(function=filter_function, _id="aaa").csv(blob_path_pattern)
"""
return DataFrameReader(
_azc=self,
credential=self._credential,
path=path,
use_mp=use_mp,
cpu_count=cpu_count,
file_format=file_format)
def _get(self, path: str, offset: int = None, length: int = None, **kwargs) -> Union[bytes, str, io.BytesIO, dict]:
"""
get data from Azure Blob Storage.
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
offset:
length:
**kwargs:
Returns:
some data
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> csv_path = "https://testazfs.blob.core.windows.net/test_container/test1.csv"
you can read csv file in azure blob storage
>>> data = azc.get(path=csv_path)
`download()` is same method as `get()`
>>> data = azc.download(path=csv_path)
"""
_, account_kind, _, _ = BlobPathDecoder(path).get_with_url()
file_bytes = self._client.get_client(
account_kind=account_kind).get(path=path, offset=offset, length=length, **kwargs)
        # gzip-compressed files are decompressed here before further reading
if path.endswith(".gz"):
file_bytes = gzip.decompress(file_bytes)
if type(file_bytes) is bytes:
file_to_read = io.BytesIO(file_bytes)
else:
file_to_read = file_bytes
return file_to_read
def read_line_iter(self, path: str) -> iter:
"""
To read text file in each line with iterator.
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
Returns:
get data of the path as iterator
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> csv_path = "https://testazfs.blob.core.windows.net/test_container/test1.csv"
>>> for l in azc.read_line_iter(path=csv_path)
... print(l.decode("utf-8"))
"""
_, account_kind, _, _ = BlobPathDecoder(path).get_with_url()
return TextReader(client=self._client.get_client(account_kind=account_kind), path=path)
def read_csv_chunk(self, path: str, chunk_size: int) -> pd.DataFrame:
"""
!WARNING! the method may differ from current version in the future update.
Currently, only support for csv.
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
chunk_size: pandas-DataFrame index length to read.
Returns:
first time: len(df.index) is `chunk_size - 1`
second time or later: len(df.index) is `chunk_size`
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> csv_path = "https://testazfs.blob.core.windows.net/test_container/test1.csv"
>>> read_chunk_size = 100
>>> for _df in azc.read_csv_chunk(path=csv_path, chunk_size=read_chunk_size):
... print(_df)
"""
warning_message = """
The method is under developing.
The name or the arguments may differ from current version in the future update.
"""
warnings.warn(warning_message, FutureWarning)
initial_line = ""
byte_list = []
for idx, l in enumerate(self.read_line_iter(path=path)):
div_idx = idx % chunk_size
if idx == 0:
initial_line = l
byte_list.append(initial_line)
else:
byte_list.append(l)
if div_idx + 1 == chunk_size:
file_to_read = (b"\n".join(byte_list))
file_to_io_read = io.BytesIO(file_to_read)
df = pd.read_csv(file_to_io_read)
yield df
byte_list = [initial_line]
# make remainder DataFrame after the for-loop
file_to_read = (b"\n".join(byte_list))
file_to_io_read = io.BytesIO(file_to_read)
df = pd.read_csv(file_to_io_read)
yield df
@_az_context_manager.register(_as="read_csv_az", _to=pd)
def read_csv(self, path: str, **kwargs) -> pd.DataFrame:
"""
get csv data as pd.DataFrame from Azure Blob Storage.
support ``csv`` and also ``csv.gz``.
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.csv``
**kwargs: keywords to put df.read_csv(), such as ``header``, ``encoding``.
Returns:
pd.DataFrame
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> csv_path = "https://testazfs.blob.core.windows.net/test_container/test1.csv"
you can read and write csv file in azure blob storage
>>> df = azc.read_csv(path=csv_path)
Using `with` statement, you can use `pandas`-like methods
>>> with azc:
>>> df = pd.read_csv_az(path)
"""
file_to_read = self._get(path)
return pd.read_csv(file_to_read, **kwargs)
@_az_context_manager.register(_as="read_table_az", _to=pd)
def read_table(self, path: str, **kwargs) -> pd.DataFrame:
"""
get tsv data as pd.DataFrame from Azure Blob Storage.
support ``tsv``.
Args:
path: Azure Blob path URL format, ex: ``https://testazfs.blob.core.windows.net/test_container/test1.tsv``
**kwargs: keywords to put df.read_csv(), such as ``header``, ``encoding``.
Returns:
pd.DataFrame
Examples:
>>> import azfs
>>> azc = azfs.AzFileClient()
>>> tsv_path = "https://testazfs.blob.core.windows.net/test_container/test1.tsv"
you can read and write csv file in azure blob storage
>>> df = azc.read_table(path=tsv_path)
Using `with` statement, you can use `pandas`-like methods
>>> with azc:
>>> df = pd.read_table_az(tsv_path)
"""
file_to_read = self._get(path)
        return pd.read_table(file_to_read, **kwargs)
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import array
import functools
import gzip
import operator
import os
import struct
import numpy as np
import pandas as pd
import requests
class DataManager:
_train_data_url: str
_train_labels_url: str
_test_data_url: str
_test_labels_url: str
_train_data_loc: str
_train_labels_loc: str
_test_data_loc: str
_test_labels_loc: str
_data_columns: []
_data_string_labels: []
def __init__(self):
self._train_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
self._test_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
self._train_data_loc = "systemds/examples/tutorials/adult/train_data.csv"
self._test_data_loc = "systemds/examples/tutorials/adult/test_data.csv"
self._data_columns = ["age", "workclass", "fnlwgt", "education", "education-num", "marital-status", "occupation",
"relationship", "race", "sex", "capital-gain", "capital-loss", "hours-per-week", "native-country",
"income"]
self._classification_features_labels = [{'workclass': ['Private', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov', 'Without-pay', 'Never-worked']},
{'education': ['Bachelors', 'Some-college', '11th', 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', '9th', '7th-8th', '12th', 'Masters', '1st-4th', '10th', 'Doctorate', '5th-6th', 'Preschool']},
{'marital-status': ['Married-civ-spouse', 'Divorced', 'Never-married', 'Separated', 'Widowed', 'Married-spouse-absent', 'Married-AF-spouse']},
{'occupation': ['Tech-support', 'Craft-repair', 'Other-service', 'Sales', 'Exec-managerial', 'Prof-specialty', 'Handlers-cleaners', 'Machine-op-inspct', 'Adm-clerical', 'Farming-fishing', 'Transport-moving', 'Priv-house-serv', 'Protective-serv', 'Armed-Forces']},
{'relationship': ['Wife', 'Own-child', 'Husband', 'Not-in-family', 'Other-relative', 'Unmarried']},
{'race': ['White', 'Asian-Pac-Islander', 'Amer-Indian-Eskimo', 'Other', 'Black']},
{'sex': ['Female', 'Male']},
{'native-country': ['United-States', 'Cambodia', 'England', 'Puerto-Rico', 'Canada', 'Germany', 'Outlying-US(Guam-USVI-etc)', 'India', 'Japan', 'Greece', 'South', 'China', 'Cuba', 'Iran', 'Honduras', 'Philippines', 'Italy', 'Poland', 'Jamaica', 'Vietnam', 'Mexico', 'Portugal', 'Ireland', 'France', 'Dominican-Republic', 'Laos', 'Ecuador', 'Taiwan', 'Haiti', 'Columbia', 'Hungary', 'Guatemala', 'Nicaragua', 'Scotland', 'Thailand', 'Yugoslavia', 'El-Salvador', 'Trinadad&Tobago', 'Peru', 'Hong', 'Holand-Netherlands']},
{'income': ['>50K', '<=50K']}]
def get_train_data(self) -> np.array:
self._get_data(self._train_data_url, self._train_data_loc)
return self._parse_data(self._train_data_loc)\
.drop(labels=self._data_columns[len(self._data_columns)-1], axis=1).to_numpy()
def get_train_labels(self) -> np.array:
self._get_data(self._train_data_url, self._train_data_loc)
data_list = self._data_columns.copy()
data_list.pop(len(self._data_columns)-1)
data = self._parse_data(self._train_data_loc).drop(labels=data_list, axis=1)
return data.to_numpy().flatten()
def get_test_data(self) -> np.array:
self._get_data(self._test_data_url, self._test_data_loc)
return self._parse_data(self._test_data_loc)\
.drop(labels=self._data_columns[len(self._data_columns)-1], axis=1).iloc[1:].to_numpy()
def get_test_labels(self) -> np.array:
self._get_data(self._test_data_url, self._test_data_loc)
data_list = self._data_columns.copy()
data_list.pop(len(self._data_columns)-1)
data = self._parse_data(self._test_data_loc).drop(labels=data_list, axis=1).iloc[1:]
data["income"] = data["income"].str.replace('>50K.','>50K', regex=False)
data["income"] = data["income"].str.replace('<=50K.','<=50K', regex=False)
return data.to_numpy().flatten()
def _parse_data(self, loc) -> pd.DataFrame:
return pd.read_csv(loc, header=None, names=self._data_columns)
def _get_data(self, url, loc):
if not os.path.isfile(loc):
myfile = requests.get(url)
folder = os.path.dirname(loc)
if not os.path.isdir(folder):
os.makedirs(folder)
with open(loc, 'wb') as f:
f.write(myfile.content)
def get_preprocessed_dataset(self, interpolate=False, standardize=False, dimred=0):
train_array = np.concatenate([self.get_train_data(), self.get_train_labels()[...,np.newaxis]], axis=1)
train_dataset = pd.DataFrame(train_array, columns=self._data_columns)
test_array = np.concatenate([self.get_test_data(), self.get_test_labels()[...,np.newaxis]], axis=1)
test_dataset = pd.DataFrame(test_array, columns=self._data_columns)
if not interpolate:
train_dataset = train_dataset[~(train_dataset.astype(str) == ' ?').any(1)]
test_dataset = test_dataset[~(test_dataset.astype(str) == ' ?').any(1)]
train_len = len(train_dataset)
combined_dataset = train_dataset.append(test_dataset, ignore_index=True, sort=False)
conditional_labels = [list(dic.keys())[0]for dic in self._classification_features_labels]
combined_dataset_frame = combined_dataset.copy().drop(labels=conditional_labels, axis=1)
combined_dataset_frame = combined_dataset_frame.apply(pd.to_numeric)
if standardize:
train_data = combined_dataset_frame.iloc[0:train_len,:]
test_data = combined_dataset_frame.iloc[train_len:,:]
train_mean = train_data.mean(axis=0)
train_std = train_data.std(axis=0)
train_data = (train_data - train_mean)/train_std
test_data = (test_data - train_mean)/train_std
combined_dataset_frame = train_data.append(test_data, ignore_index=True, sort=False)
for x in self._classification_features_labels:
#insert most common string
current_frame = combined_dataset[list(x.keys())[0]]
if interpolate:
most_common_category = current_frame.iloc[:train_len].mode()
current_frame = current_frame.str.replace(' ?', most_common_category.iloc[0], regex=False)
if dimred > 0 and dimred <= 1:
labels_percent = (current_frame.iloc[:train_len].value_counts() / train_len)
labels_to_combine = labels_percent.index[labels_percent < dimred]
current_frame = current_frame.str.replace("|".join(labels_to_combine), " other", regex=True)
converted_one_hot_column = pd.get_dummies(current_frame, prefix=x.keys())
            combined_dataset_frame = pd.concat([combined_dataset_frame, converted_one_hot_column], axis=1, join="outer", sort=False)
import numpy as np
import pandas as pd
class DataGenerator:
def __init__(self, file_path, names=None, features=None, labels=None):
        raw_data = pd.read_csv(file_path, names=names)
# Collection of functions to process and laod tables for visualisation
# for a set of schools whose data has been updated through syncthing_data
from math import ceil
from scripts.clix_platform_data_processing.get_static_vis_data import get_log_level_data, get_engagement_metrics
from scripts.clix_platform_data_processing.get_static_vis_data import get_num_days_tools, get_num_stud_tools, get_avgtime_perday_tools, get_studperday_tools
from scripts.clix_platform_data_processing.get_static_vis_data import get_avg_percnt_visits_modules, get_num_stud_modules, clean_code
import config.clix_config as clix_config
import time
from datetime import datetime
from airflow.models import Variable
import pandas
import json
from functools import reduce
from airflow.models import Variable
from airflow.models import TaskInstance
from airflow.models import DagBag
tools_modules_server_logs_datapath = clix_config.local_dst_state_data_logs
def load_to_db(metric_data):
pass
def partition(lst, n=clix_config.num_school_chunks):
if (len(lst) < n):
return [lst, [], [], []]
else:
division = len(lst) / n
return [lst[round(division * i):round(division * (i + 1))] for i in range(n)]
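# Example (illustrative, assuming clix_config.num_school_chunks == 4):
#   partition(list(range(10))) -> [[0, 1], [2, 3, 4], [5, 6, 7], [8, 9]]
# Lists shorter than n are returned as the whole list followed by empty chunks.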
def process_school_data(state, chunk, **context):
'''
Function to process tables for a set of schools whose
data has been updated through syncthing
'''
if state == 'tg':
state_new = 'ts'
elif state == 'ct':
state_new = 'cg'
else:
state_new = state
list_of_schools = context['ti'].xcom_pull(task_ids='sync_state_data_' + state_new, key = 'school_update_list')
schools_to_process = partition(list_of_schools)[chunk]
print(schools_to_process)
if schools_to_process:
#print('Got all schools')
#This date range is just to process latest data logs and then append them to already processed logs data
# for each state
date_range = ['2018-06-01', str(datetime.utcnow().date())]
#date_range = [Variable.get('prev_update_date_static_' + state), Variable.get('curr_update_date_static_' + state)]
schools_log_data = get_log_level_data(schools=schools_to_process, state=state, date_range=date_range)
# Save chunk of tools data of a state
tools_temp_path = tools_modules_server_logs_datapath + 'tools_temp' + '/' + state + '_' + str(chunk) + '.csv'
schools_log_data[0].to_csv(tools_temp_path, index=False)
# Save chunk of modules data of a state
modules_temp_path = tools_modules_server_logs_datapath + 'modules_temp' + '/' + state + '_' + str(chunk) + '.csv'
schools_log_data[1][0].to_csv(modules_temp_path, index=False)
# Save chunk of serverlogs data of a state
serverlogs_temp_path = tools_modules_server_logs_datapath + 'serverlogs_temp' + '/' + state + '_' + str(chunk) + '.json'
server_logs_data = {key: [each.strftime('%Y%m%d') for each in values] for key, values in schools_log_data[1][1].items()}
with open(serverlogs_temp_path, 'w', encoding='utf-8') as f:
json.dump(server_logs_data, f, ensure_ascii=True, indent=4)
f.close()
all_chunks = [*range(clix_config.num_school_chunks)]
all_chunks.remove(chunk)
try:
dag_bag = DagBag('/usr/local/airflow/dags/clix_static_visuals_dag.py')
target_dag = dag_bag.get_dag('clix_static_visuals_dag')
dr = target_dag.get_dagrun(target_dag.latest_execution_date)
ti_list = [dr.get_task_instance('process_raw_state_data_' + str(each) + '_' + state) for each in all_chunks]
except Exception as e:
import pdb
pdb.set_trace()
other_tasks_status = all([each.current_state() == 'success' for each in ti_list])
if other_tasks_status:
Variable.set('last_updated_date_static_' + state, datetime.utcnow().date())
else:
print('No schools to process for this task')
return None
def combine_chunks(state, **context):
list_of_data_chunks_tools = []
list_of_data_chunks_modules = []
list_of_data_chunks_serverlogs = []
for chunk in list(range(clix_config.num_school_chunks)):
tools_temp_path = tools_modules_server_logs_datapath + 'tools_temp/' + state + '_' + str(chunk) + '.csv'
list_of_data_chunks_tools.append(pandas.read_csv(tools_temp_path))
modules_temp_path = tools_modules_server_logs_datapath + 'modules_temp/' + state + '_' + str(chunk) + '.csv'
list_of_data_chunks_modules.append(pandas.read_csv(modules_temp_path))
serverlogs_temp_path = tools_modules_server_logs_datapath + 'serverlogs_temp/' + state + '_' + str(chunk) + '.json'
with open(serverlogs_temp_path, 'r', encoding='utf-8') as f:
list_of_data_chunks_serverlogs.append(json.load(f))
f.close()
#Combine and save tools data of a state
state_tools_logs_file = tools_modules_server_logs_datapath + 'tool_logs_' + state + '.csv'
pandas.concat(list_of_data_chunks_tools).to_csv(state_tools_logs_file)
#Combine and save modules data of a state
state_modules_logs_file = tools_modules_server_logs_datapath + 'module_logs_' + state + '.csv'
pandas.concat(list_of_data_chunks_modules).to_csv(state_modules_logs_file)
#Combine and save serverlog file of a state
state_server_logs_file = tools_modules_server_logs_datapath + 'server_logs_' + state + '.json'
with open(state_server_logs_file, 'w', encoding='utf-8') as fp:
server_logs_data = reduce(lambda x, y: x.update(y) or x, list_of_data_chunks_serverlogs)
json.dump(server_logs_data, fp, ensure_ascii=True, indent=4)
fp.close()
return None
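# Note on the reduce used above when merging server-log chunks: dict.update() returns None,
# so `x.update(y) or x` evaluates to the accumulated dict x (mutating the first chunk in place).
# Illustrative sketch:
#   >>> from functools import reduce
#   >>> reduce(lambda x, y: x.update(y) or x, [{'a': 1}, {'b': 2}, {'c': 3}])
#   {'a': 1, 'b': 2, 'c': 3}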
def get_state_static_vis_data(state, all_states_flag, **context):
months_list = Variable.get('static_vis_range', deserialize_json=True)['months_list']
# Get all the data files required for vis data generation
if not all_states_flag:
state_tools_logs_file = tools_modules_server_logs_datapath + 'tool_logs_' + state + '.csv'
state_tools_data = | pandas.read_csv(state_tools_logs_file) | pandas.read_csv |
import pandas as pd
from datetime import timedelta, datetime
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import warnings
warnings.filterwarnings("ignore")
from acquire import get_store_data
# plotting defaults
plt.rc('figure', figsize=(13, 7))
plt.style.use('seaborn-whitegrid')
plt.rc('font', size=16)
######### PREPARE STORE DATA ##############
def prepare():
'''
    This function acquires store data, converts sale_date to datetime, reindexes on it, and adds engineered columns.
'''
df = get_store_data()
print('Data acquired...')
print('Converting to datetime')
df.sale_date = pd.to_datetime(df.sale_date)
df = df.set_index("sale_date").sort_index()
print('Sale date set to datetime and reindexed...')
df['month'] = df.index.month
df['weekday'] = df.index.day_name()
    df['sales_total'] = df.sale_amount * df.item_price
print('Added engineered columns...')
print('Prepare complete')
return df
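# Minimal usage sketch (assumes the store data source behind acquire.get_store_data() is reachable):
#   >>> df = prepare()
#   >>> df[['month', 'weekday', 'sales_total']].head()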
########## PREPARE POWER DATA #########################
def prepare2():
'''
    This function imports a CSV, converts the date column to datetime, reindexes on it, adds month and year columns, and forward/backfills missing values.
'''
df = | pd.read_csv("https://raw.githubusercontent.com/jenfly/opsd/master/opsd_germany_daily.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so nothing is defined; values should remain
        # unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = | Series(['fooBAD__barBAD', NA, 'foo']) | pandas.Series |
from abc import ABC, abstractmethod
import logging
import os
import tempfile
import pandas as pd
import tensorflow as tf
from .. import normalisation
from ..vector_model import VectorRegressionModel
log = logging.getLogger(__name__)
class TensorFlowSession:
session = None
_isKerasSessionSet = False
@classmethod
def configureSession(cls, gpuAllowGrowth=True, gpuPerProcessMemoryFraction=None):
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = gpuAllowGrowth # dynamically grow the memory used on the GPU
tf_config.log_device_placement = False
if gpuPerProcessMemoryFraction is not None:
tf_config.gpu_options.per_process_gpu_memory_fraction = gpuPerProcessMemoryFraction # in case we get CUDNN_STATUS_INTERNAL_ERROR
cls.session = tf.compat.v1.Session(config=tf_config)
@classmethod
def setKerasSession(cls, allowDefault=True):
"""
        Sets the (previously configured) session for use with keras if it has not previously been set.
        If no session has been configured, the parameter allowDefault controls whether it is admissible to create a session with default parameters.
        :param allowDefault: whether to create a new session with default parameters if none was previously configured.
"""
if cls.session is None:
if allowDefault:
log.info("No TensorFlow session was configured. Creating a new session with default values.")
cls.configureSession()
else:
raise Exception(f"The session has not yet been configured. Call {cls.__name__}.{cls.configureSession.__name__} beforehand")
if not cls._isKerasSessionSet:
tf.keras.backend.set_session(cls.session)
cls._isKerasSessionSet = True
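# Typical usage sketch (the memory fraction below is an illustrative value, not a recommendation):
# configure the session once, then bind it to keras before building or training models.
#   TensorFlowSession.configureSession(gpuAllowGrowth=True, gpuPerProcessMemoryFraction=0.5)
#   TensorFlowSession.setKerasSession()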
class KerasVectorRegressionModel(VectorRegressionModel, ABC):
"""An abstract simple model which maps vectors to vectors and works on pandas.DataFrames (for inputs and outputs)"""
def __init__(self, normalisationMode: normalisation.NormalisationMode, loss, metrics, optimiser,
batchSize=64, epochs=1000, validationFraction=0.2):
"""
        :param normalisationMode: the mode with which to normalise input and output data
        :param loss: the loss function to optimise (passed to keras model.compile)
        :param metrics: the keras metrics to track during training
        :param optimiser: the keras optimiser to use
        :param batchSize: the batch size to use during training
        :param epochs: the number of epochs to train for (the best weights by validation loss are kept)
        :param validationFraction: the fraction of the data to hold out for validation
"""
super().__init__()
self.normalisationMode = normalisationMode
self.batchSize = batchSize
self.epochs = epochs
self.optimiser = optimiser
self.loss = loss
self.metrics = list(metrics)
self.validationFraction = validationFraction
self.model = None
self.inputScaler = None
self.outputScaler = None
self.trainingHistory = None
def __str__(self):
params = dict(normalisationMode=self.normalisationMode, optimiser=self.optimiser, loss=self.loss, metrics=self.metrics,
epochs=self.epochs, validationFraction=self.validationFraction, batchSize=self.batchSize)
return f"{self.__class__.__name__}{params}"
@abstractmethod
def _createModel(self, inputDim, outputDim):
"""
Creates a keras model
:param inputDim: the number of input dimensions
:param outputDim: the number of output dimensions
:return: the model
"""
pass
def _fit(self, inputs: pd.DataFrame, outputs: pd.DataFrame):
# normalise data
self.inputScaler = normalisation.VectorDataScaler(inputs, self.normalisationMode)
self.outputScaler = normalisation.VectorDataScaler(outputs, self.normalisationMode)
normInputs = self.inputScaler.getNormalisedArray(inputs)
normOutputs = self.outputScaler.getNormalisedArray(outputs)
# split data into training and validation set
trainSplit = int(normInputs.shape[0] * (1-self.validationFraction))
trainInputs = normInputs[:trainSplit]
trainOutputs = normOutputs[:trainSplit]
valInputs = normInputs[trainSplit:]
valOutputs = normOutputs[trainSplit:]
# create and fit model
TensorFlowSession.setKerasSession()
model = self._createModel(inputs.shape[1], outputs.shape[1])
model.compile(optimizer=self.optimiser, loss=self.loss, metrics=self.metrics)
tempFileHandle, tempFilePath = tempfile.mkstemp(".keras.model")
try:
os.close(tempFileHandle)
checkpointCallback = tf.keras.callbacks.ModelCheckpoint(tempFilePath, monitor='val_loss', save_best_only=True, save_weights_only=True)
self.trainingHistory = model.fit(trainInputs, trainOutputs, batch_size=self.batchSize, epochs=self.epochs, verbose=2,
validation_data=(valInputs, valOutputs), callbacks=[checkpointCallback])
model.load_weights(tempFilePath)
finally:
os.unlink(tempFilePath)
self.model = model
def _predict(self, inputs: pd.DataFrame) -> pd.DataFrame:
X = self.inputScaler.getNormalisedArray(inputs)
Y = self.model.predict(X)
Y = self.outputScaler.getDenormalisedArray(Y)
return | pd.DataFrame(Y, columns=self.outputScaler.dimensionNames) | pandas.DataFrame |
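# Illustrative concrete subclass (a minimal sketch, not part of the original module):
# a small fully-connected network for regression. The layer sizes, activation, loss,
# metrics and optimiser chosen here are assumptions made for the example only.
class KerasMLPRegressionModel(KerasVectorRegressionModel):
    def __init__(self, normalisationMode: normalisation.NormalisationMode, hiddenDims=(64, 64), **kwargs):
        super().__init__(normalisationMode, loss="mse", metrics=["mae"], optimiser="adam", **kwargs)
        self.hiddenDims = hiddenDims

    def _createModel(self, inputDim, outputDim):
        # simple MLP: input -> ReLU hidden layers -> linear output of size outputDim
        inputs = tf.keras.Input(shape=(inputDim,))
        x = inputs
        for dim in self.hiddenDims:
            x = tf.keras.layers.Dense(dim, activation="relu")(x)
        outputs = tf.keras.layers.Dense(outputDim)(x)
        return tf.keras.Model(inputs=inputs, outputs=outputs)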
""" test feather-format compat """
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.io.feather_format import read_feather, to_feather # isort:skip
pyarrow = pytest.importorskip("pyarrow", minversion="1.0.1")
filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse")
@filter_sparse
@pytest.mark.single_cpu
@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
class TestFeather:
def check_error_on_write(self, df, exc, err_msg):
# check that we are raising the exception
# on writing
with pytest.raises(exc, match=err_msg):
with tm.ensure_clean() as path:
to_feather(df, path)
def check_external_error_on_write(self, df):
# check that we are raising the exception
# on writing
with tm.external_error_raised(Exception):
with tm.ensure_clean() as path:
to_feather(df, path)
def check_round_trip(self, df, expected=None, write_kwargs={}, **read_kwargs):
if expected is None:
expected = df
with tm.ensure_clean() as path:
to_feather(df, path, **write_kwargs)
result = read_feather(path, **read_kwargs)
tm.assert_frame_equal(result, expected)
def test_error(self):
msg = "feather only support IO with DataFrames"
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, ValueError, msg)
def test_basic(self):
df = pd.DataFrame(
{
"string": list("abc"),
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_null": [1.0, np.nan, 3],
"bool": [True, False, True],
"bool_with_null": [True, np.nan, False],
"cat": pd.Categorical(list("abc")),
"dt": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3)), freq=None
),
"dttz": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3, tz="US/Eastern")),
freq=None,
),
"dt_with_null": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
"dtns": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3, freq="ns")), freq=None
),
}
)
df["periods"] = pd.period_range("2013", freq="M", periods=3)
df["timedeltas"] = pd.timedelta_range("1 day", periods=3)
df["intervals"] = pd.interval_range(0, 3, 3)
assert df.dttz.dtype.tz.zone == "US/Eastern"
self.check_round_trip(df)
def test_duplicate_columns(self):
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_external_error_on_write(df)
def test_stringify_columns(self):
df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy()
msg = "feather must have string column names"
self.check_error_on_write(df, ValueError, msg)
def test_read_columns(self):
# GH 24025
df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list(range(1, 4)),
"col3": list("xyz"),
"col4": list(range(4, 7)),
}
)
columns = ["col1", "col3"]
self.check_round_trip(df, expected=df[columns], columns=columns)
    def test_read_columns_different_order(self):
# GH 33878
df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
self.check_round_trip(df, columns=["B", "A"])
def test_unsupported_other(self):
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
self.check_external_error_on_write(df)
def test_rw_use_threads(self):
df = pd.DataFrame({"A": np.arange(100000)})
self.check_round_trip(df, use_threads=True)
self.check_round_trip(df, use_threads=False)
def test_write_with_index(self):
df = pd.DataFrame({"A": [1, 2, 3]})
self.check_round_trip(df)
msg = (
r"feather does not support serializing .* for the index; "
r"you can \.reset_index\(\) to make the index into column\(s\)"
)
# non-default index
for index in [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]),
]:
df.index = index
self.check_error_on_write(df, ValueError, msg)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
msg = "feather does not serialize index meta-data on a default index"
self.check_error_on_write(df, ValueError, msg)
# column multi-index
df.index = [0, 1, 2]
df.columns = | pd.MultiIndex.from_tuples([("a", 1)]) | pandas.MultiIndex.from_tuples |
from tkinter import *
import pandas
import random
BACKGROUND_COLOR = "#B1DDC6"
FONT_NAME = "COMIC SANS MS"
current_card = {}
to_learn = {}
# --------------------------------------- FETCH DATA FROM CSV ---------------------------------------- #
try:
data = | pandas.read_csv("data/words_to_learn.csv") | pandas.read_csv |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 8 19:49:40 2017
print Baidu Map
@author: luminous
"""
import pandas as pd
"""implore data"""
res_file = open("k_means_res.txt", "r")
#res_file = open("dbscan_res.txt", "r")
k = int(res_file.readline())
str_label = res_file.readline()
res_file.close()
label = str_label.split(" ")
"""remove the last space"""
if label[len(label) - 1] == "":
label.pop(len(label) - 1)
"""implore original gps data"""
gps_data = pd.read_csv("Traj_1000_SH_GPS")
gps_data = | pd.DataFrame(gps_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = | tm.box_expected(idx, box) | pandas.util.testing.box_expected |
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.style_render import (
_get_level_lengths,
_get_trimming_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_df():
return DataFrame(
[[1, 2], [3, 4]],
index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
dtype=int,
)
@pytest.fixture
def mi_styler(mi_df):
return Styler(mi_df, uuid_len=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
# comprehensively add features to mi_styler
mi_styler = mi_styler._copy(deepcopy=True)
mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
mi_styler.uuid_len = 5
mi_styler.uuid = "abcde"
mi_styler.set_caption("capt")
mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
mi_styler.hide(axis="columns")
mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
mi_styler.hide(axis="index")
mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
mi_styler.set_table_attributes('class="box"')
mi_styler.format(na_rep="MISSING", precision=3)
mi_styler.format_index(precision=2, axis=0)
mi_styler.format_index(precision=4, axis=1)
mi_styler.highlight_max(axis=None)
mi_styler.applymap_index(lambda x: "color: white;", axis=0)
mi_styler.applymap_index(lambda x: "color: black;", axis=1)
mi_styler.set_td_classes(
DataFrame(
[["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
)
)
mi_styler.set_tooltips(
DataFrame(
[["a2", "b2"], ["a2", "c2"]],
index=mi_styler.index,
columns=mi_styler.columns,
)
)
return mi_styler
@pytest.mark.parametrize(
"sparse_columns, exp_cols",
[
(
True,
[
{"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
{"is_visible": False, "attributes": "", "value": "c0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "c0"},
{"is_visible": True, "attributes": "", "value": "c0"},
],
),
],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
ctx = mi_styler._translate(True, sparse_columns)
assert exp_cols[0].items() <= ctx["head"][0][2].items()
assert exp_cols[1].items() <= ctx["head"][0][3].items()
assert exp_l1_c0.items() <= ctx["head"][1][2].items()
assert exp_l1_c1.items() <= ctx["head"][1][3].items()
@pytest.mark.parametrize(
"sparse_index, exp_rows",
[
(
True,
[
{"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
{"is_visible": False, "attributes": "", "value": "i0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "i0"},
{"is_visible": True, "attributes": "", "value": "i0"},
],
),
],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
ctx = mi_styler._translate(sparse_index, True)
assert exp_rows[0].items() <= ctx["body"][0][0].items()
assert exp_rows[1].items() <= ctx["body"][1][0].items()
assert exp_l1_r0.items() <= ctx["body"][0][1].items()
assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
with pd.option_context("styler.sparse.index", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.index", True):
html2 = mi_styler.to_html()
assert html1 != html2
with pd.option_context("styler.sparse.columns", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.columns", True):
html2 = mi_styler.to_html()
assert html1 != html2
@pytest.mark.parametrize(
"rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
[
(100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
(1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
(4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
(1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
(4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
(100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts
],
)
def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
rn, cn = _get_trimming_maximums(
rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
)
assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_rows", 3),
],
)
def test_render_trimming_rows(option, val):
# test auto and specific trimming of rows
df = DataFrame(np.arange(120).reshape(60, 2))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 3 # index + 2 data cols
assert len(ctx["body"]) == 4 # 3 data rows + trimming row
assert len(ctx["body"][0]) == 3 # index + 2 data cols
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_columns", 2),
],
)
def test_render_trimming_cols(option, val):
# test auto and specific trimming of cols
df = DataFrame(np.arange(30).reshape(3, 10))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
assert len(ctx["body"]) == 3 # 3 data rows
assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
def test_render_trimming_mi():
midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
with pd.option_context("styler.render.max_elements", 4):
ctx = df.style._translate(True, True)
assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
assert len(ctx["body"]) == 3 # 2 data rows + trimming row
assert len(ctx["head"][0]) == 5 # 2 indexes + 2 column headers + trimming col
assert {"attributes": 'colspan="2"'}.items() <= ctx["head"][0][2].items()
def test_render_empty_mi():
# GH 43305
df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
expected = dedent(
"""\
>
<thead>
<tr>
<th class="index_name level0" > </th>
<th class="index_name level1" >one</th>
</tr>
</thead>
"""
)
assert expected in df.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepcopy", [True, False])
def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
styler = mi_styler_comp if comprehensive else mi_styler
styler.uuid_len = 5
s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
assert s2 is not styler
if render:
styler.to_html()
excl = [
"na_rep", # deprecated
"precision", # deprecated
"cellstyle_map", # render time vars..
"cellstyle_map_columns",
"cellstyle_map_index",
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
if not deepcopy: # check memory locations are equal for all included attributes
for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else: # check memory locations are different for nested or mutable vars
shallow = [
"data",
"columns",
"index",
"uuid_len",
"uuid",
"caption",
"cell_ids",
"hide_index_",
"hide_columns_",
"hide_index_names",
"hide_column_names",
"table_attributes",
]
for attr in shallow:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
for attr in [
a
for a in styler.__dict__
if (not callable(a) and a not in excl and a not in shallow)
]:
if getattr(s2, attr) is None:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else:
assert id(getattr(s2, attr)) != id(getattr(styler, attr))
def test_clear(mi_styler_comp):
# NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
# to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
# GH 40675
styler = mi_styler_comp
styler._compute() # execute applied methods
clean_copy = Styler(styler.data, uuid=styler.uuid)
excl = [
"data",
"index",
"columns",
"uuid",
"uuid_len", # uuid is set to be the same on styler and clean_copy
"cell_ids",
"cellstyle_map", # execution time only
"cellstyle_map_columns", # execution time only
"cellstyle_map_index", # execution time only
"precision", # deprecated
"na_rep", # deprecated
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
# tests vars are not same vals on obj and clean copy before clear (except for excl)
for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert not (all(res) if (hasattr(res, "__iter__") and len(res) > 0) else res)
# test vars have same vales on obj and clean copy after clearing
styler.clear()
for attr in [a for a in styler.__dict__ if not (callable(a))]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
exp_attrs = [
"_todo",
"hide_index_",
"hide_index_names",
"hide_columns_",
"hide_column_names",
"table_attributes",
"table_styles",
"css",
]
for attr in exp_attrs:
check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
assert not (
all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
)
export = mi_styler_comp.export()
used = mi_styler.use(export)
for attr in exp_attrs:
check = getattr(used, attr) == getattr(mi_styler_comp, attr)
assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
used.to_html()
def test_hide_raises(mi_styler):
msg = "`subset` and `level` cannot be passed simultaneously"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", subset="something", level="something else")
msg = "`level` must be of type `int`, `str` or list of such"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
assert len(ctx["head"][0]) == 3
assert len(ctx["head"][1]) == 3
assert len(ctx["head"][2]) == 4
assert ctx["head"][2][0]["is_visible"]
assert not ctx["head"][2][1]["is_visible"]
assert ctx["body"][0][0]["is_visible"]
assert not ctx["body"][0][1]["is_visible"]
assert ctx["body"][1][0]["is_visible"]
assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
mi_styler.columns.names = ["zero", "one"]
if names:
mi_styler.index.names = ["zero", "one"]
ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
assert len(ctx["head"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["applymap", "apply"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header(method, axis):
# GH 41893
df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
func = {
"apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
"applymap": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
}
# test execution added to todo
result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
assert len(result._todo) == 1
assert len(getattr(result, f"ctx_{axis}")) == 0
# test ctx object on compute
result._compute()
expected = {
(0, 0): [("attr", "val")],
}
assert getattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["apply", "applymap"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header_mi(mi_styler, method, axis):
# GH 41893
func = {
"apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
"applymap": lambda v: "attr: val" if "b" in v else "",
}
result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
expected = {(1, 1): [("attr", "val")]}
assert getattr(result, f"ctx_{axis}") == expected
def test_apply_map_header_raises(mi_styler):
# GH 41893
with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
mi_styler.applymap_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({"A": np.random.permutation(range(6))})
self.df = DataFrame({"A": [0, 1], "B": np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo="bar"):
return pd.Series(f"color: {foo}", index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = DataFrame({"A": ["color: red", "color: blue"]})
self.dataframes = [
self.df,
DataFrame(
{"f": [1.0, 2.0], "o": ["a", "b"], "c": pd.Categorical(["a", "b"])}
),
]
self.blank_value = " "
def test_init_non_pandas(self):
msg = "``data`` must be a Series or DataFrame"
with pytest.raises(TypeError, match=msg):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_repr_html_mathjax(self):
# gh-19824 / 41395
assert "tex2jax_ignore" not in self.styler._repr_html_()
with pd.option_context("styler.html.mathjax", False):
assert "tex2jax_ignore" in self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi_and_trailing_semi(self):
attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
self.styler._update_ctx(attrs)
expected = {
(0, 0): [("color", "red"), ("foo", "bar")],
(1, 0): [("color", "blue"), ("foo", "baz")],
}
assert self.styler.ctx == expected
def test_render(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_multiple_render(self):
# GH 39396
s = Styler(self.df, uuid_len=0).applymap(lambda x: "color: red;", subset=["A"])
s.to_html() # do 2 renders to ensure css styles not duplicated
assert (
'<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
" color: red;\n}\n</style>" in s.to_html()
)
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.to_html()
# An index but no columns
DataFrame(columns=["a"]).style.to_html()
# A column but no index
DataFrame(index=["a"]).style.to_html()
# No IndexError raised?
def test_render_double(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(
["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_set_properties(self):
df = DataFrame({"A": [0, 1]})
result = df.style.set_properties(color="white", size="10px")._compute().ctx
# order is deterministic
v = [("color", "white"), ("size", "10px")]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = DataFrame({"A": [0, 1]})
result = (
df.style.set_properties(subset=pd.IndexSlice[0, "A"], color="white")
._compute()
.ctx
)
expected = {(0, 0): [("color", "white")]}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.style._translate(True, True)
assert len(result["head"]) == 1
expected = {
"class": "blank level0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
}
assert expected.items() <= result["head"][0][0].items()
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index("A").style._translate(True, True)
expected = {
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
}
assert expected.items() <= result["head"][1][0].items()
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index(["A", "B"]).style._translate(True, True)
expected = [
{
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
},
{
"class": "index_name level1",
"type": "th",
"value": "B",
"is_visible": True,
"display_value": "B",
},
{
"class": "blank col0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
},
]
assert result["head"][1] == expected
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = DataFrame({0: [1, 2, 3]})
df.style._translate(True, True)
def test_apply_axis(self):
df = DataFrame({"A": [0, 0], "B": [1, 1]})
f = lambda x: [f"val: {x.max()}" for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {
(0, 0): [("val", "1")],
(0, 1): [("val", "1")],
(1, 0): [("val", "1")],
(1, 1): [("val", "1")],
}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {
(0, 0): [("val", "0")],
(0, 1): [("val", "1")],
(1, 0): [("val", "0")],
(1, 1): [("val", "1")],
}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_series_return(self, axis):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
# test Series return where len(Series) < df.index or df.columns but labels OK
func = lambda s: pd.Series(["color: red;"], index=["Y"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
# test Series return where labels align but different order
func = lambda s: pd.Series(["color: red;", "color: blue;"], index=["Y", "X"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(0, 0)] == [("color", "blue")]
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
assert result[(axis, 1 - axis)] == [("color", "blue")]
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
def test_apply_dataframe_return(self, index, columns):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
idxs = ["X", "Y"] if index else ["Y"]
cols = ["X", "Y"] if columns else ["Y"]
df_styles = DataFrame("color: red;", index=idxs, columns=cols)
result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")] # (Y,Y) styles always present
assert (result[(0, 1)] == [("color", "red")]) is index # (X,Y) only if index
assert (result[(1, 0)] == [("color", "red")]) is columns # (Y,X) only if cols
assert (result[(0, 0)] == [("color", "red")]) is (index and columns) # (X,X)
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:],
pd.IndexSlice[:, ["A"]],
pd.IndexSlice[[1], :],
pd.IndexSlice[[1], ["A"]],
pd.IndexSlice[:2, ["A", "B"]],
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_subset(self, slice_, axis):
result = (
self.df.style.apply(self.h, axis=axis, subset=slice_, foo="baz")
._compute()
.ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:],
pd.IndexSlice[:, ["A"]],
pd.IndexSlice[[1], :],
pd.IndexSlice[[1], ["A"]],
pd.IndexSlice[:2, ["A", "B"]],
],
)
def test_applymap_subset(self, slice_):
result = (
self.df.style.applymap(lambda x: "color:baz;", subset=slice_)._compute().ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:, pd.IndexSlice["x", "A"]],
pd.IndexSlice[:, pd.IndexSlice[:, "A"]],
pd.IndexSlice[:, pd.IndexSlice[:, ["A", "C"]]], # missing col element
pd.IndexSlice[pd.IndexSlice["a", 1], :],
pd.IndexSlice[pd.IndexSlice[:, 1], :],
pd.IndexSlice[pd.IndexSlice[:, [1, 3]], :], # missing row element
pd.IndexSlice[:, ("x", "A")],
pd.IndexSlice[("a", 1), :],
],
)
def test_applymap_subset_multiindex(self, slice_):
# GH 19861
# edited for GH 33562
warn = None
msg = "indexing on a MultiIndex with a nested sequence of labels"
if (
isinstance(slice_[-1], tuple)
and isinstance(slice_[-1][-1], list)
and "C" in slice_[-1][-1]
):
warn = FutureWarning
elif (
isinstance(slice_[0], tuple)
and isinstance(slice_[0][1], list)
and 3 in slice_[0][1]
):
warn = FutureWarning
idx = MultiIndex.from_product([["a", "b"], [1, 2]])
col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
df = DataFrame(np.random.rand(4, 4), columns=col, index=idx)
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
df.style.applymap(lambda x: "color: red;", subset=slice_).to_html()
def test_applymap_subset_multiindex_code(self):
# https://github.com/pandas-dev/pandas/issues/25858
        # Checks styler.applymap works with a MultiIndex when codes are provided
codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
columns = MultiIndex(
levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
)
df = DataFrame(
[[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
)
pct_subset = pd.IndexSlice[:, pd.IndexSlice[:, "%":"%"]]
def color_negative_red(val):
color = "red" if val < 0 else "black"
return f"color: {color}"
df.loc[pct_subset]
df.style.applymap(color_negative_red, subset=pct_subset)
def test_empty(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0"]},
{"props": [("", "")], "selectors": ["row1_col0"]},
]
assert result == expected
def test_duplicate(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
]
assert result == expected
def test_init_with_na_rep(self):
# GH 21527 28358
df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
ctx = Styler(df, na_rep="NA")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "NA"
assert ctx["body"][0][2]["display_value"] == "NA"
def test_caption(self):
styler = Styler(self.df, caption="foo")
result = styler.to_html()
assert all(["caption" in result, "foo" in result])
styler = self.df.style
result = styler.set_caption("baz")
assert styler is result
assert styler.caption == "baz"
def test_uuid(self):
styler = Styler(self.df, uuid="abc123")
result = styler.to_html()
assert "abc123" in result
styler = self.df.style
result = styler.set_uuid("aaa")
assert result is styler
assert result.uuid == "aaa"
def test_unique_id(self):
# See https://github.com/pandas-dev/pandas/issues/16780
df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
result = df.style.to_html(uuid="test")
assert "test" in result
ids = re.findall('id="(.*?)"', result)
assert np.unique(ids).size == len(ids)
def test_table_styles(self):
style = [{"selector": "th", "props": [("foo", "bar")]}] # default format
styler = Styler(self.df, table_styles=style)
result = " ".join(styler.to_html().split())
assert "th { foo: bar; }" in result
styler = self.df.style
result = styler.set_table_styles(style)
assert styler is result
assert styler.table_styles == style
# GH 39563
style = [{"selector": "th", "props": "foo:bar;"}] # css string format
styler = self.df.style.set_table_styles(style)
result = " ".join(styler.to_html().split())
assert "th { foo: bar; }" in result
def test_table_styles_multiple(self):
ctx = self.df.style.set_table_styles(
[
{"selector": "th,td", "props": "color:red;"},
{"selector": "tr", "props": "color:green;"},
]
)._translate(True, True)["table_styles"]
assert ctx == [
{"selector": "th", "props": [("color", "red")]},
{"selector": "td", "props": [("color", "red")]},
{"selector": "tr", "props": [("color", "green")]},
]
def test_table_styles_dict_multiple_selectors(self):
# GH 44011
result = self.df.style.set_table_styles(
[{"selector": "th,td", "props": [("border-left", "2px solid black")]}]
)._translate(True, True)["table_styles"]
expected = [
{"selector": "th", "props": [("border-left", "2px solid black")]},
{"selector": "td", "props": [("border-left", "2px solid black")]},
]
assert result == expected
def test_maybe_convert_css_to_tuples(self):
expected = [("a", "b"), ("c", "d e")]
assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected
expected = []
assert maybe_convert_css_to_tuples("") == expected
def test_maybe_convert_css_to_tuples_err(self):
msg = "Styles supplied as string must follow CSS rule formats"
with pytest.raises(ValueError, match=msg):
maybe_convert_css_to_tuples("err")
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.to_html()
assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).to_html()
assert 'class="foo" data-bar' in result
def test_apply_none(self):
def f(x):
return DataFrame(
np.where(x == x.max(), "color: red", ""),
index=x.index,
columns=x.columns,
)
result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
def test_trim(self):
result = self.df.style.to_html() # trim=True
assert result.count("#") == 0
result = self.df.style.highlight_max().to_html()
assert result.count("#") == len(self.df.columns)
def test_export(self):
f = lambda x: "color: red" if x > 0 else "color: blue"
g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
style1 = self.styler
style1.applymap(f).applymap(g, z="b").highlight_max()._compute() # = render
result = style1.export()
style2 = self.df.style
style2.use(result)
assert style1._todo == style2._todo
style2.to_html()
def test_bad_apply_shape(self):
df = DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])
msg = "resulted in the apply method collapsing to a Series."
with pytest.raises(ValueError, match=msg):
df.style._apply(lambda x: "x")
msg = "created invalid {} labels"
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: [""])
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: ["", "", "", ""])
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: pd.Series(["a:v;", ""], index=["A", "C"]), axis=0)
with pytest.raises(ValueError, match=msg.format("columns")):
df.style._apply(lambda x: ["", "", ""], axis=1)
with pytest.raises(ValueError, match=msg.format("columns")):
df.style._apply(lambda x: pd.Series(["a:v;", ""], index=["X", "Z"]), axis=1)
msg = "returned ndarray with wrong shape"
with pytest.raises(ValueError, match=msg):
df.style._apply(lambda x: np.array([[""], [""]]), axis=None)
def test_apply_bad_return(self):
def f(x):
return ""
df = DataFrame([[1, 2], [3, 4]])
msg = (
"must return a DataFrame or ndarray when passed to `Styler.apply` "
"with axis=None"
)
with pytest.raises(TypeError, match=msg):
df.style._apply(f, axis=None)
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_bad_labels(self, axis):
def f(x):
return DataFrame(**{axis: ["bad", "labels"]})
df = DataFrame([[1, 2], [3, 4]])
msg = f"created invalid {axis} labels."
with pytest.raises(ValueError, match=msg):
df.style._apply(f, axis=None)
def test_get_level_lengths(self):
index = MultiIndex.from_product([["a", "b"], [0, 1, 2]])
expected = {
(0, 0): 3,
(0, 3): 3,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
result = _get_level_lengths(index, sparsify=True, max_index=100)
tm.assert_dict_equal(result, expected)
expected = {
(0, 0): 1,
(0, 1): 1,
(0, 2): 1,
(0, 3): 1,
(0, 4): 1,
(0, 5): 1,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
result = _get_level_lengths(index, sparsify=False, max_index=100)
tm.assert_dict_equal(result, expected)
def test_get_level_lengths_un_sorted(self):
index = | MultiIndex.from_arrays([[1, 1, 2, 1], ["a", "b", "b", "d"]]) | pandas.MultiIndex.from_arrays |
# coding=utf-8
# Author: <NAME>
# Date: Jul 17, 2019
#
# Description: Merges DM Selected Genes with DM Screening Data
#
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from utils import ensurePathExists
def map_multiple_ids(x, d):
x = x.split(',')
return ','.join([d[i] for i in x])
if __name__ == '__main__':
# pipeline = 'all3-conserved' # 'all3-conserved' or 'all3-pooling-DM'
# Screened Data (From Experimental Analysis)
dfSc = pd.read_csv('data/conserved_DM_screened_2019-11-22.csv', index_col=0)
dfSp = pd.read_csv('data/pooling_DM_screened_2019-11-22.csv', index_col=0)
dfS = pd.concat([dfSc, dfSp], axis='index', join='outer').drop_duplicates()
# DGE Genes per pipeline
dfC = pd.read_csv('../2-core_genes/results/all3-conserved/DM_meiotic_genes.csv', index_col=0)
dfP = pd.read_csv('../2-core_genes/results/all3-pooling-DM/DM_meiotic_genes.csv', index_col=0)
####
# Meta Genes (FOR ANALYSIS SIMPLICITY)
dfM = pd.read_csv('../2-core_genes/results/all3-pooling-DM/meta_meiotic_genes.csv', index_col='id_eggnog')
dfM_HS = pd.read_csv('../2-core_genes/results/all3-pooling-DM/HS_meiotic_genes.csv', index_col='id_string', usecols=['id_gene', 'id_string', 'gene'])
dfM_MM = pd.read_csv('../2-core_genes/results/all3-pooling-DM/MM_meiotic_genes.csv', index_col='id_string', usecols=['id_gene', 'id_string', 'gene'])
dfM_DM = | pd.read_csv('../2-core_genes/results/all3-pooling-DM/DM_meiotic_genes.csv', index_col='id_string', usecols=['id_gene', 'id_string', 'gene']) | pandas.read_csv |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.BDay())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
def test_shift_named_axis(self):
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical1(self, frame_or_series):
# GH#9416
obj = frame_or_series(["a", "b", "c", "d"], dtype="category")
rt = obj.shift(1).shift(-1)
tm.assert_equal(obj.iloc[:-1], rt.dropna())
def get_cat_values(ndframe):
# For Series we could just do ._values; for DataFrame
# we may be able to do this if we ever have 2D Categoricals
return ndframe._mgr.arrays[0]
cat = get_cat_values(obj)
sp1 = obj.shift(1)
tm.assert_index_equal(obj.index, sp1.index)
assert np.all(get_cat_values(sp1).codes[:1] == -1)
assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:])
sn2 = obj.shift(-2)
tm.assert_index_equal(obj.index, sn2.index)
assert np.all(get_cat_values(sn2).codes[-2:] == -1)
assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2])
tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories)
tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_categorical_fill_value(self, frame_or_series):
ts = frame_or_series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = frame_or_series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = r"Cannot setitem on a Categorical with a new category \(f\)"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_fill_value(self, frame_or_series):
# GH#24128
dti = date_range("1/1/2000", periods=5, freq="H")
ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti)
exp = frame_or_series([0.0, 1.0, 2.0, 3.0, 4.0], index=dti)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_equal(result, exp)
exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti)
result = ts.shift(2, fill_value=0.0)
tm.assert_equal(result, exp)
ts = frame_or_series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert tm.get_dtype(res) == tm.get_dtype(ts)
# retain integer dtype
obj = frame_or_series([1, 2, 3, 4, 5], index=dti)
exp = frame_or_series([0, 1, 2, 3, 4], index=dti)
result = obj.shift(1, fill_value=0)
tm.assert_equal(result, exp)
exp = frame_or_series([0, 0, 1, 2, 3], index=dti)
result = obj.shift(2, fill_value=0)
tm.assert_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self, using_array_manager):
# GH#35488
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = | pd.concat([df1, df2], axis=1) | pandas.concat |
#!/usr/bin/env python3
"""
script for calculating genome coverage
"""
import os
import sys
import argparse
import pandas as pd
from ctbBio.fasta import iterate_fasta as parse_fasta
def parse_cov(cov_table, scaffold2genome):
"""
calculate genome coverage from scaffold coverage table
"""
size = {} # size[genome] = genome size
mapped = {} # mapped[genome][sample] = mapped bases
# parse coverage files
for line in open(cov_table):
line = line.strip().split('\t')
if line[0].startswith('#'):
samples = line[1:]
samples = [i.rsplit('/', 1)[-1].split('.', 1)[0] for i in samples]
continue
scaffold, length = line[0].split(': ')
length = float(length)
covs = [float(i) for i in line[1:]]
bases = [c * length for c in covs]
if scaffold not in scaffold2genome:
continue
genome = scaffold2genome[scaffold]
if genome not in size:
size[genome] = 0
mapped[genome] = {sample:0 for sample in samples}
# keep track of genome size
size[genome] += length
# keep track of number of mapped bases
for sample, count in zip(samples, bases):
mapped[genome][sample] += count
# calculate coverage from base counts and genome size
coverage = {'genome':[], 'genome size (bp)':[], 'sample':[], 'coverage':[]}
for genome, length in size.items():
for sample in samples:
cov = mapped[genome][sample] / length
coverage['genome'].append(genome)
coverage['genome size (bp)'].append(length)
coverage['sample'].append(sample)
coverage['coverage'].append(cov)
return | pd.DataFrame(coverage) | pandas.DataFrame |
from glob import glob
from astropy.io import fits
import pandas as pd
import numpy as np
from progressbar import ProgressBar
phoenix_bibtex = """
@ARTICLE{2013A&A...553A...6H,
author = {{<NAME>. and {<NAME>}, S. and {Dreizler}, S. and
{Homeier}, D. and {Reiners}, A. and {Barman}, T. and {Hauschildt}, P.~H.
},
title = "{A new extensive library of PHOENIX stellar atmospheres and synthetic spectra}",
journal = {\aap},
archivePrefix = "arXiv",
eprint = {1303.5632},
primaryClass = "astro-ph.SR",
keywords = {stars: atmospheres, convection, stars: late-type},
year = 2013,
month = may,
volume = 553,
eid = {A6},
pages = {A6},
doi = {10.1051/0004-6361/201219058},
adsurl = {http://adsabs.harvard.edu/abs/2013A%26A...553A...6H},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
"""
phoenix_meta = {'bibtex':phoenix_bibtex,
'parameters':['teff', 'logg', 'mh', 'alpha'],
'wavelength_unit':'Angstrom',
'wavelength_type':'vacuum',
'flux_unit': 'erg/s/cm^2/angstrom'}
def make_raw_index():
"""
Read all Phoenix files and generate a raw index with filename association.
Returns
-------
phoenix_index : pd.DataFrame
"""
all_fnames = glob('PHOENIX-ACES-AGSS-COND-2011/Z*/*.fits')
phoenix_index = pd.DataFrame(index=np.arange(len(all_fnames)), columns=['teff', 'logg', 'mh', 'alpha', 'filename'])
print("Reading Phoenix grid...")
progressbar = ProgressBar(max_value=len(all_fnames))
for i, fname in progressbar(enumerate(all_fnames)):
spec_header = fits.getheader(fname)
phoenix_index.iloc[i] = (spec_header['PHXTEFF'], spec_header['PHXLOGG'], spec_header['PHXM_H'],
spec_header['PHXALPHA'], fname)
return phoenix_index
def make_grid_info(fname):
"""
Make the HDF5 Grid Info file
Parameters
----------
fname: str
"""
raw_index = make_raw_index()
wavelength = fits.getdata('WAVE_PHOENIX-ACES-AGSS-COND-2011.fits')
with pd.HDFStore(fname) as fh:
fh['index'] = raw_index
fh['wavelength'] = pd.DataFrame(wavelength)
fh['meta'] = | pd.Series(phoenix_meta) | pandas.Series |
from piper.custom import ratio
import datetime
import numpy as np
import pandas as pd
import pytest
from time import strptime
from piper.custom import add_xl_formula
from piper.factory import sample_data
from piper.factory import generate_periods, make_null_dates
from piper.custom import from_julian
from piper.custom import fiscal_year
from piper.custom import from_excel
from piper.custom import to_julian
from piper.verbs import across
# t_sample_data {{{1
@pytest.fixture
def t_sample_data():
return sample_data()
# test_add_xl_formula {{{1
def test_add_xl_formula(t_sample_data):
df = t_sample_data
formula = '=CONCATENATE(A{row}, B{row}, C{row})'
add_xl_formula(df, column_name='X7', formula=formula)
expected = (367, )
assert expected == df.X7.shape
# test_across_str_date_single_col_pd_to_datetime {{{1
def test_across_str_date_single_col_pd_to_datetime():
''' '''
test = ['30/11/2019', '29/4/2019', '30/2/2019', '28/2/2019', '2019/4/30']
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, 'dates', pd.to_datetime, format='%d/%m/%Y', errors='coerce')
assert exp.equals(got) == True
# test_across_str_date_single_col_lambda {{{1
def test_across_str_date_single_col_lambda():
''' '''
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, 'dates', convert_date)
assert exp.equals(got) == True
# test_across_raise_column_parm_none_ValueError {{{1
def test_across_raise_column_parm_none():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, columns=None, function=convert_date)
assert exp.equals(got) == True
# test_across_raise_function_parm_none_ValueError {{{1
def test_across_raise_function_parm_none_ValueError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(ValueError):
got = across(got, columns='dates', function=None)
# test_across_raise_Series_parm_TypeError {{{1
def test_across_raise_Series_parm_TypeError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(TypeError):
got = across(pd.Series(test), columns='dates', function=convert_date)
# test_across_raise_column_parm_ValueError {{{1
def test_across_raise_column_parm_ValueError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(ValueError):
got = across(got, columns='invalid', function=convert_date)
# test_across_dataframe_single_column_with_lambda {{{1
def test_across_dataframe_single_column_with_lambda():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
got = across(df, columns='effective', function=convert_date)
assert exp.equals(got) == True
# test_across_dataframe_multiple_columns_with_lambda {{{1
def test_across_dataframe_multiple_columns_with_lambda():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
exp.expired = exp.expired.apply(convert_date)
got = across(df, columns=['effective', 'expired'], function=convert_date)
assert exp.equals(got) == True
# test_across_dataframe_multiple_columns_raise_invalid_column {{{1
def test_across_dataframe_multiple_columns_raise_invalid_column():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
exp.expired = exp.expired.apply(convert_date)
with pytest.raises(ValueError):
got = across(df, columns=['effective', 'invalid'], function=convert_date)
# test_dividing_numbers {{{1
def test_dividing_numbers():
''' '''
exp = 1
got = ratio(2, 2)
assert exp == got
# test_dividing_numbers_by_zero {{{1
def test_dividing_numbers_by_zero():
''' '''
exp = np.inf
got = ratio(2, 0)
assert exp == got
# test_dividing_numbers_floats {{{1
def test_dividing_numbers_floats():
''' '''
exp = 1.0
got = ratio(2.0, 2.0)
assert exp == got
# test_dividing_numbers_float_percent {{{1
def test_dividing_numbers_float_percent():
''' '''
exp = '100.0%'
got = ratio(2.0, 2.0, percent=True)
assert exp == got
# test_dividing_numbers_float_percent_with_round {{{1
def test_dividing_numbers_float_percent_with_round():
''' '''
exp = 100.0000
got = ratio(2.0, 2.0, percent=True, format=False, precision=4)
assert exp == got
exp = 50.00
got = ratio(1.0, 2.0, percent=True, format=False, precision=2)
assert exp == got
# test_dividing_numbers_int_percent_with_round {{{1
def test_dividing_numbers_int_percent_with_round():
''' '''
exp = 100.0000
got = ratio(2, 2, percent=True, format=False, precision=4)
assert exp == got
exp = 50.00
got = ratio(1, 2, percent=True, format=False, precision=2)
assert exp == got
# test_dividing_numbers_percent_with_format {{{1
def test_dividing_numbers_percent_with_format():
''' '''
exp = '100.0%'
got = ratio(2.0, 2.0, percent=True, format=True)
assert exp == got
# test_dividing_numbers_percent_with_precision_format {{{1
def test_dividing_numbers_percent_with_precision_format():
''' '''
exp = '66.66%'
got = ratio(1.3333, 2.0, percent=True,
precision=2, format=True)
assert exp == got
# test_dividing_by_two_series {{{1
def test_dividing_by_two_series():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1, 2, 3])
exp = pd.Series([10, 10, 10], dtype=float)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_zero_denominator {{{1
def test_dividing_by_two_series_with_zero_denominator():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1, 0, 3])
exp = pd.Series([10, np.inf, 10], dtype=float)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_decimals {{{1
def test_dividing_by_two_series_with_decimals():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1.3, 5.4, 3])
exp = (s1 / s2).round(2)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_rounding {{{1
def test_dividing_by_two_series_with_rounding():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1.3, 5.4, 3])
exp = (s1 / s2).round(2)
got = ratio(s1, s2, precision=2)
assert exp.equals(got)
exp = (s1 / s2).round(4)
got = ratio(s1, s2, precision=4)
assert exp.equals(got)
# test_dividing_by_two_series_with_format {{{1
def test_dividing_by_two_series_with_format():
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([100, 200, 300])
exp = pd.Series(['10.0%', '10.0%', '10.0%'])
got = ratio(s1, s2, precision=2, percent=True, format=True)
assert exp.equals(got)
# test_fiscal_year {{{1
def test_fiscal_year():
assert fiscal_year(pd.Timestamp('2014-01-01')) == 'FY 2013/2014'
assert fiscal_year(pd.to_datetime('2014-01-01')) == 'FY 2013/2014'
assert fiscal_year(pd.Timestamp('2014-01-01'), year_only=True) == 'FY 13/14'
assert fiscal_year(pd.to_datetime('2014-01-01'), year_only=True) == 'FY 13/14'
assert pd.isna(from_excel(np.nan)) == pd.isna(np.nan)
assert pd.isna(from_excel(pd.NaT)) == pd.isna(pd.NaT)
# test_from_excel_date {{{1
def test_from_excel_date():
assert from_excel(pd.Timestamp('2014-01-01 08:00:00')) == pd.Timestamp('2014-01-01 08:00:00')
assert from_excel('41640.3333') == pd.Timestamp('2014-01-01 08:00:00')
assert from_excel(41640.3333) == pd.Timestamp('2014-01-01 08:00:00')
assert from_excel(44001) == pd.Timestamp('2020-06-19 00:00:00')
assert from_excel('44001') == pd.Timestamp('2020-06-19 00:00:00')
assert from_excel(43141) == pd.Timestamp('2018-02-10 00:00:00')
assert from_excel('43962') == | pd.Timestamp('2020-05-11 00:00:00') | pandas.Timestamp |
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QGridLayout
from PyQt5.QtWidgets import QTabWidget
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QComboBox
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QTableWidget
from PyQt5 import QtGui
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
class LearnerTabs(QDialog):
def __init__(self):
super().__init__()
self.setWindowTitle("Learning Analytics Dashboard")
tabwidget = QTabWidget()
tabwidget.addTab(SupportTab(), "Support")
tabwidget.addTab(AnalyticsTab(), "Analytics")
tabwidget.addTab(TrackerTab(), "Tracking")
vboxLayout = QVBoxLayout()
vboxLayout.addWidget(tabwidget)
self.setLayout(vboxLayout)
class SupportTab(QWidget):
def __init__(self):
super().__init__()
filenameLabel = QLabel("Name:")
fileNameEdit = QLineEdit()
dob = QLabel("Birth Date:")
dobedit = QLineEdit()
age = QLabel("Age:")
ageedit = QLineEdit()
PhoneNu = QLabel("Phone:")
phonedit = QLineEdit()
ftablayout = QVBoxLayout()
ftablayout.addWidget(filenameLabel)
ftablayout.addWidget(fileNameEdit)
ftablayout.addWidget(dob)
ftablayout.addWidget(dobedit)
ftablayout.addWidget(age)
ftablayout.addWidget(ageedit)
ftablayout.addWidget(PhoneNu)
ftablayout.addWidget(phonedit)
self.setLayout(ftablayout)
class AnalyticsTab(QWidget):
def __init__(self, parent=None):
super(AnalyticsTab, self).__init__(parent)
# a figure instance to plot on
self.figure = plt.figure()
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
self.adjustSize()
# this is the Navigation widget
# it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
self.module_idx = 0
self.tracker_idx = 0
self.comboBox = QComboBox()
self.comboBox.addItem('Registration Tracking Board')
self.comboBox.addItem('Learning Progress Tracking Board')
self.comboBox.addItem('Performance Tracking Board')
self.comboBox.addItem('Learning Behavior Tracking Board')
self.comboBox.currentIndexChanged.connect(self.select_tracker)
course_modules = ['AAA','BBB','CCC','DDD','EEE','FFF','GGG']
self.moduleBox = QComboBox()
for module in course_modules:
self.moduleBox.addItem(f'Course Module - {module}')
self.moduleBox.currentIndexChanged.connect(self.select_module)
# Just some button connected to `plot` method
self.button = QPushButton('Plot')
self.button.clicked.connect(self.plot)
# set the layout
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
layout.addWidget(self.comboBox)
layout.addWidget(self.moduleBox)
layout.addWidget(self.button)
self.setLayout(layout)
def select_module(self, i):
self.module_idx = i
def select_tracker(self, i):
self.tracker_idx = i
def regi_hist(self,course):
df_student_regi = pd.read_csv("../../data/studentRegistration.csv")
group = df_student_regi.groupby(['code_module']).get_group(course)
ax = group['date_registration'].hist(cumulative=True, histtype='bar')
ax.set_xlabel('registration date (relative to day 0)')
ax.set_ylabel('learners (cumulative)')
ax.set_title(f'Course Module - {course}')
def progress_plot(self,course):
df_stu_assess = pd.read_csv('../../data/studentAssessment.csv')
df_assess = pd.read_csv('../../data/assessments.csv')
df_assess_merged = pd.merge(df_stu_assess, df_assess[['code_module', 'id_assessment']],
on='id_assessment', how='left')
ids = df_assess_merged[df_assess_merged['code_module'] == course]['id_assessment'].unique()
total = len(ids)
progress = []
students = df_assess_merged['id_student'].unique()
df_module = df_assess_merged[df_assess_merged['code_module']==course]
for s in students:
prog = df_module[df_module['id_student']==s]['id_assessment'].unique().shape[0]
for i in range(prog):
progress.append(i/total)
progress.sort(reverse=False)
ax = pd.Series(progress).hist(cumulative=False, histtype='bar', bins=6)
ax.set_xlabel('course progress in proportion')
ax.set_ylabel('number of learners')
ax.set_title(f'Course Module - {course}')
def grade_boxplot(self,course):
df_stu_assess = pd.read_csv('../../data/studentAssessment.csv')
df_assess = pd.read_csv('../../data/assessments.csv')
df_assess_merged = pd.merge(df_stu_assess, df_assess[['code_module', 'id_assessment']],
on='id_assessment', how='left')
df_course = df_assess_merged[df_assess_merged['code_module']==course]
ax = sns.boxplot(x="id_assessment", y="score", data=df_course)
ax.set_xlabel('course ID')
ax.set_ylabel('score')
ax.set_title(f'Course Module - {course}')
def plot(self):
self.figure.clear()
plots = [self.regi_hist, self.progress_plot, self.grade_boxplot]
course_modules = ['AAA','BBB','CCC','DDD','EEE','FFF','GGG']
plots[self.tracker_idx](course_modules[self.module_idx])
self.canvas.draw()
class TrackerTab(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.studentID = QLabel("Student ID: ")
self.studentID_input = QLineEdit()
self.module = QLabel("Course Module: ")
self.fileButton = QPushButton('Load File + Predict')
self.fileButton.clicked.connect(self.choosefile)
self.moduleBox = QComboBox()
course_modules = ['AAA','BBB','CCC','DDD','EEE','FFF','GGG']
for module in course_modules:
self.moduleBox.addItem(f'Course Module - {module}')
self.moduleBox.currentIndexChanged.connect(self.select_module)
self.button = QPushButton('Predict')
self.button.clicked.connect(self.predict)
self.result = QLabel()
self.module_idx = 0
self.data_file = ""
ftablayout = QGridLayout()
ftablayout.setSpacing(10)
ftablayout.addWidget(self.studentID, 1,0)
ftablayout.addWidget(self.studentID_input,1,1)
ftablayout.addWidget(self.module,2,0)
ftablayout.addWidget(self.moduleBox,2,1)
ftablayout.addWidget(self.button,3,0)
ftablayout.addWidget(self.fileButton, 3,1)
ftablayout.addWidget(self.result,4,1)
self.setLayout(ftablayout)
def openFileNameDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "",
"All Files (*);;Data Files (*.csv)",
options=options)
if fileName:
self.data_file = fileName
def select_module(self, i):
self.module_idx = i
def choosefile(self):
self.openFileNameDialog()
def predict(self):
studentId = self.studentID_input.text()
data = | pd.read_csv('../../data/visual_set.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import streamlit as st
import base64
import altair as alt
import datetime
from streamlit_option_menu import option_menu
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
import sklearn.metrics as metrics
from sklearn import tree
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, explained_variance_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from operator import itemgetter
st.set_page_config(layout="wide")
# Functions
# This function makes it possible to download the data from onedrive by creating a downloadable link
def create_onedrive_directdownload (onedrive_link):
data_bytes64 = base64.b64encode(bytes(onedrive_link, 'utf-8'))
data_bytes64_String = data_bytes64.decode('utf-8').replace('/','_').replace('+','-').rstrip("=")
resultUrl = f"https://api.onedrive.com/v1.0/shares/u!{data_bytes64_String}/root/content"
return resultUrl
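# Illustrative use only (the share link below is a placeholder, not a real file):
#   url = create_onedrive_directdownload("https://1drv.ms/u/s!EXAMPLE")
#   df = pd.read_csv(url)   # this is exactly what load() below does, with caching added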
# Cache is used to enhance speed, so the data is downloaded only once even when the code is executed multiple times.
# Load function reads the csv file from onedrive
@st.cache(allow_output_mutation=True)
def load(url):
df = pd.read_csv(create_onedrive_directdownload(url))
return df
# Used to specify end date in case of empty end date input from user.
def set_default():
if not (ymd_range[0]):
# minimum
ymd_range[0] = datetime.date(2019, 1, 1)
elif not (ymd_range[1]):
# maximum
ymd_range[1] = datetime.date(2023, 12, 31)
def change_unit(data, column_name, quantity, unit):
data[[column_name]] = data[[column_name]].astype(float)
coef = 1
if quantity == "water level":
if unit == 'meter':
unit_symbol = 'm'
coef = 0.3048
else:
unit_symbol = 'ft'
elif quantity == "flow":
if unit == 'gallon per minute':
unit_symbol = 'gpm'
elif unit == 'cubic meter per second':
unit_symbol = 'm^3/sec'
coef = 0.0000630902
elif unit == 'cubic foot per second':
unit_symbol = 'ft^3/sec'
coef = 0.0022280093
elif unit == 'acre-foot per day':
unit_symbol = 'ac*ft/day'
coef = 0.0044191742
elif unit == 'acre-inch per hour':
unit_symbol = 'ac*in/hour'
coef = 0.0026536140977965
elif quantity == "pressure":
if unit == 'pressure per square inch':
unit_symbol = 'psi'
elif unit == 'meter of head':
unit_symbol = 'm'
coef = 0.70324961490205
elif unit == 'kilogram per square centimeter':
unit_symbol = 'kg/cm^2'
coef = 0.070307
elif quantity == "diameter":
if unit == 'mm':
unit_symbol = 'mm'
coef = 25.4
else:
unit_symbol = 'in'
elif ((quantity == "length") | (quantity == "ground water depth")):
if unit == 'meter':
unit_symbol = 'm'
coef = 0.3048
else:
unit_symbol = 'ft'
elif quantity == "discharge":
if unit == 'liter per second':
unit_symbol = 'lps'
coef = 0.0631
else:
unit_symbol = 'gpm'
elif quantity == "water usage":
        if unit == 'kilogallon':
unit_symbol = 'kgal'
coef = 0.0631
elif unit == 'gallon':
unit_symbol = 'gal'
coef = 1000
elif unit == 'cubic meter':
unit_symbol = 'm^3'
coef = 3.79
elif unit == 'cubic foot':
unit_symbol = 'ft^3'
coef = 133.68
elif unit == 'centum cubic foot':
unit_symbol = 'ccf'
coef = 1.34
data[[column_name]] = data[[column_name]] * coef
return data, unit_symbol
def mask_year(df, selected_years):
selected_years = [int(i) for i in selected_years]
df['year'] = df['Time'].dt.year
mask = df['year'] == 0
for i in selected_years:
mask = (mask | (df['year'].astype(int) == i))
df = df.loc[mask]
return df
def rename_months(df):
df['month'] = df['month'].astype(str)
df['month'] = df['month'].replace(
{"1": "January", "2": "February", "3": "March", "4": "April", "5": "May", "6": "June",
"7": "July", "8": "August", "9": "September", "10": "October", "11": "November",
"12": "December"})
return df
def mask_month(df, selected_months):
selected_months_numeric = []
df['month'] = df['Time'].dt.month
mask = df['month'] == 0
for i in selected_months:
selected_months_numeric.append(numeric_month[i])
for i in selected_months_numeric:
mask = (mask | (df['month'].astype(int) == i))
df = df.loc[mask]
df = rename_months(df)
return df
def def_line_chart(data, x, y, scale_zero_x, scale_zero_y, color, legend_title, condition1_val, condition2_val,
selection, tooltip, width, height):
chart = alt.Chart(data).mark_line().encode(
alt.X(x, scale=alt.Scale(zero=scale_zero_x)),
alt.Y(y, scale=alt.Scale(zero=scale_zero_y)),
alt.Color(color, legend=alt.Legend(title=legend_title)),
opacity=alt.condition(selection, condition1_val, condition2_val),
tooltip=tooltip
).properties(
width=width, height=height
).interactive().add_selection(
selection
)
return chart
def def_bars(data, x, y, scale_zero_x, label_y, color, selection, extent):
bars = alt.Chart(data).mark_bar().encode(
x=alt.X(f"mean({x})", scale=alt.Scale(zero=scale_zero_x)),
y=alt.Y(y, axis=alt.Axis(labels=label_y)),
color=alt.Color(color),
).transform_filter(selection)
error_bars = alt.Chart().mark_errorbar(extent=extent).encode(
x=alt.X(f"mean({x})", scale=alt.Scale(zero=scale_zero_x)),
y=y
)
bars = alt.layer(bars, error_bars, data=data).transform_filter(selection)
return bars
def def_distribution_chart(data, selected_asset, x, y, color, legend, condition1_value, condition2_value, selection,
width_point, height_point, y_bar, width_bar, height_bar, bin, stack, maxbins):
# left panel: scatter plot
points = alt.Chart(data).mark_point(filled=False).encode(
x=alt.X(x, scale=alt.Scale(zero=False)),
y=alt.Y(y, scale=alt.Scale(zero=False)),
color=alt.Color(color, legend=alt.Legend(title=legend)),
opacity=alt.condition(selection, condition1_value, condition2_value)
).add_selection(selection).properties(
width=width_point,
height=height_point
)
st.subheader(f"{selected_asset}")
# right panel: histogram
mag = (alt.Chart(data).mark_bar().encode(
x=alt.X(y, bin=bin),
y=alt.Y(y_bar, stack=stack),
color=alt.Color(color, legend=alt.Legend(title=legend)),
).properties(
width=width_bar,
height=height_bar
).transform_filter(
selection
))
chart = alt.hconcat(points, mag).transform_bin(f"{y} binned",
field=y, bin=alt.Bin(maxbins=maxbins))
return chart
# Main Code
df = load("https://1drv.ms/u/s!AnhaxtVMqKpxgolL9YaQaQcQqgtxBQ?e=xRNBhX")
df['Time'] = | pd.to_datetime(df['Time'], errors='coerce') | pandas.to_datetime |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ...pvtpy.black_oil import Pvt,Oil,Water,Gas
from scipy.optimize import root_scalar
from .inflow import OilInflow, GasInflow
from ...utils import intercept_curves
from typing import Union
## Incompressible pressure drop
def potential_energy_change(
z1:Union[int,float]=None,
z2=None,
delta_z=None,
length=None,
ge=1,
angle=None,
inc=None,
p1=0):
"""potential_energy_change [ Δp PE accounts for the pressure change due to the weight of the column of fluid (the hydrostatic head); it
will be zero for flow in a horizontal pipe.
In this equation, Δz is the difference in elevation between positions 1 and 2, with z increasing upward. θ
is defined as the angle between horizontal and the direction of flow. Thus, θ is +90° for upward, vertical
flow, 0° for horizontal flow, and –90° for downward flow in a vertical well (Figure 7-4). For flow in a
straight pipe of length L with flow direction θ,]
Parameters
----------
z1 : [type], optional
[description], by default None
z2 : [type], optional
[description], by default None
delta_z : [type], optional
[description], by default None
length : [type], optional
[description], by default None
ge : int, optional
[description], by default 1
angle : [type], optional
[description], by default None
inc : [type], optional
[description], by default None
p1 : int, optional
[description], by default 0
Returns
-------
[type]
[description]
"""
# Assert height difference types
if delta_z is None:
if length is None:
assert isinstance(z1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(z2,(float,int,np.ndarray,np.int64,np.float64)), f"{type(z1)} {type(z2)}"
z1 = np.atleast_1d(z1)
z2 = np.atleast_1d(z2)
#assert z1.shape == (1,) and z2.shape == (1,)
delta_z = z1-z2
else:
assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64))
length = np.atleast_1d(length)
#assert length.shape == (1,)
if angle is None:
assert isinstance(inc,(float,int,np.ndarray,np.int64,np.float64))
inc = np.atleast_1d(inc)
assert inc <= 90 and inc >= -90
sign = np.sign(inc)
angle = (90 - np.abs(inc)) * sign
else:
# Assert angle between -90 and 90
assert isinstance(angle,(float,int,np.ndarray,np.int64,np.float64))
angle = np.atleast_1d(angle)
assert angle <= 90 and angle >= -90
delta_z = length * np.sin(np.radians(angle))
else:
assert isinstance(delta_z,(float,int,np.ndarray,np.int64,np.float64))
delta_z = np.atleast_1d(delta_z)
#assert delta_z.shape == (1,)
#Assert ge be positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0, f"{ge} {type(ge)} not allowed"
#Calculate Delta P
delta_p = 0.433 * ge * delta_z
#Calculate P2
p2 = p1 + delta_p
return delta_p, p2
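# Quick sanity check for the hydrostatic term above (comment-only sketch; the values
# are illustrative, not from the original source): for a 5000 ft column of fresh
# water (ge=1), delta_p = 0.433 * 1 * 5000 = 2165 psi, so
#   potential_energy_change(delta_z=5000, ge=1)         -> (array([2165.]), array([2165.]))
#   potential_energy_change(delta_z=5000, ge=1, p1=500) -> p2 = 2665 psi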
def kinetic_energy_change(d1=None,d2=None, ge=1,rate=None,p1=0):
"""
Δp KE is the pressure drop resulting from a change in the velocity of the fluid between positions 1 and 2.
It will be zero for an incompressible fluid unless the cross-sectional area of the pipe is different at the
two positions of interest.
Petroleum Production Systems, Economides. Chapter 7 7.2.3.2. Δp KE, the Pressure Drop Due to Kinetic Energy Change. Page 172
"""
assert isinstance(d1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(d2,(float,int,np.ndarray,np.int64,np.float64))
d1 = np.atleast_1d(d1)
d2 = np.atleast_1d(d2)
#Assert Specifi Gravity be positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0
ge = np.atleast_1d(ge)
# Rate in bbl/d
assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0
rate = np.atleast_1d(rate)
#Estimate Density in lb/ft3
rho = 62.4 * ge
#Estimate delta Pressure in psi
delta_p = 1.53e-8 * np.power(rate,2) * rho * ((1/np.power(d1,4))-(1/np.power(d2,4)))
p2 = p1 + delta_p
return delta_p, p2
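# Note on the kinetic term (illustrative): delta_p scales with (1/d1^4 - 1/d2^4),
# so it vanishes for a constant diameter, e.g.
#   kinetic_energy_change(d1=2.99, d2=2.99, ge=1, rate=1000) -> (array([0.]), array([0.]))
# and only a diameter change between the two positions produces a non-zero drop.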
def reynolds_number(rate,rho,d,mu):
"""
    Reynolds Number where q is in bbl/d, ρ in lbm/ft^3, D in in., and μ in cp.
"""
nre = (1.48 * rate * rho) / (d * mu)
return nre
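# Worked example for the correlation above (field units, illustrative numbers):
# q = 1000 bbl/d of fresh water (rho = 62.4 lbm/ft^3) in a 2.99 in pipe with
# mu = 1 cp gives nre = 1.48 * 1000 * 62.4 / (2.99 * 1) ≈ 3.1e4, i.e. turbulent flow.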
def frictional_pressure_drop(
rate=None,
epsilon=0.001,
ge=1,
d=None,
mu=1,
length=None):
# Rate in bbl/d
assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0
rate = np.atleast_1d(rate)
# pipe relative roughness
assert isinstance(epsilon,(float,int,np.ndarray,np.int64,np.float64))
epsilon = np.atleast_1d(epsilon)
#Assert Specifi Gravity be positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0
ge = np.atleast_1d(ge)
assert isinstance(d,(float,int,np.ndarray,np.int64,np.float64))
d = np.atleast_1d(d)
assert isinstance(mu,(float,int,np.ndarray,np.int64,np.float64))
mu = np.atleast_1d(mu)
assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64))
length = np.atleast_1d(length)
#Estimate Density in lb/ft3
rho = 62.4 * ge
#Reynolds Number
nre = reynolds_number(rate,rho,d,mu)
#Friction Factor
if nre == 0:
ff = 0
else:
ff = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)
#Velocity ft/s
u = (4*rate*5.615)/(np.pi*np.power(d/12,2)*86400)
delta_p = (2 * ff * rho * np.power(u,2) * length)/(32.17 * (d/12) * 144)
delta_p *= -1
return delta_p
def one_phase_pressure_profile(
p1=0,
ge=1,
epsilon=0.001,
md=None,
tvd=None,
d = None,
rate = None,
mu=None,
backwards=1
):
assert isinstance(md,(int,float,list,np.ndarray))
md = np.atleast_1d(md)
if tvd is None:
tvd = md
else:
assert isinstance(tvd,(int,float,list,np.ndarray))
tvd = np.atleast_1d(tvd)
assert isinstance(d,(int,float,list,np.ndarray))
if isinstance(d,(int,float)):
d = np.full(md.shape,d)
else:
d = np.atleast_1d(d)
assert isinstance(rate,(int,float, np.ndarray))
rate = np.atleast_1d(rate)
assert isinstance(mu,(int,float, np.ndarray))
mu = np.atleast_1d(mu)
assert isinstance(p1,(int,float, np.ndarray))
p1 = np.atleast_1d(p1)
assert isinstance(ge,(int,float, np.ndarray))
ge = np.atleast_1d(ge)
assert isinstance(epsilon,(int,float, np.ndarray))
epsilon = np.atleast_1d(epsilon)
assert md.shape[0] == tvd.shape[0] == d.shape[0]
n = md.shape[0]
#Create arrays
pressure = np.zeros(n)
ppe = np.zeros(n)
pke = np.zeros(n)
pf = np.zeros(n)
delta_p = np.zeros(n)
gradient = np.zeros(n)
pressure[0] = p1
for i in range(1,n):
#Potential Energy Change
ppe[i], _ = potential_energy_change(
z1=tvd[i-1],
z2=tvd[i],
ge= ge,
)
#Kinetic Energy Change
pke[i], _ = kinetic_energy_change(
d1=d[i-1],
d2=d[i],
rate=rate,
ge=ge,
)
#Frictional Pressure drop
pf[i] = frictional_pressure_drop(
rate=rate,
epsilon=epsilon,
ge=ge,
d=d[i],
mu=mu,
length=np.abs(md[i-1]-md[i])
) * backwards
delta_p[i] = ppe[i] + pke[i] + pf[i]
pressure[i] = pressure[i-1] + delta_p[i]
gradient[i] = (pressure[i] - pressure[i-1])/np.abs(tvd[i] - tvd[i-1])
# Create dataframe
pressure_profile = pd.DataFrame({
'md':md,
'tvd':tvd,
'diameter':d,
'pressure':pressure,
'ppe': ppe,
'pke': pke,
'pf' : pf,
'delta_p': delta_p,
'gradient': gradient
}).set_index('md')
p2 = pressure[-1]
return pressure_profile, p2
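# Minimal usage sketch for one_phase_pressure_profile (assumed values, not from
# the original source):
#   md = np.linspace(0, 8000, 81)              # measured depth, ft
#   profile, p2 = one_phase_pressure_profile(
#       p1=200, ge=0.85, md=md, d=2.99, rate=500, mu=2,
#   )
#   # `profile` is a DataFrame indexed by md with the ppe/pke/pf breakdown,
#   # the cumulative pressure and local gradient; `p2` is the last pressure.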
## Gas Outflow functions
def gas_pressure_profile_correlation(thp,sg,depth):
assert isinstance(thp,(float,int,np.ndarray,np.int64,np.float64))
thp = np.atleast_1d(thp)
assert thp.ndim == 1
assert isinstance(sg,(float,int,np.ndarray,np.int64,np.float64))
sg = np.atleast_1d(sg)
assert sg.shape == (1,)
assert isinstance(depth,(list,float,int,np.ndarray))
depth = np.atleast_1d(depth)
assert sg.ndim == 1
pwf = thp*np.exp(3.47e-5*depth)
return pwf
def gas_pressure_profile(
md = None,
inc = None,
thp = None,
rate = None,
gas_obj = None,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20):
"""
To calculate the pressure drop in a gas well, the compressibility of the fluid must be considered. When
the fluid is compressible, the fluid density and fluid velocity vary along the pipe, and these variations
must be included when integrating the mechanical energy balance equation.
Petroleum Production Systems, Economides. Chapter 7 7.3. Single-Phase Flow of a Compressible, Newtonian Fluid. Page 175
"""
# Assert the right types and shapes for input
assert isinstance(md, (np.ndarray,pd.Series))
md = np.atleast_1d(md)
assert md.ndim ==1
assert isinstance(inc, (int,float,np.ndarray,pd.Series))
if isinstance(inc,np.ndarray):
assert inc.shape == md.shape
else:
inc = np.full(md.shape,inc)
angle = np.radians(90 - inc)
assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'
thp = np.atleast_1d(thp)
assert thp.shape == (1,)
assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None
assert isinstance(di, (int,float,np.ndarray))
if isinstance(di,np.ndarray):
assert di.shape == md.shape
else:
di = np.full(md.shape,di)
assert isinstance(rate, (int,float,np.ndarray))
rate = np.atleast_1d(rate)
assert rate.shape == (1,)
assert gas_obj.sg is not None
#Create the variables
pressure_profile = np.zeros(md.shape)
temperature_profile = np.zeros(md.shape)
pressure_gradient = np.zeros(md.shape)
pressure_profile[0] = thp
temperature_profile[0] = surf_temp
    iterations = np.zeros(md.shape)
if gas_obj.chromatography is not None:
df_rho = gas_obj.chromatography.get_rhog(p=thp,t=surf_temp, rhog_method='real_gas')
else:
df_rho = gas_obj.pvt.interpolate(thp,property='rhog')
grad_guess = df_rho['rhog'].values*(0.433/62.4)
#Loop over depth
for i in range(1,md.shape[0]):
err = tol + 0.01
dz = np.sin(angle[i])*(md[i]-md[i-1])
gas_sg = gas_obj.sg
it = 0
while err>= tol and it <= max_iter:
p_guess = grad_guess*(md[i]-md[i-1])*np.sin(angle[i]) + pressure_profile[i-1]
#Interpolate pvt
df_pvt = gas_obj.pvt.interpolate(p_guess)
#Reynolds Number
#nre = (4*28.97*gas_obj.sg*rate*14.7)/(np.pi*di[i]*df_pvt['mug'].values*10.73*520)
nre = 20.09*(gas_sg*rate)/(di[i]*df_pvt['mug'].values)
#Friction Factor
friction = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)
#Temperature
temperature_profile[i] = dz * (temp_grad/100) + temperature_profile[i-1]
#S
s = (-0.0375*gas_obj.sg*dz)/(df_pvt['z'].values*(temperature_profile[i]+460))
            #Calculate the next pressure in parts, for readability
a = np.exp(-s) * np.power(pressure_profile[i-1],2)
b = (friction*np.power(df_pvt['z'].values*(temperature_profile[i]+460)*rate,2))/(np.sin(angle[i])*np.power(di[i],5))
c = 1 - np.exp(-s)
p_new = np.sqrt(a - (2.685e-3*b*c))
grad_new = (p_new - pressure_profile[i-1])/dz
err = np.abs(grad_guess-grad_new)/grad_new
grad_guess = grad_new
it +=1
pressure_gradient[i] = grad_new
pressure_profile[i] = p_new
        iterations[i] = it
df_dict = {
'pressure':pressure_profile,
'pressure_gradient': pressure_gradient,
'temperature': temperature_profile,
        'iterations': iterations
}
df = pd.DataFrame(df_dict, index = md)
pwf = pressure_profile[-1]
return df, pwf
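# A minimal usage sketch (assumes `gas_obj` is a Gas instance, defined
# elsewhere in this module, with `sg` set and a pvt table attached, as the
# asserts above require; the numbers are hypothetical):
#   df_press, pwf = gas_pressure_profile(
#       md=np.array([0, 4000, 8000]), inc=0, thp=500, rate=1000,
#       gas_obj=gas_obj,
#   )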
def gas_upward_pressure(
md = None,
inc = None,
pwf = None,
rate = None,
gas_obj = None,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20,
guess=None,
grad_guess = [0.02,0.05]
):
if guess is None:
grad = np.atleast_1d(grad_guess)
delta_h = np.abs(md[-1] - md[0])
guess = pwf - grad * delta_h
else:
assert isinstance(guess,(list,np.ndarray))
guess = np.atleast_1d(guess)
def solve(x):
_,_pwf = gas_pressure_profile(
md = md,
inc = inc,
thp = x,
rate = rate,
gas_obj = gas_obj,
di=di,
surf_temp=surf_temp,
temp_grad=temp_grad,
epsilon = epsilon,
tol = tol,
max_iter=max_iter,
)
return pwf - _pwf
sol = root_scalar(solve, x0=guess[0],x1=guess[1])
return sol.root
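# Note: `root_scalar` above is assumed to be scipy.optimize.root_scalar,
# imported earlier in this module (the import is outside this excerpt).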
def gas_outflow_curve(
md = None,
inc = None,
thp = None,
gas_obj = None,
rate=None,
min_rate=100,
max_rate=8000,
n_rate=20,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20,
operating_point = None,
op_n = 30
):
# Assert the right types and shapes for input
assert isinstance(md, (np.ndarray,pd.Series)) and md.ndim ==1
md = np.atleast_1d(md)
assert isinstance(inc, (int,float,np.ndarray,pd.Series))
if isinstance(inc,np.ndarray):
assert inc.shape == md.shape
else:
inc = np.full(md.shape,inc)
angle = np.radians(90 - inc)
assert isinstance(thp, (int,float,list,np.ndarray))
thp = np.atleast_1d(thp)
assert thp.ndim == 1
assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None
assert isinstance(di, list)
assert isinstance(rate, (int,float,list,np.ndarray,type(None)))
if rate is None:
rate = np.linspace(min_rate,max_rate,n_rate)
else:
rate = np.atleast_1d(rate)
assert rate.ndim == 1
assert gas_obj.sg is not None
pwf = np.zeros(rate.shape[0]*thp.shape[0]*len(di))
thp_arr = np.zeros(pwf.shape)
di_arr = np.zeros(pwf.shape)
gas_arr = np.zeros(pwf.shape)
name_list = []
i = 0
for p in thp:
for d in di:
for q in rate:
_,pwf[i] = gas_pressure_profile(
md = md,
inc = inc,
thp = p,
rate = q,
gas_obj = gas_obj,
surf_temp=surf_temp,
temp_grad=temp_grad,
di=d
)
gas_arr[i] = q
thp_arr[i] = p
di_arr[i] = d
case_name = f'thp-{p}_di-{d}'
name_list.append(case_name)
i += 1
#df = pd.DataFrame(pwf,columns=name_list,index=rate)
arr=np.column_stack((pwf,thp_arr,di_arr))
df = | pd.DataFrame(arr,columns=['pwf','thp','di'],index=gas_arr) | pandas.DataFrame |
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, True),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
# (cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), True),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
# (cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), True),
# (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_struct_dtype(obj, expect):
# TODO: All inputs of interval types are currently disabled due to
# inconsistent behavior of is_struct_dtype for interval types that will be
# fixed as part of the array refactor.
assert types.is_struct_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
( | pd.Series(dtype="timedelta64[s]") | pandas.Series |
import pandas as pd
# static variables
tool1 = 'Polyphen2'
tool2 = 'PROVEAN'
tool3 = 'SIFT'
# clean all the redundant whitespace in the generated file, then output it
def clean_pph2_data():
    # open the output once; 'a+' keeps appending so earlier results are not overwritten
    with open('pph2-full.txt', 'r') as infile, open('pph2-cleaned.txt', 'a+') as outfile:
        for line in infile:
            if line.startswith('##'):  # ignore the comments
                continue
            res = [value.strip() for value in line.strip().split('\t')]
            print('\t'.join(res), file=outfile)
print('done clean pph2')
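# Typical usage: run clean_pph2_data() once to produce pph2-cleaned.txt,
# which sort_data() below reads.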
# load data files and sort them out
def sort_data():
# load Polyphen2 data
pph2_col_names = ['#o_acc', 'o_pos', 'o_aa1', 'o_aa2', 'pph2_class']
pph2 = pd.read_table('pph2-cleaned.txt', sep='\t', usecols=pph2_col_names)
pph2.columns = ['protein', 'position', 'origin', 'mutated', tool1]
pph2[tool1] = pph2[tool1].str.capitalize()
# load SIFT and PROVEAN data
col_names = ['PROTEIN_ID', 'POSITION', 'RESIDUE_REF',
'RESIDUE_ALT', 'PREDICTION (cutoff=-2.5)', 'PREDICTION (cutoff=0.05)']
    # the PROVEAN batch process only allows files smaller than 1 MB to be uploaded,
    # so the data set was divided into four parts
table1 = pd.read_csv("DataAnalysis/provean1.tsv", sep='\t', usecols=col_names)
table2 = pd.read_csv("DataAnalysis/provean2.tsv", sep='\t', usecols=col_names)
table3 = | pd.read_csv("DataAnalysis/provean3.tsv", sep='\t', usecols=col_names) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
from glob import glob
from typing import Any, List, Dict, Optional, Tuple
def load_single_feed(fullpath: str):
df = pd.read_csv(fullpath)
df["first_seen"] = (
pd.to_datetime(df["first_seen"]).values.astype(np.int64) // 10 ** 9
)
df["last_seen"] = pd.to_datetime(df["last_seen"]).values.astype(np.int64) // 10 ** 9
return df
def load_feeds(path: str) -> List[Dict[str, Any]]:
"""
    Read all feeds from the specified directory; you can iterate over
    the result, e.g. [feed["name"] for feed in load_feeds(path)]
"""
filenames = glob(f"{path}/*.csv")
return [
{"name": os.path.basename(df), "df": load_single_feed(df)} for df in filenames
]
def load_feed_statistics(path: str, name=".feeds-statistics") -> pd.DataFrame:
"""
Read feeds statistics from file
"""
FEEDS_STATS_FILE: str = os.path.join(path, name)
return pd.read_csv(FEEDS_STATS_FILE, index_col="feed_name")
def load_iocs_statistics(path: str, name=".iocs-statistics") -> pd.DataFrame:
"""
Read iocs statistics from file
"""
IOCS_STATS_FILE: str = os.path.join(path, name)
return pd.read_csv(IOCS_STATS_FILE, index_col="value")
def load_whole_feeds(path: str):
return pd.concat(feed["df"] for feed in load_feeds(path))
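# A minimal usage sketch (hypothetical directory; each CSV feed is expected to
# carry the first_seen/last_seen columns that load_single_feed converts):
#   feeds = load_feeds("data/feeds")
#   combined = load_whole_feeds("data/feeds")
#   feed_stats = load_feed_statistics("data/feeds")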
def write_statistics(
path: Optional[str], **df: Dict[str, Any]
) -> Optional[Tuple[str, str]]:
if path:
IOCS_STATS_FILE: str = os.path.join(path, ".iocs-statistics")
FEEDS_STATS_FILE: str = os.path.join(path, ".feeds-statistics")
pd.DataFrame(df["iocs"]).to_csv(IOCS_STATS_FILE)
| pd.DataFrame(df["feeds"]) | pandas.DataFrame |
# Python 3
try:
from urllib.request import Request, urlopen
# Python 2
except ImportError:
from urllib2 import Request, urlopen
import pandas as pd
import time
import datetime
import numpy as np
import re
import json
from bs4 import BeautifulSoup
from pytrends.request import TrendReq
class Cryptory():
def __init__(self, from_date, to_date=None, ascending=False,
fillgaps=True, timeout=10.0):
"""Initialise cryptory class
Parameters
----------
from_date : the starting date (as string) for the returned data;
required format is %Y-%m-%d (e.g. "2017-06-21")
to_date : the end date (as string) for the returned data;
required format is %Y-%m-%d (e.g. "2017-06-21")
Optional. If unspecified, it will default to the current day
to_date : binary. Determines whether the returned dataframes are
ordered by date in ascending or descending order
(defaults to False i.e. most recent first)
fillgaps : binary. When data does not exist (e.g. weekends for stocks)
should the rows be filled in with the previous available data
(defaults to True e.g. Saturday stock price will be same as Friday)
fillgaps : float. The max time allowed (in seconds) to pull data from a website
If exceeded, an timeout error is returned. Default is 10 seconds.
"""
self.from_date = from_date
# if to_date provided, defaults to current date
if to_date is None:
self.to_date = datetime.date.today().strftime("%Y-%m-%d")
else:
self.to_date = to_date
self.ascending = ascending
self.fillgaps = fillgaps
self.timeout = timeout
self._df = pd.DataFrame({'date':pd.date_range(start=self.from_date, end=self.to_date)})
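    # A minimal usage sketch (the methods called here are defined below;
    # results depend on what the scraped sites currently serve):
    #   cry = Cryptory(from_date="2017-01-01", to_date="2017-06-01")
    #   btc = cry.extract_coinmarketcap("bitcoin")
    #   subs = cry.extract_reddit_metrics("python", "total-subscribers")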
def extract_reddit_metrics(self, subreddit, metric, col_label="", sub_col=False):
"""Retrieve daily subscriber data for a specific subreddit scraped from redditmetrics.com
Parameters
----------
subreddit : the name of subreddit (e.g. "python", "learnpython")
metric : the particular subscriber information to be retrieved
(options are limited to "subscriber-growth" (daily change),
'total-subscribers' (total subscribers on a given day) and
'rankData' (the position of the subreddit on reddit overall)
'subscriber-growth-perc' (daily percentage change in subscribers))
col_label : specify the title of the value column
            (it will default to the metric name with underscores replacing hyphens)
sub_col : whether to include the subreddit name as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
if metric not in ['subscriber-growth', 'total-subscribers', 'rankData', 'subscriber-growth-perc']:
raise ValueError(
"Invalid metric: must be one of 'subscriber-growth', " +
"'total-subscribers', 'subscriber-growth-perc', 'rankData'")
url = "http://redditmetrics.com/r/" + subreddit
if metric == 'subscriber-growth-perc':
metric_name = 'total-subscribers'
else:
metric_name = metric
try:
parsed_page = urlopen(url, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
if metric == 'rankData':
start_segment = parsed_page.find(metric)
else:
start_segment = parsed_page.find("element: '"+metric_name+"'")
if start_segment != -1:
start_list = parsed_page.find("[", start_segment)
end_list = parsed_page.find("]", start_list)
parsed_page = parsed_page[start_list:end_list + 1]
else:
return pd.DataFrame({"error":"Could not find that subreddit"}, index=[0])
parsed_page = parsed_page.replace("'", '"')
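        # redditmetrics embeds the chart data as JS objects with short keys
        # ('y' holds the date, 'a' the value); rewrite them so the snippet
        # parses as JSON with descriptive keys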
parsed_page = parsed_page.replace('a', '\"subscriber_count\"')
parsed_page = parsed_page.replace('y', '\"date\"')
output = json.loads(parsed_page)
output = pd.DataFrame(output)
output['date'] = pd.to_datetime(output['date'], format="%Y-%m-%d")
if metric == 'subscriber-growth-perc':
output['subscriber_count'] = output['subscriber_count'].pct_change()
output = output[(output['date']>=self.from_date) & (output['date']<=self.to_date)]
output = output.sort_values(by='date', ascending=self.ascending).reset_index(drop=True)
if sub_col:
output['subreddit'] = subreddit
if col_label != "":
            output = output.rename(columns={'subscriber_count': col_label})
else:
output = output.rename(columns={'subscriber_count': metric.replace("-","_")})
return output
def extract_coinmarketcap(self, coin, coin_col=False):
"""Retrieve basic historical information for a specific cryptocurrency from coinmarketcap.com
Parameters
----------
coin : the name of the cryptocurrency (e.g. 'bitcoin', 'ethereum', 'dentacoin')
coin_col : whether to include the coin name as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
try:
output = pd.read_html("https://coinmarketcap.com/currencies/{}/historical-data/?start={}&end={}".format(
coin, self.from_date.replace("-", ""), self.to_date.replace("-", "")))[0]
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
output = output.assign(Date=pd.to_datetime(output['Date']))
for col in output.columns:
if output[col].dtype == np.dtype('O'):
output.loc[output[col]=="-",col]=0
output[col] = output[col].astype('int64')
output.columns = [col.lower() for col in output.columns]
if coin_col:
output['coin'] = coin
return output
def extract_bitinfocharts(self, coin, metric="price", coin_col=False, metric_col=False):
"""Retrieve historical data for a specific cyrptocurrency scraped from bitinfocharts.com
Parameters
----------
coin : the code of the cryptocurrency (e.g. 'btc' for bitcoin)
full range of available coins can be found on bitinfocharts.com
metric : the particular coin information to be retrieved
(options are limited to those listed on bitinfocharts.com
including 'price', 'marketcap', 'transactions' and 'sentinusd'
coin_col : whether to include the coin name as a column
(default is False i.e. the column is not included)
metric_col : whether to include the metric name as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
if coin not in ['btc', 'eth', 'xrp', 'bch', 'ltc', 'dash', 'xmr', 'btg', 'etc', 'zec',
'doge', 'rdd', 'vtc', 'ppc', 'ftc', 'nmc', 'blk', 'aur', 'nvc', 'qrk', 'nec']:
raise ValueError("Not a valid coin")
if metric not in ['transactions', 'size', 'sentbyaddress', 'difficulty', 'hashrate', 'price',
'mining_profitability', 'sentinusd', 'transactionfees', 'median_transaction_fee',
'confirmationtime', 'marketcap', 'transactionvalue', 'mediantransactionvalue',
'tweets', 'activeaddresses', 'top100cap']:
raise ValueError("Not a valid bitinfocharts metric")
new_col_name = "_".join([coin, metric])
parsed_page = Request("https://bitinfocharts.com/comparison/{}-{}.html".format(metric, coin),
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'})
try:
parsed_page = urlopen(parsed_page, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
start_segment = parsed_page.find("new Dygraph")
if start_segment != -1:
start_list = parsed_page.find('[[', start_segment)
end_list = parsed_page.find(']]', start_list)
parsed_page = parsed_page[start_list:end_list]
else:
return pd.DataFrame({"error":"Could not find the appropriate text tag"}, index=[0])
parsed_page = parsed_page.replace('new Date(', '')
parsed_page = parsed_page.replace(')', '')
parsed_page = parsed_page.replace('null', '0')
parsed_page = parsed_page.replace('["', '{"date":"')
parsed_page = parsed_page.replace('",', '","{}":'.format(new_col_name))
parsed_page = parsed_page.replace('],', '},')
parsed_page = parsed_page + '}]'
output = json.loads(parsed_page)
output = pd.DataFrame(output)
output['date'] = pd.to_datetime(output['date'], format="%Y-%m-%d")
output = output[(output['date']>=self.from_date) & (output['date']<=self.to_date)]
# for consistency, put date column first
output = output[['date', new_col_name]]
if coin_col:
output['coin'] = coin
if metric_col:
output['metric'] = metric
return output.sort_values(by='date', ascending=self.ascending).reset_index(drop=True)
def extract_poloniex(self, coin1, coin2, coin1_col=False, coin2_col=False):
"""Retrieve the historical price of one coin relative to another (currency pair) from poloniex
Parameters
----------
coin1 : the code of the denomination cryptocurrency
(e.g. 'btc' for prices in bitcoin)
coin2 : the code for the coin for which prices are retrieved
(e.g. 'eth' for ethereum)
coin1_col : whether to include the coin1 code as a column
(default is False i.e. the column is not included)
coin2_col : whether to include the coin2 code as a column
(default is False i.e. the column is not included)
Returns
-------
pandas Dataframe
"""
from_date = int(time.mktime(time.strptime(self.from_date, "%Y-%m-%d")))
to_date = int(time.mktime(time.strptime(self.to_date, "%Y-%m-%d")))
url = "https://poloniex.com/public?command=returnChartData¤cyPair={}_{}&start={}&end={}&period=86400".format(
coin1.upper(), coin2.upper(), from_date, to_date)
try:
parsed_page = urlopen(url, timeout=self.timeout).read()
parsed_page = parsed_page.decode("utf8")
except Exception as e:
return pd.DataFrame({"error":e}, index=[0])
output = json.loads(parsed_page)
if isinstance(output, dict):
if 'error' in list(output.keys()):
return pd.DataFrame(output, index=[0])
output = pd.DataFrame(output)
# more intuitive column order
output = output[['date', 'close', 'open', 'high', 'low',
'weightedAverage', 'quoteVolume', 'volume']]
output['date'] = | pd.to_datetime(output['date'], unit='s') | pandas.to_datetime |
"""stuff to help with computing the noise ceiling
"""
import matplotlib as mpl
# we do this because sometimes we run this without an X-server, and this backend doesn't need
# one. We set warn=False because the notebook uses a different backend and will spout out a big
# warning to that effect; that's unnecessarily alarming, so we hide it.
mpl.use('svg', warn=False)
import torch
import numpy as np
import pandas as pd
import seaborn as sns
from torch.utils import data as torchdata
from . import model as sfp_model
def sample_df(df, seed=0,
df_filter_string='drop_voxels_with_any_negative_amplitudes,drop_voxels_near_border',
is_simulated=False,
mode='individual'):
"""Sample df to get info for necessary computing Monte Carlo noise ceiling
This is the df we use to compute the monte carlo noise ceiling,
where we're comparing the amplitude estimates computed for different
bootstraps on the full data. We pick two bootstraps (without
replacement), and query the dataframe to grab only these
bootstraps. One of them becomes the feature and one the
target. Because this uses all the data, it does not need to be
corrected to compare against our actual models. In this file, we
also provide functionality to compute the split-half noise ceiling,
where we're comparing the amplitude estimates computed on two
separate halves of the data (which does need a correction).
Parameters
----------
df : pd.DataFrame
The full df created by first_level_analysis for the GLMdenoise
run on all the data
seed : int
random seed to use (used to set numpy's RNG)
df_filter_string : str or None, optional
a str specifying how to filter the voxels in the dataset. see
the docstrings for sfp.model.FirstLevelDataset and
sfp.model.construct_df_filter for more details. If None, we
won't filter. Should probably use the default, which is what all
models are trained using.
is_simulated : bool, optional
Whether this is simulated data or actual data (changes which columns we
merge on).
mode : {'individual', 'all'}, optional
        Whether to compare a selection of two individual bootstraps
('individual') or two selections of 100 bootstraps (with replacement,
'all')
Returns
-------
df : pd.DataFrame
re-sampled dataframe with one row per (voxel, stimulus) pair,
        where each row has two values for the columns: bootstrap_num,
amplitude_estimate, amplitude_estimate_norm, and
amplitude_estimate_normed (with suffixes "_1" and "_2"), where
the two values come from two separate bootstraps (all
bootstrap_num_1 vals will be identical, as will all
bootstrap_num_2).
"""
if df_filter_string is not None:
df_filter = sfp_model.construct_df_filter(df_filter_string)
df = df_filter(df).reset_index()
np.random.seed(seed)
if mode == 'individual':
bootstraps = np.random.choice(100, 2, False)
tmp = [df.query("bootstrap_num == @b") for b in bootstraps]
elif mode == 'all':
bootstraps = np.random.choice(100, (2, 100), True)
tmp = [df.query("bootstrap_num in @b") for b in bootstraps]
    # stimulus_superclass and hemi are redundant with stimulus_class and
# voxel, respectively, but they're string-valued, which gets dropped by
# median(). so we include them here to preserve them.
tmp = [t.groupby(['varea', 'voxel', 'stimulus_superclass',
'stimulus_class', 'hemi']).median().reset_index()
for t in tmp]
    # then merge the two sampled dfs (analogous to combine_dfs below)
if not is_simulated:
cols = ['varea', 'voxel', 'stimulus_superclass', 'w_r', 'w_a', 'eccen', 'angle',
'stimulus_class', 'hemi', 'sigma', 'prf_vexpl', 'phi', 'res', 'stimulus_index',
'freq_space_angle', 'freq_space_distance', 'rounded_freq_space_distance', 'local_w_x',
'local_w_y', 'local_w_r', 'local_w_a', 'local_sf_magnitude', 'local_sf_xy_direction',
'local_sf_ra_direction', 'precision', 'baseline', 'GLM_R2']
else:
cols = ['varea', 'voxel', 'eccen', 'angle', 'stimulus_class',
'local_sf_magnitude', 'local_sf_xy_direction', 'noise_level',
'noise_source_df', 'period_orientation_type', 'eccentricity_type',
'amplitude_orientation_type', 'precision']
cols += [c for c in df.columns if c.startswith('true_m')]
df = pd.merge(*tmp, on=cols, suffixes=['_1', '_2'], validate='1:1')
df['noise_ceiling_seed'] = seed
return df
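# A minimal usage sketch (hypothetical path; the df is the full first-level
# output with one row per voxel, stimulus, and bootstrap):
#   full_df = pd.read_csv("first_level_full.csv")
#   mc_df = sample_df(full_df, seed=0, mode='individual')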
def combine_dfs(first_half, second_half, all_data):
"""combine dfs to get all the info necessary for computing split-half noise ceiling
This is the df we use to compute the split-half noise ceiling, where
we're comparing the amplitude estimates computed on two separate
halves of the data. Split-half noise ceiling needs a correction to
account for the fact that, unlike the actual models we fit, it only
uses half the data. In this file, we also provide functionality to
compute the Monte Carlo noise ceiling, where we sample from the
existing distribution of amplitude estimates we get when fitting
GLMdenoise to the full dataset (because we use bootstraps across
runs to estimate the variability of the amplitude estimates). Monte
Carlo noise ceiling does not need this dataset-size correction
We want our dataset to only take a single df as the input, as our
FirstLevelDataset does. However, the info required for this analysis
is contained in several different dfs, so this function combines
them. We want the two amplitude estimates (from the split halves)
and then we want the precision from the data fit to all the data.
We merge the two halves, combining them on the various identifying
columns (varea, voxel, stimulus_class, frequency info related to the
stimuli), and keeping the ones related to the GLM fit separate
(amplitude_estimate, precision, etc.); these will all have the
suffix "_1" (from first_half) and "_2" (from second_half). We add a
new column, 'overall_precision', which contains the precision from
all_data
Parameters
----------
first_half : pd.DataFrame
The summary df created by first_level_analysis for the
GLMdenoise run on one half of the runs
second_half : pd.DataFrame
The summary df created by first_level_analysis for the
GLMdenoise run on the second half of the runs
all_data : pd.DataFrame
The summary df created by first_level_analysis for the
GLMdenoise run on all runs
Returns
-------
df : pd.DataFrame
The merged dataframe
"""
cols = ['varea', 'voxel', 'stimulus_superclass', 'w_r', 'w_a', 'eccen', 'angle',
'stimulus_class', 'hemi', 'sigma', 'prf_vexpl', 'phi', 'res', 'stimulus_index',
'freq_space_angle', 'freq_space_distance', 'rounded_freq_space_distance', 'local_w_x',
'local_w_y', 'local_w_r', 'local_w_a', 'local_sf_magnitude', 'local_sf_xy_direction',
'local_sf_ra_direction']
if sorted(first_half.voxel.unique()) != sorted(second_half.voxel.unique()):
raise Exception("the two dataframes must have same stimulus classes!")
if sorted(first_half.stimulus_class.unique()) != sorted(second_half.stimulus_class.unique()):
raise Exception("the two dataframes must have same voxels!")
df = pd.merge(first_half, second_half, on=cols, suffixes=['_1', '_2'], validate='1:1')
df = df.set_index('voxel')
all_data = all_data.set_index('voxel')
df['overall_precision'] = all_data['precision']
return df
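# A minimal usage sketch (hypothetical paths to the three GLMdenoise
# first-level summary dfs):
#   first = pd.read_csv("first_half_summary.csv")
#   second = pd.read_csv("second_half_summary.csv")
#   full = pd.read_csv("all_runs_summary.csv")
#   merged = combine_dfs(first, second, full)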
class NoiseCeilingDataset(torchdata.Dataset):
"""Dataset for computing noise ceiling
the __getitem__ method here returns all (48) values for a single
voxel, so keep that in mind when setting batch size. this is done
because (in the loss function) we normalize the predictions and
target so that that vector of length 48 has a norm of one.
In addition the features and targets, we also return the precision
This dataset returns two sets estimates of the amplitude, as given
by GLMdenoise; one set form the features, and the second (along with
the precision) form the target. There are two modes for this
dataset, depending on the input df (this is saved as the attribute
ds.mode):
- 'split_half': df created by combine_dfs(). Then features are the
amplitude estimates from GLMdenoise fit to one half the data and
the targets are the amplitude estimates from GLMdenoise fit to the
other half (without replacement). The precision is from GLMdenoise
fit to all the data.
- 'monte_carlo': df created by sample_df(). Then features are
      amplitude estimates from one bootstrap from GLMdenoise fit to all
data, and targets are from another bootstrap (selected without
replacement so they will never be the same bootstrap; each (voxel,
stimulus) pair is selected independently). The precision is from
GLMdenoise fit to all the data.
Parameters
----------
df : str or pd.DataFrame
the df or the path to the df to use for this dataset, as created
        by sfp.noise_ceiling.combine_dfs or sfp.noise_ceiling.sample_df
device : str or torch.device
the device this dataset should live, either cpu or a specific
gpu
df_filter : function or None, optional.
If not None, a function that takes a dataframe as input and
returns one (most likely, a subset of the original) as
output. See `drop_voxels_with_any_negative_amplitudes` for an
example.
"""
def __init__(self, df, device, df_filter=None,):
try:
df_path = df
df = pd.read_csv(df)
except ValueError:
df_path = None
pass
if df_filter is not None:
# we want the index to be reset so we can use iloc in get_single_item below. this
# ensures that iloc and loc will return the same thing, which isn't otherwise the
# case. and we want them to be the same because Dataloader assumes iloc but our custom
# get_voxel needs loc.
df = df_filter(df).reset_index()
# in order to make sure that we can iterate through the dataset (as dataloader does), we
# need to create a new "voxel" column. this column just relabels the voxel column, running
# from 0 to df.voxel.nunique() while ensuring that voxel identity is preserved. if
# df_filter is None, df.voxel_reindexed should just be a copy of df.voxel
new_idx = pd.Series(range(df.voxel.nunique()), df.voxel.unique())
df = df.set_index('voxel')
df['voxel_reindexed'] = new_idx
if 'bootstrap_num' in df.columns:
raise Exception("Either this should be the split-half df, in which case it must be "
"computed on the summarized df, which has no 'bootstrap_num' columns, "
"or this should be the monte carlo df, in which case it must have "
"'bootstrap_num_1' and 'bootstrap_num_2' columns!")
if df.empty:
raise Exception("Dataframe is empty!")
self.df = df.reset_index()
self.device = device
self.df_path = df_path
self.stimulus_class = sorted(df.stimulus_class.unique())
self.bootstrap_num = None
if 'overall_precision' in df.columns:
self.mode = 'split_half'
else:
self.mode = 'monte_carlo'
def get_single_item(self, idx):
row = self.df.iloc[idx]
try:
# this is for the split-half noise ceiling
feature = row[['amplitude_estimate_median_1']].values.astype(float)
target = row[['amplitude_estimate_median_2', 'overall_precision']].values.astype(float)
except KeyError:
# this is for the Monte Carlo noise ceiling
feature = row[['amplitude_estimate_1']].values.astype(float)
target = row[['amplitude_estimate_2', 'precision']].values.astype(float)
feature = sfp_model._cast_as_tensor(feature)
target = sfp_model._cast_as_tensor(target)
return feature.to(self.device), target.to(self.device)
def __getitem__(self, idx):
vox_idx = self.df[self.df.voxel_reindexed == idx].index
return self.get_single_item(vox_idx)
def get_voxel(self, idx):
vox_idx = self.df[self.df.voxel == idx].index
return self.get_single_item(vox_idx)
def __len__(self):
return self.df.voxel.nunique()
class NoiseCeiling(torch.nn.Module):
"""simple linear model for computing the noise ceiling
This is the simplest possible model: we're just trying to fit the
line giving the relationship between the amplitudes estimated from
two halves of the data. If they were identical, then slope=1, and
intercept=0.
Our model predicts that:
Y = slope * X + intercept
where Y is the amplitudes estimated from the second half of the
dataset, X is those from the first, and slope and intercept are the
two (scalar) parameters.
On initialization, either parameter can be set to None, in which
case they will be drawn from a uniform distribution between 0 and 1.
    Default parameters (slope=1, intercept=0) predict that the two split
    halves are identical, which is probably what you want.
Model parameters
----------------
slope : float
the slope of the linear relationship between the amplitude
estimates from the two halves
intercept : float
the intercept of the linear relationship between the amplitude
estimates from the two halves
"""
def __init__(self, slope=1, intercept=0):
super().__init__()
if slope is None:
slope = torch.rand(1)[0]
if intercept is None:
intercept = torch.rand(1)[0]
self.slope = sfp_model._cast_as_param(slope)
self.intercept = sfp_model._cast_as_param(intercept)
self.model_type = 'noise_ceiling'
def __str__(self):
return (f"NoiseCeiling({self.slope:.03f} X + {self.intercept:.03f})")
def __repr__(self):
return self.__str__()
def evaluate(self, first_half):
"""generate predictions for second_half from first_half
"""
return self.slope * first_half + self.intercept
def forward(self, inputs):
"""
In the forward function we accept a Tensor of input data and we must return
a Tensor of output data. We can use Modules defined in the constructor as
well as arbitrary operators on Tensors.
"""
return self.evaluate(inputs.select(-1, 0))
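# Illustrative sketch (not executed as part of this module): with the default
# identity parameters, the prediction for the second half is just the
# first-half amplitude itself.
#
#     nc = NoiseCeiling()                        # slope=1, intercept=0
#     first_half = torch.tensor([[.7], [1.3]])   # one amplitude per voxel
#     nc(first_half)                             # -> tensor([0.7000, 1.3000], grad_fn=...)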
def plot_noise_ceiling_model(model, df, overall_loss=None):
"""Plot model's predictions with the data
this just creates a simple scatterplot with the amplitudes estimated
from the first half (the features) on the x-axis, and those from the
second half (the targets) on the y-axis, with a red dashed line
showing the prediction of model for this range of values
Parameters
----------
model : NoiseCeiling
A trained sfp.noise_ceiling.NoiseCeiling model
df : pd.DataFrame
The dataframe created by sfp.noise_ceiling.combined_dfs, which
contains the columns amplitude_estimate_median_1 and
amplitude_estimate_median_2
overall_loss : float or None, optional
The overall loss of this model, as computed by
get_overall_loss(). If not None, will add to the plot. If None,
will not.
Returns
-------
fig : plt.Figure
figure containing the plot
"""
if 'amplitude_estimate_1' in df.columns:
ampl_col_name = 'amplitude_estimate'
else:
ampl_col_name = 'amplitude_estimate_median'
ax = sns.scatterplot(x=f'{ampl_col_name}_1', y=f'{ampl_col_name}_2', data=df)
x = np.linspace(df[f'{ampl_col_name}_1'].min(), df[f'{ampl_col_name}_1'].max(),
1000)
ax.plot(x, model.slope.detach().numpy() * x + model.intercept.detach().numpy(), 'r--')
ax.set_title(f'Predictions for {model}')
ax.axhline(color='gray', linestyle='dashed')
ax.axvline(color='gray', linestyle='dashed')
text = ""
if overall_loss is not None:
text += f'Overall loss:\n{overall_loss:.05f}\n\n'
text += (f"Seed: {df.noise_ceiling_seed.unique()[0]}\nBootstraps: "
f"[{df.bootstrap_num_1.unique()[0]}, {df.bootstrap_num_2.unique()[0]}]")
ax.text(1.01, .5, text, transform=ax.transAxes, va='center')
return ax.figure
def get_overall_loss(model, ds):
"""Compute the loss of model on the full dataset
This computes the loss of model on the full dataset and is used to
get a final sense of how well the model performed
Parameters
----------
model : sfp.noise_ceiling.NoiseCeiling
A trained sfp.noise_ceiling.NoiseCeiling() model
ds : sfp.noise_ceiling.NoiseCeilingDataset
The dataset to evaluate the model on
Returns
-------
loss : torch.tensor
single-element tensor containing the loss of the model on the
full dataset
"""
dl = torchdata.DataLoader(ds, len(ds))
features, targets = next(iter(dl))
return sfp_model.weighted_normed_loss(model(features), targets)
def split_half(df_path, save_stem, seed=0, batch_size=10, learning_rate=.1, max_epochs=100, gpus=0):
"""find the split-half noise ceiling for a single scanning session and save the output
In addition to the standard sfp_model outputs, we also save a figure
showing the predictions and loss of the final noise ceiling model
The outputs will be saved at `save_stem` plus the following
suffixes: "_loss.csv", "_results_df.csv", "_model.pt",
"_model_history.csv", "_predictions.png"
Parameters
----------
df_path : str
The path where the merged df is saved (as created by
sfp.noise_ceiling.combine_dfs)
save_stem : str
the stem of the path to save things at (i.e., should not end in
the extension)
seed : int, optional
random seed to use (used to set both torch and numpy's RNG)
batch_size : int, optional
The batch size for training the model (in number of voxels)
learning_rate : float, optional
The learning rate for the optimization algorithm
max_epochs : int, optional
The number of epochs to train for
gpus : {0, 1}, optional
How many gpus to use
"""
np.random.seed(seed)
torch.manual_seed(seed)
if gpus == 1:
device = torch.device('cuda:0')
elif gpus == 0:
device = torch.device('cpu')
else:
raise Exception(f"Only 0 and 1 gpus supported right now, not {gpus}")
ds = NoiseCeilingDataset(df_path, device)
model = NoiseCeiling(None, None).to(device)
model, loss, results, history = sfp_model.train_model(model, ds, max_epochs, batch_size,
learning_rate=learning_rate,
save_path_stem=save_stem)
model.eval()
sfp_model.save_outputs(model, loss, results, history, save_stem)
overall_loss = get_overall_loss(model, ds)
with sns.axes_style('white'):
fig = plot_noise_ceiling_model(model, pd.read_csv(df_path), overall_loss.item())
fig.savefig(save_stem+"_predictions.png", bbox_inches='tight')
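# Example call (hypothetical paths, shown only to illustrate the expected
# arguments; the actual df location and save stem depend on your project layout):
#
#     split_half('noise_ceiling_df.csv', 'results/noise_ceiling_split_half',
#                seed=0, batch_size=10, learning_rate=.1, max_epochs=100)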
def monte_carlo(df, save_stem, **metadata):
"""find the Monte Carlo noise ceiling for a single scanning session and save the output
Note that this doesn't involve training the model at all; we simply
check whether the two values are identical (i.e., we use the model
NoiseCeiling(1, 0)).
Because we don't train the model, the outputs are a little different:
- save_stem+"_loss.csv" is a single-row pd.Dataframe containing the
loss and the values passed as metadata
- save_stem+"_predictions.png" is a a figure showing the predictions
and loss of the final noise ceiling model.
Parameters
----------
df : pd.DataFrame
The sampled df (as created by sfp.noise_ceiling.sample_dfs)
save_stem : str
the stem of the path to save things at (i.e., should not end in
the extension)
metadata:
Extra key=value pairs to add to the loss.csv output
"""
device = torch.device('cpu')
orig_df = df.copy(deep=True)
ds = NoiseCeilingDataset(df, device)
model = NoiseCeiling(1, 0).to(device)
model.eval()
overall_loss = get_overall_loss(model, ds)
metadata['loss'] = overall_loss.item()
loss_df = | pd.DataFrame(metadata, index=[0]) | pandas.DataFrame |
"""
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn API can be used to cluster
documents by topics using a `Bag of Words approach
<https://en.wikipedia.org/wiki/Bag-of-words_model>`_.
Two algorithms are demoed: :class:`~sklearn.cluster.KMeans` and its more
scalable variant, :class:`~sklearn.cluster.MiniBatchKMeans`. Additionally,
latent semantic analysis is used to reduce dimensionality and discover latent
patterns in the data.
This example uses two different text vectorizers: a
:class:`~sklearn.feature_extraction.text.TfidfVectorizer` and a
:class:`~sklearn.feature_extraction.text.HashingVectorizer`. See the example
notebook :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`
for more information on vectorizers and a comparison of their processing times.
For document analysis via a supervised learning approach, see the example script
:ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`.
"""
# Author: <NAME> <<EMAIL>>
# <NAME>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
# %%
# Loading text data
# =================
#
# We load data from :ref:`20newsgroups_dataset`, which comprises around 18,000
# newsgroups posts on 20 topics. For illustrative purposes and to reduce the
# computational cost, we select a subset of 4 topics only accounting for around
# 3,400 documents. See the example
# :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`
# to gain intuition on the overlap of such topics.
#
# Notice that, by default, the text samples contain some message metadata such
# as `"headers"`, `"footers"` (signatures) and `"quotes"` to other posts. We use
# the `remove` parameter from :func:`~sklearn.datasets.fetch_20newsgroups` to
# strip those features and have a more sensible clustering problem.
import numpy as np
from sklearn.datasets import fetch_20newsgroups
categories = [
"alt.atheism",
"talk.religion.misc",
"comp.graphics",
"sci.space",
]
dataset = fetch_20newsgroups(
remove=("headers", "footers", "quotes"),
subset="all",
categories=categories,
shuffle=True,
random_state=42,
)
labels = dataset.target
unique_labels, category_sizes = np.unique(labels, return_counts=True)
true_k = unique_labels.shape[0]
print(f"{len(dataset.data)} documents - {true_k} categories")
# %%
# Quantifying the quality of clustering results
# =============================================
#
# In this section we define a function to score different clustering pipelines
# using several metrics.
#
# Clustering algorithms are fundamentally unsupervised learning methods.
# However, since we happen to have class labels for this specific dataset, it is
# possible to use evaluation metrics that leverage this "supervised" ground
# truth information to quantify the quality of the resulting clusters. Examples
# of such metrics are the following:
#
# - homogeneity, which quantifies how much clusters contain only members of a
# single class;
#
# - completeness, which quantifies how much members of a given class are
# assigned to the same clusters;
#
# - V-measure, the harmonic mean of completeness and homogeneity;
#
# - Rand-Index, which measures how frequently pairs of data points are grouped
# consistently according to the result of the clustering algorithm and the
# ground truth class assignment;
#
# - Adjusted Rand-Index, a chance-adjusted Rand-Index such that random cluster
# assignment have an ARI of 0.0 in expectation.
#
# If the ground truth labels are not known, evaluation can only be performed
# using the model results itself. In that case, the Silhouette Coefficient comes
# in handy.
#
# For more reference, see :ref:`clustering_evaluation`.
from collections import defaultdict
from sklearn import metrics
from time import time
evaluations = []
evaluations_std = []
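# %%
# As a quick toy illustration of the difference between homogeneity and
# completeness (the labels below are made up and unrelated to the benchmark):
# splitting one true class across two clusters keeps homogeneity at 1.0 but
# lowers completeness.
print(f"toy homogeneity: {metrics.homogeneity_score([0, 0, 1, 1], [0, 1, 2, 2]):.2f}")
print(f"toy completeness: {metrics.completeness_score([0, 0, 1, 1], [0, 1, 2, 2]):.2f}")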
def fit_and_evaluate(km, X, name=None, n_runs=5):
name = km.__class__.__name__ if name is None else name
train_times = []
scores = defaultdict(list)
for seed in range(n_runs):
km.set_params(random_state=seed)
t0 = time()
km.fit(X)
train_times.append(time() - t0)
scores["Homogeneity"].append(metrics.homogeneity_score(labels, km.labels_))
scores["Completeness"].append(metrics.completeness_score(labels, km.labels_))
scores["V-measure"].append(metrics.v_measure_score(labels, km.labels_))
scores["Adjusted Rand-Index"].append(
metrics.adjusted_rand_score(labels, km.labels_)
)
scores["Silhouette Coefficient"].append(
metrics.silhouette_score(X, km.labels_, sample_size=2000)
)
train_times = np.asarray(train_times)
print(f"clustering done in {train_times.mean():.2f} ± {train_times.std():.2f} s ")
evaluation = {
"estimator": name,
"train_time": train_times.mean(),
}
evaluation_std = {
"estimator": name,
"train_time": train_times.std(),
}
for score_name, score_values in scores.items():
mean_score, std_score = np.mean(score_values), np.std(score_values)
print(f"{score_name}: {mean_score:.3f} ± {std_score:.3f}")
evaluation[score_name] = mean_score
evaluation_std[score_name] = std_score
evaluations.append(evaluation)
evaluations_std.append(evaluation_std)
# %%
# K-means clustering on text features
# ===================================
#
# Two feature extraction methods are used in this example:
#
# - :class:`~sklearn.feature_extraction.text.TfidfVectorizer` uses an in-memory
# vocabulary (a Python dict) to map the most frequent words to features
# indices and hence compute a word occurrence frequency (sparse) matrix. The
# word frequencies are then reweighted using the Inverse Document Frequency
# (IDF) vector collected feature-wise over the corpus.
#
# - :class:`~sklearn.feature_extraction.text.HashingVectorizer` hashes word
# occurrences to a fixed dimensional space, possibly with collisions. The word
# count vectors are then normalized to each have l2-norm equal to one
# (projected to the euclidean unit-sphere) which seems to be important for
# k-means to work in high dimensional space.
#
# Furthermore it is possible to post-process those extracted features using
# dimensionality reduction. We will explore the impact of those choices on the
# clustering quality in the following.
#
# Feature Extraction using TfidfVectorizer
# ----------------------------------------
#
# We first benchmark the estimators using a dictionary vectorizer along with an
# IDF normalization as provided by
# :class:`~sklearn.feature_extraction.text.TfidfVectorizer`.
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(
max_df=0.5,
min_df=5,
stop_words="english",
)
t0 = time()
X_tfidf = vectorizer.fit_transform(dataset.data)
print(f"vectorization done in {time() - t0:.3f} s")
print(f"n_samples: {X_tfidf.shape[0]}, n_features: {X_tfidf.shape[1]}")
# %%
# After ignoring terms that appear in more than 50% of the documents (as set by
# `max_df=0.5`) and terms that are not present in at least 5 documents (set by
# `min_df=5`), the resulting number of unique terms `n_features` is around
# 8,000. We can additionally quantify the sparsity of the `X_tfidf` matrix as
# the fraction of non-zero entries divided by the total number of elements.
print(f"{X_tfidf.nnz / np.prod(X_tfidf.shape):.3f}")
# %%
# We find that around 0.7% of the entries of the `X_tfidf` matrix are non-zero.
#
# .. _kmeans_sparse_high_dim:
#
# Clustering sparse data with k-means
# -----------------------------------
#
# As both :class:`~sklearn.cluster.KMeans` and
# :class:`~sklearn.cluster.MiniBatchKMeans` optimize a non-convex objective
# function, their clustering is not guaranteed to be optimal for a given random
# init. Even further, on sparse high-dimensional data such as text vectorized
# using the Bag of Words approach, k-means can initialize centroids on extremely
# isolated data points. Those data points can stay their own centroids all
# along.
#
# The following code illustrates how the previous phenomenon can sometimes lead
# to highly imbalanced clusters, depending on the random initialization:
from sklearn.cluster import KMeans
for seed in range(5):
kmeans = KMeans(
n_clusters=true_k,
max_iter=100,
n_init=1,
random_state=seed,
).fit(X_tfidf)
cluster_ids, cluster_sizes = np.unique(kmeans.labels_, return_counts=True)
print(f"Number of elements asigned to each cluster: {cluster_sizes}")
print()
print(
"True number of documents in each category according to the class labels: "
f"{category_sizes}"
)
# %%
# To avoid this problem, one possibility is to increase the number of runs with
# independent random initializations `n_init`. In that case the clustering with the
# best inertia (objective function of k-means) is chosen.
kmeans = KMeans(
n_clusters=true_k,
max_iter=100,
n_init=5,
)
fit_and_evaluate(kmeans, X_tfidf, name="KMeans\non tf-idf vectors")
# %%
# All those clustering evaluation metrics have a maximum value of 1.0 (for a
# perfect clustering result). Higher values are better. Values of the Adjusted
# Rand-Index close to 0.0 correspond to a random labeling. Notice from the
# scores above that the cluster assignment is indeed well above chance level,
# but the overall quality can certainly improve.
#
# Keep in mind that the class labels may not reflect accurately the document
# topics and therefore metrics that use labels are not necessarily the best to
# evaluate the quality of our clustering pipeline.
#
# Performing dimensionality reduction using LSA
# ---------------------------------------------
#
# A `n_init=1` can still be used as long as the dimension of the vectorized
# space is reduced first to make k-means more stable. For such purpose we use
# :class:`~sklearn.decomposition.TruncatedSVD`, which works on term count/tf-idf
# matrices. Since SVD results are not normalized, we redo the normalization to
# improve the :class:`~sklearn.cluster.KMeans` result. Using SVD to reduce the
# dimensionality of TF-IDF document vectors is often known as `latent semantic
# analysis <https://en.wikipedia.org/wiki/Latent_semantic_analysis>`_ (LSA) in
# the information retrieval and text mining literature.
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
lsa = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False))
t0 = time()
X_lsa = lsa.fit_transform(X_tfidf)
explained_variance = lsa[0].explained_variance_ratio_.sum()
print(f"LSA done in {time() - t0:.3f} s")
print(f"Explained variance of the SVD step: {explained_variance * 100:.1f}%")
# %%
# Using a single initialization means the processing time will be reduced for
# both :class:`~sklearn.cluster.KMeans` and
# :class:`~sklearn.cluster.MiniBatchKMeans`.
kmeans = KMeans(
n_clusters=true_k,
max_iter=100,
n_init=1,
)
fit_and_evaluate(kmeans, X_lsa, name="KMeans\nwith LSA on tf-idf vectors")
# %%
# We can observe that clustering on the LSA representation of the document is
# significantly faster (both because of `n_init=1` and because the
# dimensionality of the LSA feature space is much smaller). Furthermore, all the
# clustering evaluation metrics have improved. We repeat the experiment with
# :class:`~sklearn.cluster.MiniBatchKMeans`.
from sklearn.cluster import MiniBatchKMeans
minibatch_kmeans = MiniBatchKMeans(
n_clusters=true_k,
n_init=1,
init_size=1000,
batch_size=1000,
)
fit_and_evaluate(
minibatch_kmeans,
X_lsa,
name="MiniBatchKMeans\nwith LSA on tf-idf vectors",
)
# %%
# Top terms per cluster
# ---------------------
#
# Since :class:`~sklearn.feature_extraction.text.TfidfVectorizer` can be
# inverted we can identify the cluster centers, which provide an intuition of
# the most influential words **for each cluster**. See the example script
# :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`
# for a comparison with the most predictive words **for each target class**.
original_space_centroids = lsa[0].inverse_transform(kmeans.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
terms = vectorizer.get_feature_names_out()
for i in range(true_k):
print(f"Cluster {i}: ", end="")
for ind in order_centroids[i, :10]:
print(f"{terms[ind]} ", end="")
print()
# %%
# HashingVectorizer
# -----------------
# An alternative vectorization can be done using a
# :class:`~sklearn.feature_extraction.text.HashingVectorizer` instance, which
# does not provide IDF weighting as this is a stateless model (the fit method
# does nothing). When IDF weighting is needed it can be added by pipelining the
# :class:`~sklearn.feature_extraction.text.HashingVectorizer` output to a
# :class:`~sklearn.feature_extraction.text.TfidfTransformer` instance. In this
# case we also add LSA to the pipeline to reduce the dimensionality and sparsity of
# the hashed vector space.
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
lsa_vectorizer = make_pipeline(
HashingVectorizer(stop_words="english", n_features=50_000),
TfidfTransformer(),
TruncatedSVD(n_components=100, random_state=0),
Normalizer(copy=False),
)
t0 = time()
X_hashed_lsa = lsa_vectorizer.fit_transform(dataset.data)
print(f"vectorization done in {time() - t0:.3f} s")
# %%
# One can observe that the LSA step takes a relatively long time to fit,
# especially with hashed vectors. The reason is that a hashed space is typically
# large (set to `n_features=50_000` in this example). One can try lowering the
# number of features at the expense of having a larger fraction of features with
# hash collisions as shown in the example notebook
# :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
#
# We now fit and evaluate the `kmeans` and `minibatch_kmeans` instances on this
# hashed-lsa-reduced data:
fit_and_evaluate(kmeans, X_hashed_lsa, name="KMeans\nwith LSA on hashed vectors")
# %%
fit_and_evaluate(
minibatch_kmeans,
X_hashed_lsa,
name="MiniBatchKMeans\nwith LSA on hashed vectors",
)
# %%
# Both methods lead to good results that are similar to running the same models
# on the traditional LSA vectors (without hashing).
#
# Clustering evaluation summary
# ==============================
import pandas as pd
import matplotlib.pyplot as plt
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(16, 6), sharey=True)
df = | pd.DataFrame(evaluations[::-1]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR, OneClassSVM
from sklearn.model_selection import KFold, cross_val_predict, GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF, ConstantKernel, Matern, DotProduct
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.neighbors import NearestNeighbors
regression_method = 'gpr_one_kernel' # regression method: 'ols_linear', 'ols_nonlinear', 'svr_linear', 'svr_gaussian', 'gpr_one_kernel', 'gpr_kernels'
ad_method = 'ocsvm' # applicability-domain (AD) method: 'knn', 'ocsvm', 'ocsvm_gamma_optimization'
fold_number = 10 # number of folds in cross-validation
rate_of_training_samples_inside_ad = 0.96 # fraction of training samples that should fall inside the AD; used to set the AD threshold
linear_svr_cs = 2 ** np.arange(-10, 5, dtype=float) # candidate C values for linear SVR
linear_svr_epsilons = 2 ** np.arange(-10, 0, dtype=float) # candidate ε values for linear SVR
nonlinear_svr_cs = 2 ** np.arange(-5, 10, dtype=float) # candidate C values for nonlinear SVR
nonlinear_svr_epsilons = 2 ** np.arange(-10, 0, dtype=float) # candidate ε values for nonlinear SVR
nonlinear_svr_gammas = 2 ** np.arange(-20, 10, dtype=float) # candidate γ values for the SVR Gaussian kernel
kernel_number = 2 # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
k_in_knn = 5 # k in k-NN
ocsvm_nu = 0.04 # ν in OCSVM: lower bound on the fraction of support vectors relative to the number of training samples
ocsvm_gamma = 0.1 # γ in OCSVM
ocsvm_gammas = 2 ** np.arange(-20, 11, dtype=float) # candidate γ values
dataset = pd.read_csv('resin.csv', index_col=0, header=0)
x_prediction = pd.read_csv('remaining_samples.csv', index_col=0, header=0)
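# Note on the input files (assumed layout, inferred from the code below):
# 'resin.csv' holds the target in its first column and the features in the
# remaining columns; 'remaining_samples.csv' holds only the features of the
# samples to be predicted.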
# split the data
y = dataset.iloc[:, 0] # objective variable (target)
x = dataset.iloc[:, 1:] # explanatory variables (features)
# nonlinear transformation
if regression_method == 'ols_nonlinear':
x_tmp = x.copy()
x_prediction_tmp = x_prediction.copy()
x_square = x ** 2 # squared terms
x_prediction_square = x_prediction ** 2 # squared terms
# add the squared and cross terms
print('\nAdding squared and cross terms')
for i in range(x_tmp.shape[1]):
print(i + 1, '/', x_tmp.shape[1])
for j in range(x_tmp.shape[1]):
if i == j: # squared term
x = pd.concat([x, x_square.rename(columns={x_square.columns[i]: '{0}^2'.format(x_square.columns[i])}).iloc[:, i]], axis=1)
x_prediction = pd.concat([x_prediction, x_prediction_square.rename(columns={x_prediction_square.columns[i]: '{0}^2'.format(x_prediction_square.columns[i])}).iloc[:, i]], axis=1)
elif i < j: # cross term
x_cross = x_tmp.iloc[:, i] * x_tmp.iloc[:, j]
x_prediction_cross = x_prediction_tmp.iloc[:, i] * x_prediction_tmp.iloc[:, j]
x_cross.name = '{0}*{1}'.format(x_tmp.columns[i], x_tmp.columns[j])
x_prediction_cross.name = '{0}*{1}'.format(x_prediction_tmp.columns[i], x_prediction_tmp.columns[j])
x = pd.concat([x, x_cross], axis=1)
x_prediction = pd.concat([x_prediction, x_prediction_cross], axis=1)
# delete features whose standard deviation is 0
deleting_variables = x.columns[x.std() == 0]
x = x.drop(deleting_variables, axis=1)
x_prediction = x_prediction.drop(deleting_variables, axis=1)
# 11 kernel candidates
kernels = [ConstantKernel() * DotProduct() + WhiteKernel(),
ConstantKernel() * RBF() + WhiteKernel(),
ConstantKernel() * RBF() + WhiteKernel() + ConstantKernel() * DotProduct(),
ConstantKernel() * RBF(np.ones(x.shape[1])) + WhiteKernel(),
ConstantKernel() * RBF(np.ones(x.shape[1])) + WhiteKernel() + ConstantKernel() * DotProduct(),
ConstantKernel() * Matern(nu=1.5) + WhiteKernel(),
ConstantKernel() * Matern(nu=1.5) + WhiteKernel() + ConstantKernel() * DotProduct(),
ConstantKernel() * Matern(nu=0.5) + WhiteKernel(),
ConstantKernel() * Matern(nu=0.5) + WhiteKernel() + ConstantKernel() * DotProduct(),
ConstantKernel() * Matern(nu=2.5) + WhiteKernel(),
ConstantKernel() * Matern(nu=2.5) + WhiteKernel() + ConstantKernel() * DotProduct()]
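# kernel_number above selects one of the 11 kernels in this list by index
# (0 = linear (DotProduct), 1 = RBF, 2 = RBF + linear, 3-4 = ARD RBF with and
# without a linear term, 5-10 = Matern with nu = 1.5, 0.5, 2.5, each with and
# without a linear term); every kernel includes a WhiteKernel noise term.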
# autoscaling (standardization)
autoscaled_y = (y - y.mean()) / y.std()
autoscaled_x = (x - x.mean()) / x.std()
x_prediction.columns = x.columns
autoscaled_x_prediction = (x_prediction - x.mean()) / x.std()
# model construction
if regression_method == 'ols_linear' or regression_method == 'ols_nonlinear':
model = LinearRegression()
elif regression_method == 'svr_linear':
# optimize C and ε via cross-validation
cross_validation = KFold(n_splits=fold_number, random_state=9, shuffle=True) # cross-validation split settings
gs_cv = GridSearchCV(SVR(kernel='linear'), {'C':linear_svr_cs, 'epsilon':linear_svr_epsilons}, cv=cross_validation) # grid search settings
gs_cv.fit(autoscaled_x, autoscaled_y) # run grid search + cross-validation
optimal_linear_svr_c = gs_cv.best_params_['C'] # optimal C
optimal_linear_svr_epsilon = gs_cv.best_params_['epsilon'] # optimal ε
print('Optimized C : {0} (log(C)={1})'.format(optimal_linear_svr_c, np.log2(optimal_linear_svr_c)))
print('Optimized ε : {0} (log(ε)={1})'.format(optimal_linear_svr_epsilon, np.log2(optimal_linear_svr_epsilon)))
model = SVR(kernel='linear', C=optimal_linear_svr_c, epsilon=optimal_linear_svr_epsilon) # declare the SVR model
elif regression_method == 'svr_gaussian':
# optimize C, ε, and γ
# optimize γ of the Gaussian kernel by maximizing the variance of the Gram matrix
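# (Each Gram-matrix element is exp(-γ * ||x_i - x_j||^2); a γ that is too small
# pushes all elements toward 1 and a γ that is too large pushes them toward 0,
# so the γ that maximizes their variance keeps the kernel values well spread
# out. This is the heuristic implemented by the loop below.)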
variance_of_gram_matrix = []
autoscaled_x_array = np.array(autoscaled_x)
for nonlinear_svr_gamma in nonlinear_svr_gammas:
gram_matrix = np.exp(- nonlinear_svr_gamma * ((autoscaled_x_array[:, np.newaxis] - autoscaled_x_array) ** 2).sum(axis=2))
variance_of_gram_matrix.append(gram_matrix.var(ddof=1))
optimal_nonlinear_gamma = nonlinear_svr_gammas[np.where(variance_of_gram_matrix==np.max(variance_of_gram_matrix))[0][0]]
cross_validation = KFold(n_splits=fold_number, random_state=9, shuffle=True) # cross-validation split settings
# optimize ε via CV
gs_cv = GridSearchCV(SVR(kernel='rbf', C=3, gamma=optimal_nonlinear_gamma),
{'epsilon': nonlinear_svr_epsilons},
cv=cross_validation)
gs_cv.fit(autoscaled_x, autoscaled_y)
optimal_nonlinear_epsilon = gs_cv.best_params_['epsilon']
# optimize C via CV
gs_cv = GridSearchCV(SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, gamma=optimal_nonlinear_gamma),
{'C': nonlinear_svr_cs},
cv=cross_validation)
gs_cv.fit(autoscaled_x, autoscaled_y)
optimal_nonlinear_c = gs_cv.best_params_['C']
# optimize γ via CV
gs_cv = GridSearchCV(SVR(kernel='rbf', epsilon=optimal_nonlinear_epsilon, C=optimal_nonlinear_c),
{'gamma': nonlinear_svr_gammas},
cv=cross_validation)
gs_cv.fit(autoscaled_x, autoscaled_y)
optimal_nonlinear_gamma = gs_cv.best_params_['gamma']
# check the results
print('Optimized C : {0} (log(C)={1})'.format(optimal_nonlinear_c, np.log2(optimal_nonlinear_c)))
print('Optimized ε : {0} (log(ε)={1})'.format(optimal_nonlinear_epsilon, np.log2(optimal_nonlinear_epsilon)))
print('Optimized γ : {0} (log(γ)={1})'.format(optimal_nonlinear_gamma, np.log2(optimal_nonlinear_gamma)))
# model construction
model = SVR(kernel='rbf', C=optimal_nonlinear_c, epsilon=optimal_nonlinear_epsilon, gamma=optimal_nonlinear_gamma) # declare the SVR model
elif regression_method == 'gpr_one_kernel':
selected_kernel = kernels[kernel_number]
model = GaussianProcessRegressor(alpha=0, kernel=selected_kernel)
elif regression_method == 'gpr_kernels':
# optimize the kernel function via cross-validation
cross_validation = KFold(n_splits=fold_number, random_state=9, shuffle=True) # cross-validation split settings
r2cvs = [] # empty list; the cross-validated r2 for each kernel will be appended here
for index, kernel in enumerate(kernels):
print(index + 1, '/', len(kernels))
model = GaussianProcessRegressor(alpha=0, kernel=kernel)
estimated_y_in_cv = np.ndarray.flatten(cross_val_predict(model, autoscaled_x, autoscaled_y, cv=cross_validation))
estimated_y_in_cv = estimated_y_in_cv * y.std(ddof=1) + y.mean()
r2cvs.append(r2_score(y, estimated_y_in_cv))
optimal_kernel_number = np.where(r2cvs == np.max(r2cvs))[0][0] # index of the kernel with the highest cross-validated r2
optimal_kernel = kernels[optimal_kernel_number] # kernel with the highest cross-validated r2
print('Kernel number selected by cross-validation :', optimal_kernel_number)
print('Kernel selected by cross-validation :', optimal_kernel)
# model construction
model = GaussianProcessRegressor(alpha=0, kernel=optimal_kernel) # declare the GPR model
model.fit(autoscaled_x, autoscaled_y) # build the model
# standard regression coefficients
if regression_method == 'ols_linear' or regression_method == 'ols_nonlinear' or regression_method == 'svr_linear':
if regression_method == 'svr_linear':
standard_regression_coefficients = model.coef_.T
else:
standard_regression_coefficients = model.coef_
standard_regression_coefficients = pd.DataFrame(standard_regression_coefficients, index=x.columns, columns=['standard_regression_coefficients'])
standard_regression_coefficients.to_csv(
'standard_regression_coefficients_{0}.csv'.format(regression_method)) # save to a csv file; note that an existing file with the same name will be overwritten
# estimate y for the training data
autoscaled_estimated_y = model.predict(autoscaled_x) # estimate y
estimated_y = autoscaled_estimated_y * y.std() + y.mean() # restore the original scale
estimated_y = pd.DataFrame(estimated_y, index=x.index, columns=['estimated_y'])
# plot actual vs. estimated y for the training data
plt.rcParams['font.size'] = 18
plt.scatter(y, estimated_y.iloc[:, 0], c='blue') # actual vs. estimated plot
y_max = max(y.max(), estimated_y.iloc[:, 0].max()) # larger of the maxima of the actual and estimated y
y_min = min(y.min(), estimated_y.iloc[:, 0].min()) # smaller of the minima of the actual and estimated y
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # draw the diagonal from the minimum - 5% to the maximum + 5%
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # set the y-axis range
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # set the x-axis range
plt.xlabel('actual y') # x-axis label
plt.ylabel('estimated y') # y-axis label
plt.gca().set_aspect('equal', adjustable='box') # make the plot square
plt.show() # draw the plot with the above settings
# r2, RMSE, MAE for the training data
print('r^2 for training data :', r2_score(y, estimated_y))
print('RMSE for training data :', mean_squared_error(y, estimated_y, squared=False))
print('MAE for training data :', mean_absolute_error(y, estimated_y))
# save the training-data results
y_for_save = pd.DataFrame(y)
y_for_save.columns = ['actual_y']
y_error_train = y_for_save.iloc[:, 0] - estimated_y.iloc[:, 0]
y_error_train = pd.DataFrame(y_error_train)
y_error_train.columns = ['error_of_y(actual_y-estimated_y)']
results_train = pd.concat([y_for_save, estimated_y, y_error_train], axis=1) # concatenate
results_train.to_csv('estimated_y_in_detail_{0}.csv'.format(regression_method)) # save the estimated values to a csv file; note that an existing file with the same name will be overwritten
# estimate y via cross-validation
cross_validation = KFold(n_splits=fold_number, random_state=9, shuffle=True) # cross-validation split settings
autoscaled_estimated_y_in_cv = cross_val_predict(model, autoscaled_x, autoscaled_y, cv=cross_validation) # estimate y
estimated_y_in_cv = autoscaled_estimated_y_in_cv * y.std() + y.mean() # restore the original scale
estimated_y_in_cv = pd.DataFrame(estimated_y_in_cv, index=x.index, columns=['estimated_y'])
# actual vs. estimated plot in cross-validation
plt.rcParams['font.size'] = 18
plt.scatter(y, estimated_y_in_cv.iloc[:, 0], c='blue') # actual vs. estimated plot
y_max = max(y.max(), estimated_y_in_cv.iloc[:, 0].max()) # larger of the maxima of the actual and estimated y
y_min = min(y.min(), estimated_y_in_cv.iloc[:, 0].min()) # smaller of the minima of the actual and estimated y
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') # draw the diagonal from the minimum - 5% to the maximum + 5%
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # set the y-axis range
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) # set the x-axis range
plt.xlabel('actual y') # x-axis label
plt.ylabel('estimated y') # y-axis label
plt.gca().set_aspect('equal', adjustable='box') # make the plot square
plt.show() # draw the plot with the above settings
# r2, RMSE, MAE in cross-validation
print('r^2 in cross-validation :', r2_score(y, estimated_y_in_cv))
print('RMSE in cross-validation :', mean_squared_error(y, estimated_y_in_cv, squared=False))
print('MAE in cross-validation :', mean_absolute_error(y, estimated_y_in_cv))
# save the cross-validation results
y_error_in_cv = y_for_save.iloc[:, 0] - estimated_y_in_cv.iloc[:, 0]
y_error_in_cv = pd.DataFrame(y_error_in_cv)
y_error_in_cv.columns = ['error_of_y(actual_y-estimated_y)']
results_in_cv = pd.concat([y_for_save, estimated_y_in_cv, y_error_in_cv], axis=1) # concatenate
results_in_cv.to_csv('estimated_y_in_cv_in_detail_{0}.csv'.format(regression_method)) # save the estimated values to a csv file; note that an existing file with the same name will be overwritten
# prediction
if regression_method == 'gpr_one_kernel' or regression_method == 'gpr_kernels': # with standard deviation
estimated_y_prediction, estimated_y_prediction_std = model.predict(autoscaled_x_prediction, return_std=True)
estimated_y_prediction_std = estimated_y_prediction_std * y.std()
estimated_y_prediction_std = pd.DataFrame(estimated_y_prediction_std, x_prediction.index, columns=['std_of_estimated_y'])
estimated_y_prediction_std.to_csv('estimated_y_prediction_{0}_std.csv'.format(regression_method)) # save the standard deviation of the predictions to a csv file; note that an existing file with the same name will be overwritten
else:
estimated_y_prediction = model.predict(autoscaled_x_prediction)
estimated_y_prediction = estimated_y_prediction * y.std() + y.mean()
estimated_y_prediction = pd.DataFrame(estimated_y_prediction, x_prediction.index, columns=['estimated_y'])
estimated_y_prediction.to_csv('estimated_y_prediction_{0}.csv'.format(regression_method)) # save the predictions to a csv file; note that an existing file with the same name will be overwritten
# revert the nonlinear transformation
if regression_method == 'ols_nonlinear':
x = x_tmp.copy()
x_prediction = x_prediction_tmp.copy()
# delete features whose standard deviation is 0
deleting_variables = x.columns[x.std() == 0]
x = x.drop(deleting_variables, axis=1)
x_prediction = x_prediction.drop(deleting_variables, axis=1)
# autoscaling (standardization)
autoscaled_x = (x - x.mean()) / x.std()
autoscaled_x_prediction = (x_prediction - x.mean()) / x.std()
# AD
if ad_method == 'knn':
ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean')
ad_model.fit(autoscaled_x)
# kneighbors returns both the distances to the k nearest neighbors and their index numbers, so two output variables are used
# for the training data the nearest neighbors include the sample itself, so k_in_knn + 1 neighbors are requested and the zero self-distance is excluded
knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1)
knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # convert to DataFrame
mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1),
columns=['mean_of_knn_distance']) # mean of the distances to the k_in_knn neighbors other than the sample itself
mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # save to a csv file; note that an existing file with the same name will be overwritten
# set the threshold so that rate_of_training_samples_inside_ad * 100 % of the training samples fall inside the AD
sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # sort the mean distances in ascending order
ad_threshold = sorted_mean_of_knn_distance_train.iloc[
round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1]
# judge whether each training sample is inside or outside the AD
inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold
# compute the k-NN distances for the prediction data
knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction)
knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # convert to DataFrame
ad_index_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # mean of the k_in_knn distances
inside_ad_flag_prediction = ad_index_prediction <= ad_threshold
elif ad_method == 'ocsvm' or ad_method == 'ocsvm_gamma_optimization':
if ad_method == 'ocsvm_gamma_optimization':
# optimize γ of the Gaussian kernel by maximizing the variance of the Gram matrix
variance_of_gram_matrix = []
autoscaled_x_array = np.array(autoscaled_x)
for nonlinear_svr_gamma in ocsvm_gammas:
gram_matrix = np.exp(- nonlinear_svr_gamma * ((autoscaled_x_array[:, np.newaxis] - autoscaled_x_array) ** 2).sum(axis=2))
variance_of_gram_matrix.append(gram_matrix.var(ddof=1))
optimal_gamma = ocsvm_gammas[np.where(variance_of_gram_matrix==np.max(variance_of_gram_matrix))[0][0]]
# optimized γ
print('Optimized gamma :', optimal_gamma)
else:
optimal_gamma = ocsvm_gamma
# AD via OCSVM
ad_model = OneClassSVM(kernel='rbf', gamma=optimal_gamma, nu=ocsvm_nu) # declare the AD model
ad_model.fit(autoscaled_x) # build the model
# data density of the training data (the value of f(x))
data_density_train = ad_model.decision_function(autoscaled_x)
number_of_support_vectors = len(ad_model.support_)
number_of_outliers_in_training_data = sum(data_density_train < 0)
print('\nNumber of support vectors in the training data :', number_of_support_vectors)
print('Fraction of support vectors in the training data :', number_of_support_vectors / x.shape[0])
print('\nNumber of outlier samples in the training data :', number_of_outliers_in_training_data)
print('Fraction of outlier samples in the training data :', number_of_outliers_in_training_data / x.shape[0])
data_density_train = pd.DataFrame(data_density_train, index=x.index, columns=['ocsvm_data_density'])
data_density_train.to_csv('ocsvm_data_density_train.csv') # save to a csv file; note that an existing file with the same name will be overwritten
# judge whether each training sample is inside or outside the AD
inside_ad_flag_train = data_density_train >= 0
# data density of the prediction data (the value of f(x))
ad_index_prediction = ad_model.decision_function(autoscaled_x_prediction)
number_of_outliers_in_prediction_data = sum(ad_index_prediction < 0)
print('\nNumber of outlier samples in the prediction data :', number_of_outliers_in_prediction_data)
print('Fraction of outlier samples in the prediction data :', number_of_outliers_in_prediction_data / x_prediction.shape[0])
ad_index_prediction = | pd.DataFrame(ad_index_prediction, index=x_prediction.index, columns=['ocsvm_data_density']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
import vectorbt as vbt
from vectorbt import settings
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
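# three toy price series: 'a' rises monotonically, 'b' falls monotonically,
# and 'c' rises and then falls back to its starting value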
ret = ts.pct_change()
settings.returns['year_freq'] = '252 days' # same as empyrical
seed = 42
np.random.seed(seed)
benchmark_rets = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
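# the benchmark returns are seeded, noisily scaled copies of the asset returns,
# so the alpha/beta-style tests below run against a deterministic benchmark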
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.wrapper.freq == day_dt
assert ret['a'].vbt.returns.wrapper.freq == day_dt
assert ret.vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.wrapper.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a']).obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts).obj, ts.pct_change())
assert pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert pd.DataFrame.vbt.returns.from_price(ts, year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_daily(self):
ret_12h = pd.DataFrame({
'a': [0.1, 0.1, 0.1, 0.1, 0.1],
'b': [-0.1, -0.1, -0.1, -0.1, -0.1],
'c': [0.1, -0.1, 0.1, -0.1, 0.1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1, 0),
datetime(2018, 1, 1, 12),
datetime(2018, 1, 2, 0),
datetime(2018, 1, 2, 12),
datetime(2018, 1, 3, 0)
]))
pd.testing.assert_series_equal(
ret_12h['a'].vbt.returns.daily(),
pd.Series(
np.array([0.21, 0.21, 0.1]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
name=ret_12h['a'].name
)
)
pd.testing.assert_frame_equal(
ret_12h.vbt.returns.daily(),
pd.DataFrame(
np.array([
[0.21, -0.19, -0.01],
[0.21, -0.19, -0.01],
[0.1, -0.1, 0.1]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
columns=ret_12h.columns
)
)
def test_annual(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.annual(),
pd.Series(
np.array([4.]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.annual(),
pd.DataFrame(
np.array([[4., -0.8, 0.]]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
columns=ret.columns
)
)
def test_cumulative(self):
res_a = empyrical.cum_returns(ret['a']).rename('a')
res_b = empyrical.cum_returns(ret['b']).rename('b')
res_c = empyrical.cum_returns(ret['c']).rename('c')
pd.testing.assert_series_equal(
ret['a'].vbt.returns.cumulative(),
res_a
)
pd.testing.assert_frame_equal(
ret.vbt.returns.cumulative(),
pd.concat([res_a, res_b, res_c], axis=1)
)
def test_total_return(self):
res_a = empyrical.cum_returns_final(ret['a'])
res_b = empyrical.cum_returns_final(ret['b'])
res_c = empyrical.cum_returns_final(ret['c'])
assert isclose(ret['a'].vbt.returns.total(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.total(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('total_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_total(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_annualized_return(self):
res_a = empyrical.annual_return(ret['a'])
res_b = empyrical.annual_return(ret['b'])
res_c = empyrical.annual_return(ret['c'])
assert isclose(ret['a'].vbt.returns.annualized(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_alpha",
[1., 2., 3.],
)
def test_annualized_volatility(self, test_alpha):
res_a = empyrical.annual_volatility(ret['a'], alpha=test_alpha)
res_b = empyrical.annual_volatility(ret['b'], alpha=test_alpha)
res_c = empyrical.annual_volatility(ret['c'], alpha=test_alpha)
assert isclose(ret['a'].vbt.returns.annualized_volatility(levy_alpha=test_alpha), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_volatility(levy_alpha=test_alpha),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_volatility')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized_volatility(ret.shape[0], minp=1, levy_alpha=test_alpha).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_calmar_ratio(self):
res_a = empyrical.calmar_ratio(ret['a'])
res_b = empyrical.calmar_ratio(ret['b'])
res_c = empyrical.calmar_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.calmar_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.calmar_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('calmar_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_calmar_ratio(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free,test_required_return",
[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)],
)
def test_omega_ratio(self, test_risk_free, test_required_return):
res_a = empyrical.omega_ratio(ret['a'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_a):
res_a = np.inf
res_b = empyrical.omega_ratio(ret['b'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_b):
res_b = np.inf
res_c = empyrical.omega_ratio(ret['c'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_c):
res_c = np.inf
assert isclose(ret['a'].vbt.returns.omega_ratio(
risk_free=test_risk_free, required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.omega_ratio(risk_free=test_risk_free, required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('omega_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_omega_ratio(
ret.shape[0], minp=1, risk_free=test_risk_free, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_sharpe_ratio(self, test_risk_free):
res_a = empyrical.sharpe_ratio(ret['a'], risk_free=test_risk_free)
res_b = empyrical.sharpe_ratio(ret['b'], risk_free=test_risk_free)
res_c = empyrical.sharpe_ratio(ret['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.sharpe_ratio(risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sharpe_ratio(risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sharpe_ratio(ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_deflated_sharpe_ratio(self):
pd.testing.assert_series_equal(
ret.vbt.returns.deflated_sharpe_ratio(risk_free=0.01),
pd.Series([np.nan, np.nan, 0.0005355605507117676], index=ret.columns).rename('deflated_sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.deflated_sharpe_ratio(risk_free=0.03),
pd.Series([np.nan, np.nan, 0.0003423112350834066], index=ret.columns).rename('deflated_sharpe_ratio')
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_downside_risk(self, test_required_return):
res_a = empyrical.downside_risk(ret['a'], required_return=test_required_return)
res_b = empyrical.downside_risk(ret['b'], required_return=test_required_return)
res_c = empyrical.downside_risk(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.downside_risk(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.downside_risk(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('downside_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_downside_risk(
ret.shape[0], minp=1, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_sortino_ratio(self, test_required_return):
res_a = empyrical.sortino_ratio(ret['a'], required_return=test_required_return)
res_b = empyrical.sortino_ratio(ret['b'], required_return=test_required_return)
res_c = empyrical.sortino_ratio(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.sortino_ratio(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sortino_ratio(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sortino_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sortino_ratio(
ret.shape[0], minp=1, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_information_ratio(self):
res_a = empyrical.excess_sharpe(ret['a'], benchmark_rets['a'])
res_b = empyrical.excess_sharpe(ret['b'], benchmark_rets['b'])
res_c = empyrical.excess_sharpe(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.information_ratio(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.information_ratio(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('information_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_information_ratio(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_beta(self):
res_a = empyrical.beta(ret['a'], benchmark_rets['a'])
res_b = empyrical.beta(ret['b'], benchmark_rets['b'])
res_c = empyrical.beta(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.beta(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.beta(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('beta')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_beta(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_alpha(self, test_risk_free):
res_a = empyrical.alpha(ret['a'], benchmark_rets['a'], risk_free=test_risk_free)
res_b = empyrical.alpha(ret['b'], benchmark_rets['b'], risk_free=test_risk_free)
res_c = empyrical.alpha(ret['c'], benchmark_rets['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.alpha(benchmark_rets['a'], risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.alpha(benchmark_rets, risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('alpha')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_alpha(
benchmark_rets, ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_tail_ratio(self):
res_a = empyrical.tail_ratio(ret['a'])
res_b = empyrical.tail_ratio(ret['b'])
res_c = empyrical.tail_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.tail_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.tail_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('tail_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_tail_ratio(
ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_cutoff",
[0.05, 0.06, 0.07],
)
def test_value_at_risk(self, test_cutoff):
# empyrical can't tolerate NaN here
res_a = empyrical.value_at_risk(ret['a'].iloc[1:], cutoff=test_cutoff)
res_b = empyrical.value_at_risk(ret['b'].iloc[1:], cutoff=test_cutoff)
res_c = empyrical.value_at_risk(ret['c'].iloc[1:], cutoff=test_cutoff)
assert isclose(ret['a'].vbt.returns.value_at_risk(cutoff=test_cutoff), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.value_at_risk(cutoff=test_cutoff),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('value_at_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_value_at_risk(
ret.shape[0], minp=1, cutoff=test_cutoff).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_cutoff",
[0.05, 0.06, 0.07],
)
def test_cond_value_at_risk(self, test_cutoff):
# empyrical can't tolerate NaN here
res_a = empyrical.conditional_value_at_risk(ret['a'].iloc[1:], cutoff=test_cutoff)
res_b = empyrical.conditional_value_at_risk(ret['b'].iloc[1:], cutoff=test_cutoff)
res_c = empyrical.conditional_value_at_risk(ret['c'].iloc[1:], cutoff=test_cutoff)
assert isclose(ret['a'].vbt.returns.cond_value_at_risk(cutoff=test_cutoff), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.cond_value_at_risk(cutoff=test_cutoff),
| pd.Series([res_a, res_b, res_c], index=ret.columns) | pandas.Series |
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from .conftest import (
assert_series_equal, assert_frame_equal, fail_on_pvlib_version)
from numpy.testing import assert_allclose
import unittest.mock as mock
from pvlib import inverter, pvsystem
from pvlib import atmosphere
from pvlib import iam as _iam
from pvlib import irradiance
from pvlib.location import Location
from pvlib import temperature
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.mark.parametrize('iam_model,model_params', [
('ashrae', {'b': 0.05}),
('physical', {'K': 4, 'L': 0.002, 'n': 1.526}),
('martin_ruiz', {'a_r': 0.16}),
])
def test_PVSystem_get_iam(mocker, iam_model, model_params):
m = mocker.spy(_iam, iam_model)
system = pvsystem.PVSystem(module_parameters=model_params)
thetas = 1
iam = system.get_iam(thetas, iam_model=iam_model)
m.assert_called_with(thetas, **model_params)
assert iam < 1.
def test_PVSystem_multi_array_get_iam():
model_params = {'b': 0.05}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=model_params),
pvsystem.Array(module_parameters=model_params)]
)
iam = system.get_iam((1, 5), iam_model='ashrae')
assert len(iam) == 2
assert iam[0] != iam[1]
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_iam((1,), iam_model='ashrae')
def test_PVSystem_get_iam_sapm(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(_iam, 'sapm')
aoi = 0
out = system.get_iam(aoi, 'sapm')
_iam.sapm.assert_called_once_with(aoi, sapm_module_params)
assert_allclose(out, 1.0, atol=0.01)
def test_PVSystem_get_iam_interp(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='interp')
def test__normalize_sam_product_names():
BAD_NAMES = [' -.()[]:+/",', 'Module[1]']
NORM_NAMES = ['____________', 'Module_1_']
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module(1)']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module[1]']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
def test_PVSystem_get_iam_invalid(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='not_a_model')
def test_retrieve_sam_raise_no_parameters():
"""
Raise an exception if no parameters are provided to `retrieve_sam()`.
"""
with pytest.raises(ValueError) as error:
pvsystem.retrieve_sam()
assert 'A name or path must be provided!' == str(error.value)
def test_retrieve_sam_cecmod():
"""
Test the expected data is retrieved from the CEC module database. In
particular, check for a known module in the database and check for the
expected keys for that module.
"""
data = pvsystem.retrieve_sam('cecmod')
keys = [
'BIPV',
'Date',
'T_NOCT',
'A_c',
'N_s',
'I_sc_ref',
'V_oc_ref',
'I_mp_ref',
'V_mp_ref',
'alpha_sc',
'beta_oc',
'a_ref',
'I_L_ref',
'I_o_ref',
'R_s',
'R_sh_ref',
'Adjust',
'gamma_r',
'Version',
'STC',
'PTC',
'Technology',
'Bifacial',
'Length',
'Width',
]
module = 'Itek_Energy_LLC_iT_300_HE'
assert module in data
assert set(data[module].keys()) == set(keys)
def test_retrieve_sam_cecinverter():
"""
Test the expected data is retrieved from the CEC inverter database. In
particular, check for a known inverter in the database and check for the
expected keys for that inverter.
"""
data = pvsystem.retrieve_sam('cecinverter')
keys = [
'Vac',
'Paco',
'Pdco',
'Vdco',
'Pso',
'C0',
'C1',
'C2',
'C3',
'Pnt',
'Vdcmax',
'Idcmax',
'Mppt_low',
'Mppt_high',
'CEC_Date',
'CEC_Type',
]
inverter = 'Yaskawa_Solectria_Solar__PVI_5300_208__208V_'
assert inverter in data
assert set(data[inverter].keys()) == set(keys)
def test_sapm(sapm_module_params):
times = pd.date_range(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1000, 500, 1100, np.nan, 1000],
index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1000, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with Series input
pvsystem.sapm(effective_irradiance, temp_cell,
pd.Series(sapm_module_params))
def test_PVSystem_sapm(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
effective_irradiance = 500
temp_cell = 25
out = system.sapm(effective_irradiance, temp_cell)
pvsystem.sapm.assert_called_once_with(effective_irradiance, temp_cell,
sapm_module_params)
assert_allclose(out['p_mp'], 100, atol=100)
def test_PVSystem_multi_array_sapm(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
effective_irradiance = (100, 500)
temp_cell = (15, 25)
sapm_one, sapm_two = system.sapm(effective_irradiance, temp_cell)
assert sapm_one['p_mp'] != sapm_two['p_mp']
sapm_one_flip, sapm_two_flip = system.sapm(
(effective_irradiance[1], effective_irradiance[0]),
(temp_cell[1], temp_cell[0])
)
assert sapm_one_flip['p_mp'] == sapm_two['p_mp']
assert sapm_two_flip['p_mp'] == sapm_one['p_mp']
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(effective_irradiance, 10)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(500, temp_cell)
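# Sketch (illustration only, mirroring the constructor usage in these tests): with a
# multi-array PVSystem, per-array inputs are passed as tuples with one element per
# Array and results come back as a tuple in the same order; mixing a scalar with a
# tuple raises the "Length mismatch for per-array parameter" error asserted above.
# The SAPM parameters come from the bundled Sandia module database; the module name
# is an assumption chosen for demonstration.
def _example_multi_array_sapm():
    sandia_modules = pvsystem.retrieve_sam('SandiaMod')
    params = sandia_modules['Canadian_Solar_CS5P_220M___2009_']
    system = pvsystem.PVSystem(
        arrays=[pvsystem.Array(module_parameters=params),
                pvsystem.Array(module_parameters=params)]
    )
    return system.sapm((100, 500), (15, 25))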
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm_spectral_loss')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
airmass = 2
out = system.sapm_spectral_loss(airmass)
pvsystem.sapm_spectral_loss.assert_called_once_with(airmass,
sapm_module_params)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
loss_one, loss_two = system.sapm_spectral_loss(2)
assert loss_one == loss_two
# this test could be improved to cover all cell types.
# could remove the need for specifying spectral coefficients if we don't
# care about the return value at all
@pytest.mark.parametrize('module_parameters,module_type,coefficients', [
({'Technology': 'mc-Si'}, 'multisi', None),
({'Material': 'Multi-c-Si'}, 'multisi', None),
({'first_solar_spectral_coefficients': (
0.84, -0.03, -0.008, 0.14, 0.04, -0.002)},
None,
(0.84, -0.03, -0.008, 0.14, 0.04, -0.002))
])
def test_PVSystem_first_solar_spectral_loss(module_parameters, module_type,
coefficients, mocker):
mocker.spy(atmosphere, 'first_solar_spectral_correction')
system = pvsystem.PVSystem(module_parameters=module_parameters)
pw = 3
airmass_absolute = 3
out = system.first_solar_spectral_loss(pw, airmass_absolute)
atmosphere.first_solar_spectral_correction.assert_called_once_with(
pw, airmass_absolute, module_type, coefficients)
assert_allclose(out, 1, atol=0.5)
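# Sketch (illustration, related to the cell-type comment above): the underlying
# correction can also be called directly with just a module_type, in which case the
# built-in coefficients for that cell technology are used and no module parameters
# are required. Precipitable water is in cm.
def _example_first_solar_spectral_correction():
    precipitable_water = 3
    airmass_absolute = 3
    return atmosphere.first_solar_spectral_correction(
        precipitable_water, airmass_absolute, module_type='multisi')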
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
),
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
)
]
)
loss_one, loss_two = system.first_solar_spectral_loss(1, 3)
assert loss_one == loss_two
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45], 1140.0510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10])],
np.array([np.nan, np.nan, 1081.1574])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10])],
pd.Series([1081.1574]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input)
if isinstance(test_input, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-1)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(pvsystem, 'sapm_effective_irradiance')
poa_direct = 900
poa_diffuse = 100
airmass_absolute = 1.5
aoi = 0
p = (sapm_module_params['A4'], sapm_module_params['A3'],
sapm_module_params['A2'], sapm_module_params['A1'],
sapm_module_params['A0'])
f1 = np.polyval(p, airmass_absolute)
expected = f1 * (poa_direct + sapm_module_params['FD'] * poa_diffuse)
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi)
pvsystem.sapm_effective_irradiance.assert_called_once_with(
poa_direct, poa_diffuse, airmass_absolute, aoi, sapm_module_params)
assert_allclose(out, expected, atol=0.1)
def test_PVSystem_multi_array_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
poa_direct = (500, 900)
poa_diffuse = (50, 100)
aoi = (0, 10)
airmass_absolute = 1.5
irrad_one, irrad_two = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi
)
assert irrad_one != irrad_two
@pytest.fixture
def two_array_system(pvsyst_module_params, cec_module_params):
"""Two-array PVSystem.
Both arrays are identical.
"""
    # copy so the fixture does not mutate the module-level parameter dictionary
    temperature_model = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
        'open_rack_glass_glass'
    ].copy()
# Need u_v to be non-zero so wind-speed changes cell temperature
# under the pvsyst model.
temperature_model['u_v'] = 1.0
# parameter for fuentes temperature model
temperature_model['noct_installed'] = 45
# parameters for noct_sam temperature model
temperature_model['noct'] = 45.
temperature_model['module_efficiency'] = 0.2
module_params = {**pvsyst_module_params, **cec_module_params}
return pvsystem.PVSystem(
arrays=[
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
),
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
)
]
)
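# Sketch (not part of the fixture above): the extra keys added to temperature_model
# each feed a different cell-temperature model. The numbers reuse the fixture's values;
# the irradiance, air temperature and wind speed are arbitrary demonstration inputs.
# ('noct_installed' is consumed by temperature.fuentes, which needs time-indexed
# series inputs and is omitted here.)
def _example_temperature_model_keys():
    poa_global, temp_air, wind_speed = 800., 25., 1.
    pvsyst_t = temperature.pvsyst_cell(poa_global, temp_air,
                                       wind_speed=wind_speed, u_v=1.0)  # 'u_v'
    noct_sam_t = temperature.noct_sam(poa_global, temp_air, wind_speed,
                                      noct=45., module_efficiency=0.2)  # 'noct', 'module_efficiency'
    return pvsyst_t, noct_sam_t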
@pytest.mark.parametrize("poa_direct, poa_diffuse, aoi",
[(20, (10, 10), (20, 20)),
((20, 20), (10,), (20, 20)),
((20, 20), (10, 10), 20)])
def test_PVSystem_sapm_effective_irradiance_value_error(
poa_direct, poa_diffuse, aoi, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
two_array_system.sapm_effective_irradiance(
poa_direct, poa_diffuse, 10, aoi
)
def test_PVSystem_sapm_celltemp(mocker):
a, b, deltaT = (-3.47, -0.0594, 3) # open_rack_glass_glass
temp_model_params = {'a': a, 'b': b, 'deltaT': deltaT}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, a, b,
deltaT)
assert_allclose(out, 57, atol=1)
def test_PVSystem_sapm_celltemp_kwargs(mocker):
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds,
temp_model_params['a'],
temp_model_params['b'],
temp_model_params['deltaT'])
assert_allclose(out, 57, atol=1)
def test_PVSystem_multi_array_sapm_celltemp_different_arrays():
temp_model_one = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
temp_model_two = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'close_mount_glass_glass']
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(temperature_model_parameters=temp_model_one),
pvsystem.Array(temperature_model_parameters=temp_model_two)]
)
temp_one, temp_two = system.sapm_celltemp(
(1000, 1000), 25, 1
)
assert temp_one != temp_two
def test_PVSystem_pvsyst_celltemp(mocker):
parameter_set = 'insulated'
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['pvsyst'][
parameter_set]
alpha_absorption = 0.85
module_efficiency = 0.17
module_parameters = {'alpha_absorption': alpha_absorption,
'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(module_parameters=module_parameters,
temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'pvsyst_cell')
irrad = 800
temp = 45
wind = 0.5
out = system.pvsyst_celltemp(irrad, temp, wind_speed=wind)
temperature.pvsyst_cell.assert_called_once_with(
irrad, temp, wind_speed=wind, u_c=temp_model_params['u_c'],
u_v=temp_model_params['u_v'], module_efficiency=module_efficiency,
alpha_absorption=alpha_absorption)
assert (out < 90) and (out > 70)
def test_PVSystem_faiman_celltemp(mocker):
u0, u1 = 25.0, 6.84 # default values
temp_model_params = {'u0': u0, 'u1': u1}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'faiman')
temps = 25
irrads = 1000
winds = 1
out = system.faiman_celltemp(irrads, temps, winds)
temperature.faiman.assert_called_once_with(irrads, temps, winds, u0, u1)
assert_allclose(out, 56.4, atol=1)
def test_PVSystem_noct_celltemp(mocker):
poa_global, temp_air, wind_speed, noct, module_efficiency = (
1000., 25., 1., 45., 0.2)
expected = 55.230790492
temp_model_params = {'noct': noct, 'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'noct_sam')
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
temperature.noct_sam.assert_called_once_with(
poa_global, temp_air, wind_speed, effective_irradiance=None, noct=noct,
module_efficiency=module_efficiency)
assert_allclose(out, expected)
    # different types
out = system.noct_sam_celltemp(np.array(poa_global), np.array(temp_air),
np.array(wind_speed))
assert_allclose(out, expected)
dr = pd.date_range(start='2020-01-01 12:00:00', end='2020-01-01 13:00:00',
freq='1H')
out = system.noct_sam_celltemp(pd.Series(index=dr, data=poa_global),
pd.Series(index=dr, data=temp_air),
pd.Series(index=dr, data=wind_speed))
assert_series_equal(out, | pd.Series(index=dr, data=expected) | pandas.Series |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
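# Sketch (illustration only): the assertions above imply NumericType comparisons use a
# small epsilon: a difference of 1e-6 counts as equal while 1e-5 does not. The
# threshold is inferred from these tests, not from separate documentation.
def _example_numeric_epsilon():
    return (NumericType(10).equal_to(10.000001),  # True under the inferred tolerance
            NumericType(10).equal_to(10.00001))   # False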
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
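    # Sketch (added for illustration, not an original test): the calling convention shared
    # by the DataframeType operators below. "target" names a column, optionally through the
    # column_prefix_map wildcard (here "--" expands to "va", so "--r2" resolves to "var2");
    # "comparator" is either a literal or another column name, and each operator returns a
    # boolean pandas Series aligned with the dataframe's rows.
    def _example_operator_call(self):
        df = pandas.DataFrame.from_dict({"var1": [1, 2, 4], "var2": [3, 5, 6]})
        df_type = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
        return df_type.greater_than({"target": "--r2", "comparator": "var1"})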
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
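    # Sketch (illustration): judging by the expected values in these tests, "prefix" and
    # "suffix" restrict the regex match to the first/last N characters of each value. The
    # pandas expression below reproduces the [True, False] result asserted above for
    # prefix=2 and "w.*"; it is an equivalent computation, not necessarily the
    # operator's implementation.
    def _example_prefix_regex_equivalent(self):
        values = pandas.Series(["word", "TEST"])
        return values.str[:2].str.match("w.*")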
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2023'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_is_incomplete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": [ '2021', '2021', '2099'],
"var2": [ "1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var1"})
.equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var2"})
.equals(pandas.Series([False, False, False])))
def test_is_complete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ["2021", "2021", "2099"],
"var2": ["1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var1"})
.equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var2"})
.equals(pandas.Series([True, True, True])))
def test_is_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
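        # ("ARM", "TAE") repeats the pair ("PLACEBO", 1) in the first two rows, so those rows are not a
        # unique set; every ("ARM", "LAE") pair is distinct, so that combination is unique throughout.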
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([True, True, True, True])))
def test_is_not_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([False, False, False, False])))
def test_is_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
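        # SESEQ is non-decreasing within each USUBJID group (1, 2 for subject 1 and 1, 2 for subject 2),
        # so the set is ordered; df2 below breaks that ordering for subject 1 (3 followed by 2).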
self.assertTrue(DataframeType({"value": df}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_not_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_unique_relationship(self):
"""
Test validates one-to-one relationship against a dataset.
One-to-one means that a pair of columns can be duplicated
but its integrity should not be violated.
"""
one_to_one_related_df = pandas.DataFrame.from_dict(
{
"STUDYID": [1, 2, 3, 1, 2],
"USUBJID": ["TEST", "TEST-1", "TEST-2", "TEST-3", "TEST-4", ],
"STUDYDESC": ["Russia", "USA", "China", "Russia", "USA", ],
}
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYID", "comparator": "STUDYDESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYDESC", "comparator": "STUDYID"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--ID", "comparator": "--DESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--DESC", "comparator": "--ID"}
).equals(pandas.Series([True, True, True, True, True]))
)
df_violates_one_to_one = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"TESTID": [1, 2, 1, 3],
"TESTNAME": ["Functional", "Stress", "Functional", "Stress", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTID", "comparator": "TESTNAME"}).equals(pandas.Series([True, False, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTNAME", "comparator": "TESTID"}).equals(pandas.Series([True, False, True, False]))
)
def test_is_not_unique_relationship(self):
"""
Test validates one-to-one relationship against a dataset.
One-to-one means that a pair of columns can be duplicated
but its integrity should not be violated.
"""
valid_df = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISITNUM": [1, 2, 1, 3],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
}
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISITNUM", "comparator": "VISIT"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITNUM"}).equals(pandas.Series([False, False, False, False]))
)
valid_df_1 = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
"VISITDESC": [
"Doctor Consultation", "Heart Surgery", "Doctor Consultation", "Long Lasting Treatment",
],
}
)
self.assertTrue(DataframeType({"value": valid_df_1}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITDESC"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df_1}).is_not_unique_relationship(
{"target": "VISITDESC", "comparator": "VISIT"}).equals(pandas.Series([False, False, False, False]))
)
df_violates_one_to_one = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISITNUM": [1, 2, 1, 3],
"VISIT": ["Consulting", "Surgery", "Consulting", "Consulting", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_not_unique_relationship(
{"target": "VISITNUM", "comparator": "VISIT"}).equals(pandas.Series([True, False, True, True]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITNUM"}).equals(pandas.Series([True, False, True, True]))
)
df_violates_one_to_one_1 = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", "TEST-4", ],
"VISIT": ["Consulting", "Consulting", "Surgery", "Consulting", "Treatment", ],
"VISITDESC": ["Doctor Consultation", "Doctor Consultation", "Heart Surgery", "Heart Surgery", "Long Lasting Treatment", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITDESC"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1}).is_not_unique_relationship(
{"target": "VISITDESC", "comparator": "VISIT"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1, "column_prefix_map": {"--": "VI"}}).is_not_unique_relationship(
{"target": "--SIT", "comparator": "--SITDESC"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1, "column_prefix_map": {"--": "VI"}}).is_not_unique_relationship(
{"target": "--SITDESC", "comparator": "--SIT"}).equals(pandas.Series([True, True, True, True, False]))
)
def test_empty_within_except_last_row(self):
df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2],
"valid": ["2020-10-10", "2020-10-10", "2020-10-10", "2021", "2021", "2021", ],
"invalid": ["2020-10-10", None, None, "2020", "2020", None, ],
}
)
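        # "valid" has no missing values inside either USUBJID group; "invalid" has a None before the
        # last row of group 1, so only "invalid" reports an empty value once last rows are excluded.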
self.assertFalse(
DataframeType({"value": df}).empty_within_except_last_row({"target": "valid", "comparator": "USUBJID"})
)
self.assertTrue(
DataframeType({"value": df}).empty_within_except_last_row({"target": "invalid", "comparator": "USUBJID"})
)
def test_non_empty_within_except_last_row(self):
df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2],
"valid": ["2020-10-10", "2020-10-10", "2020-10-10", "2021", "2021", "2021", ],
"invalid": ["2020-10-10", None, None, "2020", "2020", None, ],
}
)
self.assertTrue(
DataframeType({"value": df}).non_empty_within_except_last_row({"target": "valid", "comparator": "USUBJID"})
)
self.assertFalse(
DataframeType({"value": df}).non_empty_within_except_last_row({"target": "invalid", "comparator": "USUBJID"})
)
def test_is_valid_reference(self):
reference_data = {
"LB": {
"TEST": [],
"DATA": [1,2,3]
},
"AE": {
"AETERM": [1,2,3]
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "AETERM", "AETERM"]
}
)
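        # Every IDVAR1 value exists as a key under its RDOMAIN in reference_data, so all rows are valid;
        # for IDVAR2, "AETERM" is not a key under "LB", so the second row is an invalid reference.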
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_reference({"target": "IDVAR1", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, True]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_reference({"target": "IDVAR2", "context": "RDOMAIN"})
.equals(pandas.Series([True, False, True]))
)
def test_not_valid_reference(self):
reference_data = {
"LB": {
"TEST": [],
"DATA": [1,2,3]
},
"AE": {
"AETERM": [1,2,3]
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "AETERM", "AETERM"]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_reference({"target": "IDVAR1", "context": "RDOMAIN"})
.equals(pandas.Series([False, False, False]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_reference({"target": "IDVAR2", "context": "RDOMAIN"})
.equals(pandas.Series([False, True, False]))
)
def test_is_valid_relationship(self):
reference_data = {
"LB": {
"TEST": pandas.Series([4,5,6]).values,
"DATA": pandas.Series([1,2,3]).values
},
"AE": {
"AETERM": pandas.Series([31, 323, 33]).values
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "DATA", "AETERM"],
"IDVARVAL1": [4, 1, 31],
"IDVARVAL2": [5, 1, 35]
}
)
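        # IDVARVAL1 values (4, 1, 31) all appear in the referenced columns, so every row is valid;
        # IDVARVAL2's last value (35) is not in AE.AETERM ([31, 323, 33]), so the third row fails.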
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_relationship({"target": "IDVAR1", "comparator": "IDVARVAL1", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, True]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_relationship({"target": "IDVAR2", "comparator": "IDVARVAL2", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, False]))
)
def test_not_valid_relationship(self):
reference_data = {
"LB": {
"TEST": pandas.Series([4,5,6]).values,
"DATA": | pandas.Series([1,2,3]) | pandas.Series |
#Import the libraries
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import yfinance as yf
import datetime
import math
from datetime import timedelta
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import plotting
import cvxpy as cp
# Import libraries to fetch historical EUR/USD prices
from forex_python.converter import get_rate
from joblib import Parallel, delayed
DATE_FORMAT = '%Y-%m-%d'
# Database maintainance functions
#Connects to a the pre-existing CSV price database
def connectAndLoadDb(exchange):
"""Connects to and loads the data for an exchange.
Parameters
----------
exchange : str
The name of the exchange stored at
"Price Databases\database_"+str(exchange)+".csv"
Returns
-------
DataFrame
database with dates & assets prices
in the native currency in each column
"""
print("Connecting database:"+str(exchange))
filename="Price Databases\database_"+str(exchange)+".csv"
database = | pd.read_csv(filename,index_col=False) | pandas.read_csv |
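# Minimal usage sketch (the exchange name below is hypothetical and the CSV must already exist
# at the documented path; connectAndLoadDb is assumed to return the loaded DataFrame as above):
# prices = connectAndLoadDb("Euronext")
# print(prices.head())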
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import re
from math import ceil
import pandas as pd
from sklearn.metrics import classification_report
from scipy.stats import shapiro, boxcox, yeojohnson
from scipy.stats import probplot
from sklearn.preprocessing import LabelEncoder, PowerTransformer
from category_encoders.target_encoder import TargetEncoder
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.linear_model import LinearRegression, LogisticRegression
# from .charts.classification_visualization import classification_visualization
# from .charts.charts import Plot, ScatterChart
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.utils.multiclass import unique_labels
from sklearn.manifold import TSNE
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import json
from pyod.models.hbos import HBOS
from statsmodels.api import ProbPlot
# from .charts.charts_extras import (
# feature_importances_plot,
# regression_viz,
# classification_viz,
# )
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.svm import LinearSVC
import warnings
warnings.filterwarnings("ignore")
sns.set_palette("colorblind")
class CrawtoDS:
def __init__(
self,
data,
target,
test_data=None,
time_dependent=False,
features="infer",
problem="infer",
):
self.input_data = data
self.target = target
self.features = features
self.problem = problem
self.test_data = test_data
self.timedependent = time_dependent
if self.problem == "binary classification":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True, stratify=self.input_data[self.target],
)
elif self.problem == "regression":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True,
)
    def nan_features(self):
        """Return the features whose share of NaN values exceeds a 25% threshold.

        `map` computes the NaN fraction of each column and yields the column name
        when that fraction is above the threshold; `filter` then drops the False
        entries left behind for the remaining columns."""
        f = self.input_data.columns.values
        len_df = len(self.input_data)
nan_features = list(
filter(
lambda x: x is not False,
map(
lambda x: x
if self.input_data[x].isna().sum() / len_df > 0.25
else False,
f,
),
)
)
return nan_features
def problematic_features(self):
f = self.input_data.columns.values
problematic_features = []
for i in f:
if "Id" in i:
problematic_features.append(i)
elif "ID" in i:
problematic_features.append(i)
return problematic_features
def undefined_features(self):
if self.features == "infer":
undefined_features = list(self.input_data.columns)
undefined_features.remove(self.target)
for i in self.nan_features:
undefined_features.remove(i)
for i in self.problematic_features:
undefined_features.remove(i)
return undefined_features
def numeric_features(self):
numeric_features = []
l = self.undefined_features
for i in l:
if self.input_data[i].dtype in ["float64", "float", "int", "int64"]:
if len(self.input_data[i].value_counts()) / len(self.input_data) < 0.1:
pass
else:
numeric_features.append(i)
return numeric_features
    def categorical_features(self, threshold=10):
        categorical_features = []
        l = self.undefined_features
for i in l:
if len(self.input_data[i].value_counts()) / len(self.input_data[i]) < 0.10:
categorical_features.append(i)
return categorical_features
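    # Note on the 10% rule used above: a column with 8 distinct values in 1,000 rows gives
    # 8 / 1000 = 0.008 < 0.10 and is treated as categorical, whereas a mostly-unique,
    # ID-like column is not.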
def indicator(self):
indicator = MissingIndicator(features="all")
indicator.fit(self.train_data[self.undefined_features])
return indicator
def train_missing_indicator_df(self):
x = self.indicator.transform(self.train_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = [
i
for i in list(missing_indicator_df.columns.values)
if missing_indicator_df[i].max() == True
]
return missing_indicator_df[columns].replace({True: 1, False: 0})
def valid_missing_indicator_df(self):
x = self.indicator.transform(self.valid_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = list(self.train_missing_indicator_df)
return missing_indicator_df[columns].replace({True: 1, False: 0})
def numeric_imputer(self):
numeric_imputer = SimpleImputer(strategy="median", copy=True)
numeric_imputer.fit(self.train_data[self.numeric_features])
return numeric_imputer
def categorical_imputer(self):
categorical_imputer = SimpleImputer(strategy="most_frequent", copy=True)
categorical_imputer.fit(self.train_data[self.categorical_features])
return categorical_imputer
def train_imputed_numeric_df(self):
x = self.numeric_imputer.transform(self.train_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def valid_imputed_numeric_df(self):
x = self.numeric_imputer.transform(self.valid_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def yeo_johnson_transformer(self):
yeo_johnson_transformer = PowerTransformer(method="yeo-johnson", copy=True)
yeo_johnson_transformer.fit(self.train_imputed_numeric_df)
return yeo_johnson_transformer
def yeo_johnson_target_transformer(self):
yeo_johnson_target_transformer = PowerTransformer(method="yeo-johnson", copy=True)
yeo_johnson_target_transformer.fit(
np.array(self.train_data[self.target]).reshape(-1, 1)
)
return yeo_johnson_target_transformer
def train_yeojohnson_df(self):
yj = self.yeo_johnson_transformer.transform(self.train_imputed_numeric_df)
columns = self.train_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def valid_yeojohnson_df(self):
yj = self.yeo_johnson_transformer.transform(self.valid_imputed_numeric_df)
columns = self.valid_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def train_transformed_target(self):
if self.problem == "binary classification":
return self.train_data[self.target]
elif self.problem == "regression":
s = self.yeo_johnson_target_transformer.transform(
np.array(self.train_data[self.target]).reshape(-1, 1)
)
            s = pd.DataFrame(s, columns=[self.target])
            return s
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 11:19:12 2019
@author: salilsharma
"""
#Identify trucks; generate data for Biogeme
import json
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
from localfunctions import *
from clusterAlgorithm import *
import pickle
start_time = time.time()
Path='2-1_1'
Seg='seg1'
filter = 54.6
Masterpath='xxx'
whichDate=['10-02','10-03','10-04','10-06','10-09','10-10','10-11','10-12','10-13','10-16','10-17','10-18','10-19','10-20','10-23','10-24','10-25','10-26','10-27','10-30','10-31','11-01','11-02','11-03','11-06','11-07','11-08','11-09','11-10','11-13','11-14','11-15','11-16','11-17','11-20','11-21','11-22','11-23','11-24']
# Create an empty pandas dataframe
df1 = pd.DataFrame()
for datelist in range(0,len(whichDate)):
#Access master list for paths
    master_list = pd.read_pickle(Masterpath + 'Recursive_logit/ODP/' + Path + '/' + Path + "_masterList.pkl")
import streamlit as st
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from sklearn.linear_model import LinearRegression
import ipywidgets as widgets
st.write("""
# Hackaton Navi-Capital
Ferramenta para ajudar investidores a avaliar a pontuação ESG de empresas
"""
)
### open the desired dataframes
df_companies = pd.read_csv("datasets/companies_br.csv")
df_companies_financials = pd.read_csv("datasets/companies_financials_br.csv")
esg_scores = pd.read_csv("datasets/esg_scores_history_br.csv")
### dataframe manipulation
esg_scores = esg_scores[esg_scores.score_value != 0]
esg_scores = esg_scores.dropna()
esg_scores.drop_duplicates(subset=['assessment_year', 'company_id', 'aspect'], inplace = True)
df_companies = df_companies[df_companies.company_id.isin(esg_scores.company_id.unique())]  # duplicate companies have already been removed
df_companies_financials['ref_year'] = pd.to_datetime(df_companies_financials.ref_date)
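# --- Hypothetical continuation (not part of the original snippet) ---
# One way to chart a company's ESG score history with the plotly.graph_objects import above;
# the column names (company_id, aspect, assessment_year, score_value) are taken from the
# esg_scores frame used earlier, and the company chosen here is arbitrary.
example_company = esg_scores.company_id.iloc[0]
company_scores = esg_scores[esg_scores.company_id == example_company]
fig = go.Figure()
for aspect, grp in company_scores.groupby("aspect"):
    fig.add_trace(go.Scatter(x=grp.assessment_year, y=grp.score_value, mode="lines+markers", name=aspect))
st.plotly_chart(fig)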
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
sys.path.append("../")
from DAL import labConn2
from LOG import logs_APP as log
import plotly.express as px
# Use pandas to build a DataFrame from the Excel data
_arq = pd.read_excel(r"C:\Claro\Desenvolvimento\Python\server.xlsx")
import pandas as pd
import logging
modlog = logging.getLogger('capture.generate.calcs')
def mmolextension(reagentdf, rdict, experiment, reagent):
"""TODO Pendltoonize this docc"""
mmoldf = (pd.DataFrame(reagentdf))
portionmmoldf = pd.DataFrame()
for chemlistlocator, conc in (rdict['%s' %reagent].concs.items()):
listposition = chemlistlocator.split('m')[1]
chemnameint = int(listposition)
truechemname = rdict['%s'%reagent].chemicals[chemnameint-1]
newmmoldf = mmoldf * conc / 1000
oldheaders = newmmoldf.columns
newmmoldf.rename\
(columns={'Reagent%s (ul)'%reagent:'mmol_experiment%s_reagent%s_%s' \
%(experiment, reagent, truechemname)}, inplace=True)
modlog.info('dataframe columns: %s renamed to: %s'%(oldheaders, newmmoldf.columns))
portionmmoldf = pd.concat([portionmmoldf, newmmoldf], axis=1)
return portionmmoldf
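# Worked example of the conversion above (assuming volumes in µL and concentrations in mol/L):
# 100 µL of a 2 mol/L reagent -> 100 * 2 / 1000 = 0.2 mmol, i.e. 100e-6 L * 2 mol/L = 2e-4 mol.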
def finalmmolsums(chemicals, mmoldf):
"""TODO Pendltoonize this docc"""
    finalsummedmmols = pd.DataFrame()
# Notebook to transform OSeMOSYS output to same format as EGEDA
# Import relevant packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from openpyxl import Workbook
import xlsxwriter
import pandas.io.formats.excel
import glob
import re
# Path for OSeMOSYS output
path_output = './data/3_OSeMOSYS_output'
# Path for OSeMOSYS to EGEDA mapping
path_mapping = './data/2_Mapping_and_other'
# Where to save finalised dataframe
path_final = './data/4_Joined'
# OSeMOSYS results files
OSeMOSYS_filenames = glob.glob(path_output + "/*.xlsx")
# Reference filenames and net zero filenames
reference_filenames = list(filter(lambda k: 'reference' in k, OSeMOSYS_filenames))
netzero_filenames = list(filter(lambda y: 'net-zero' in y, OSeMOSYS_filenames))
# New 2018 data variable names
Mapping_sheets = list(pd.read_excel(path_mapping + '/OSeMOSYS_mapping_2021.xlsx', sheet_name = None).keys())[1:]
Mapping_file = pd.DataFrame()
for sheet in Mapping_sheets:
interim_map = pd.read_excel(path_mapping + '/OSeMOSYS_mapping_2021.xlsx', sheet_name = sheet, skiprows = 1)
Mapping_file = Mapping_file.append(interim_map).reset_index(drop = True)
# Moving everything from OSeMOSYS to EGEDA for TFC and TPES
Mapping_TFC_TPES = Mapping_file[Mapping_file['Balance'].isin(['TFC', 'TPES'])]
# And for transformation
Map_trans = Mapping_file[Mapping_file['Balance'] == 'TRANS'].reset_index(drop = True)
# A mapping just for i) power, ii) ref, own, sup and iii) hydrogen
Map_power = Map_trans[Map_trans['Sector'] == 'POW'].reset_index(drop = True)
Map_refownsup = Map_trans[Map_trans['Sector'].isin(['REF', 'SUP', 'OWN', 'HYD'])].reset_index(drop = True)
Map_hydrogen = Map_trans[Map_trans['Sector'] == 'HYD'].reset_index(drop = True)
# Define unique workbook and sheet combinations for TFC and TPES
Unique_TFC_TPES = Mapping_TFC_TPES.groupby(['Workbook', 'Sheet_energy']).size().reset_index().loc[:, ['Workbook', 'Sheet_energy']]
# Define unique workbook and sheet combinations for Transformation
Unique_trans = Map_trans.groupby(['Workbook', 'Sheet_energy']).size().reset_index().loc[:, ['Workbook', 'Sheet_energy']]
################################### TFC and TPES #############################################################
# Determine list of files to read based on the workbooks identified in the mapping file for REFERENCE scenario
ref_file_df = pd.DataFrame()
for i in range(len(Unique_TFC_TPES['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in reference_filenames if Unique_TFC_TPES['Workbook'].unique()[i] in entry],
'Workbook': Unique_TFC_TPES['Workbook'].unique()[i]})
ref_file_df = ref_file_df.append(_file)
ref_file_df = ref_file_df.merge(Unique_TFC_TPES, how = 'outer', on = 'Workbook')
# Determine list of files to read based on the workbooks identified in the mapping file for NET-ZERO scenario
netz_file_df = pd.DataFrame()
for i in range(len(Unique_TFC_TPES['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in netzero_filenames if Unique_TFC_TPES['Workbook'].unique()[i] in entry],
'Workbook': Unique_TFC_TPES['Workbook'].unique()[i]})
netz_file_df = netz_file_df.append(_file)
netz_file_df = netz_file_df.merge(Unique_TFC_TPES, how = 'outer', on = 'Workbook')
# Create empty dataframe to store REFERENCE aggregated results
ref_aggregate_df1 = pd.DataFrame(columns = ['TECHNOLOGY', 'FUEL', 'REGION', 2050])
# Now read in the OSeMOSYS output files so that they're all in one data frame (ref_aggregate_df1)
if ref_file_df['File'].isna().any() == False:
for i in range(ref_file_df.shape[0]):
_df = pd.read_excel(ref_file_df.iloc[i, 0], sheet_name = ref_file_df.iloc[i, 2])
_df['Workbook'] = ref_file_df.iloc[i, 1]
_df['Sheet_energy'] = ref_file_df.iloc[i, 2]
ref_aggregate_df1 = ref_aggregate_df1.append(_df)
interim_df1 = ref_aggregate_df1[ref_aggregate_df1['TIMESLICE'] != 'ONE']
interim_df2 = ref_aggregate_df1[ref_aggregate_df1['TIMESLICE'] == 'ONE']
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION', 'Workbook', 'Sheet_energy']).sum().reset_index()
ref_aggregate_df1 = interim_df2.append(interim_df1).reset_index(drop = True)
# bunkers draw downs and build. Need to change stock build to negative
interim_stock1 = ref_aggregate_df1[ref_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()\
.set_index(['TECHNOLOGY', 'FUEL', 'REGION', 'TIMESLICE', 'Workbook', 'Sheet_energy'])
interim_stock2 = ref_aggregate_df1[~ref_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()
interim_stock1 = interim_stock1 * -1
interim_stock1 = interim_stock1.reset_index()
# Stitch back together
ref_aggregate_df1 = interim_stock2.append(interim_stock1).reset_index(drop = True)
# Create empty dataframe to store NET ZERO aggregated results
netz_aggregate_df1 = pd.DataFrame(columns = ['TECHNOLOGY', 'FUEL', 'REGION', 2050])
# Now read in the OSeMOSYS output files so that they're all in one data frame (netz_aggregate_df1)
if netz_file_df['File'].isna().any() == False:
for i in range(netz_file_df.shape[0]):
_df = pd.read_excel(netz_file_df.iloc[i, 0], sheet_name = netz_file_df.iloc[i, 2])
_df['Workbook'] = netz_file_df.iloc[i, 1]
_df['Sheet_energy'] = netz_file_df.iloc[i, 2]
netz_aggregate_df1 = netz_aggregate_df1.append(_df)
interim_df1 = netz_aggregate_df1[netz_aggregate_df1['TIMESLICE'] != 'ONE']
interim_df2 = netz_aggregate_df1[netz_aggregate_df1['TIMESLICE'] == 'ONE']
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION', 'Workbook', 'Sheet_energy']).sum().reset_index()
netz_aggregate_df1 = interim_df2.append(interim_df1).reset_index(drop = True)
# bunkers draw downs and build. Need to change stock build to negative
interim_stock1 = netz_aggregate_df1[netz_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()\
.set_index(['TECHNOLOGY', 'FUEL', 'REGION', 'TIMESLICE', 'Workbook', 'Sheet_energy'])
interim_stock2 = netz_aggregate_df1[~netz_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()
interim_stock1 = interim_stock1 * -1
interim_stock1 = interim_stock1.reset_index()
# Stitch back together
netz_aggregate_df1 = interim_stock2.append(interim_stock1).reset_index(drop = True)
# Now aggregate all the results for APEC
# REFERENCE
APEC_ref = ref_aggregate_df1.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
APEC_ref['REGION'] = 'APEC'
ref_aggregate_df1 = ref_aggregate_df1.append(APEC_ref).reset_index(drop = True)
# NET ZERO
APEC_netz = netz_aggregate_df1.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
APEC_netz['REGION'] = 'APEC'
netz_aggregate_df1 = netz_aggregate_df1.append(APEC_netz).reset_index(drop = True)
# Now aggregate results for 22_SEA
# Southeast Asia: 02, 07, 10, 15, 17, 19, 21
# REFERENCE
SEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
SEA_ref['REGION'] = '22_SEA'
ref_aggregate_df1 = ref_aggregate_df1.append(SEA_ref).reset_index(drop = True)
# NET ZERO
SEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
SEA_netz['REGION'] = '22_SEA'
netz_aggregate_df1 = netz_aggregate_df1.append(SEA_netz).reset_index(drop = True)
# Aggregate results for 23_NEA
# Northeast Asia: 06, 08, 09, 18
# REFERENCE
NEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
NEA_ref['REGION'] = '23_NEA'
ref_aggregate_df1 = ref_aggregate_df1.append(NEA_ref).reset_index(drop = True)
# NET ZERO
NEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
NEA_netz['REGION'] = '23_NEA'
netz_aggregate_df1 = netz_aggregate_df1.append(NEA_netz).reset_index(drop = True)
# Aggregate results for 23b_ONEA
# ONEA: 06, 09, 18
# REFERENCE
ONEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
ONEA_ref['REGION'] = '23b_ONEA'
ref_aggregate_df1 = ref_aggregate_df1.append(ONEA_ref).reset_index(drop = True)
# NET ZERO
ONEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
ONEA_netz['REGION'] = '23b_ONEA'
netz_aggregate_df1 = netz_aggregate_df1.append(ONEA_netz).reset_index(drop = True)
# Aggregate results for 24_OAM
# OAM: 03, 04, 11, 14
# REFERENCE
OAM_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OAM_ref['REGION'] = '24_OAM'
ref_aggregate_df1 = ref_aggregate_df1.append(OAM_ref).reset_index(drop = True)
# NET ZERO
OAM_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OAM_netz['REGION'] = '24_OAM'
netz_aggregate_df1 = netz_aggregate_df1.append(OAM_netz).reset_index(drop = True)
# Aggregate results for 24b_OOAM
# OOAM: 04, 11, 14
# REFERENCE
OOAM_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OOAM_ref['REGION'] = '24b_OOAM'
ref_aggregate_df1 = ref_aggregate_df1.append(OOAM_ref).reset_index(drop = True)
# NET ZERO
OOAM_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OOAM_netz['REGION'] = '24b_OOAM'
netz_aggregate_df1 = netz_aggregate_df1.append(OOAM_netz).reset_index(drop = True)
# Aggregate results for 25_OCE
# Oceania: 01, 12, 13
# REFERENCE
OCE_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OCE_ref['REGION'] = '25_OCE'
ref_aggregate_df1 = ref_aggregate_df1.append(OCE_ref).reset_index(drop = True)
# NET ZERO
OCE_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OCE_netz['REGION'] = '25_OCE'
netz_aggregate_df1 = netz_aggregate_df1.append(OCE_netz).reset_index(drop = True)
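# Note: the seven region-aggregation blocks above all follow the same pattern; a loop over a
# mapping of group name -> member economies would do the same job. Sketch only (kept commented
# out so the aggregates are not appended twice):
# region_groups = {
#     '22_SEA': ['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'],
#     '25_OCE': ['01_AUS', '12_NZ', '13_PNG'],
# }
# for region, members in region_groups.items():
#     agg = ref_aggregate_df1[ref_aggregate_df1['REGION'].isin(members)]\
#         .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
#     agg['REGION'] = region
#     ref_aggregate_df1 = ref_aggregate_df1.append(agg).reset_index(drop = True)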
# Get maximum REFERENCE year column to build data frame below
ref_year_columns = []
for item in list(ref_aggregate_df1.columns):
try:
ref_year_columns.append(int(item))
except ValueError:
pass
max_year_ref = max(ref_year_columns)
OSeMOSYS_years_ref = list(range(2017, max_year_ref + 1))
# Get maximum NET ZERO year column to build data frame below
netz_year_columns = []
for item in list(netz_aggregate_df1.columns):
try:
netz_year_columns.append(int(item))
except ValueError:
pass
max_year_netz = max(netz_year_columns)
OSeMOSYS_years_netz = list(range(2017, max_year_netz + 1))
#################################################################################################
### ADJUNCT: last-minute grab of LNG/pipeline imports and exports, which come only from OSeMOSYS
# This script is a bit messy in that ref_aggregate_df1 is built in two chunks.
# The grab is placed here so it uses the first ref_aggregate_df1, which is more comprehensive,
# i.e. it has region aggregates such as OOAM, OCE and APEC in addition to the economies
ref_lngpipe_1 = ref_aggregate_df1[ref_aggregate_df1['TECHNOLOGY'].isin(['SUP_8_1_natural_gas_import',\
'SUP_8_2_lng_import', 'SUP_8_1_natural_gas_export', 'SUP_8_2_lng_export'])].copy()\
.loc[:, ['REGION', 'TECHNOLOGY'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_lngpipe_1.to_csv(path_final + '/lngpipe_reference.csv', index = False)
netz_lngpipe_1 = netz_aggregate_df1[netz_aggregate_df1['TECHNOLOGY'].isin(['SUP_8_1_natural_gas_import',\
'SUP_8_2_lng_import', 'SUP_8_1_natural_gas_export', 'SUP_8_2_lng_export'])].copy()\
.loc[:, ['REGION', 'TECHNOLOGY'] + OSeMOSYS_years_netz].reset_index(drop = True)
netz_lngpipe_1.to_csv(path_final + '/lngpipe_netzero.csv', index = False)
###################################################################################################
########################## fuel_code aggregations ##########################
# First level
coal_fuels = ['1_1_coking_coal', '1_5_lignite', '1_x_coal_thermal']
oil_fuels = ['6_1_crude_oil', '6_x_ngls']
petrol_fuels = ['7_1_motor_gasoline', '7_2_aviation_gasoline', '7_3_naphtha', '7_x_jet_fuel', '7_6_kerosene', '7_7_gas_diesel_oil',
'7_8_fuel_oil', '7_9_lpg', '7_10_refinery_gas_not_liquefied', '7_11_ethane', '7_x_other_petroleum_products']
gas_fuels = ['8_1_natural_gas', '8_2_lng', '8_3_gas_works_gas']
biomass_fuels = ['15_1_fuelwood_and_woodwaste', '15_2_bagasse', '15_3_charcoal', '15_4_black_liquor', '15_5_other_biomass']
other_fuels = ['16_1_biogas', '16_2_industrial_waste', '16_3_municipal_solid_waste_renewable', '16_4_municipal_solid_waste_nonrenewable', '16_5_biogasoline', '16_6_biodiesel',
'16_7_bio_jet_kerosene', '16_8_other_liquid_biofuels', '16_9_other_sources', '16_x_hydrogen']
# Total
total_fuels = ['1_coal', '2_coal_products', '5_oil_shale_and_oil_sands', '6_crude_oil_and_ngl', '7_petroleum_products', '8_gas', '9_nuclear', '10_hydro', '11_geothermal',
'12_solar', '13_tide_wave_ocean', '14_wind', '15_solid_biomass', '16_others', '17_electricity', '18_heat']
# total_renewables to be completed
##############################################################################
# item_code_new aggregations
# Lowest level
industry_agg = ['14_1_iron_and_steel', '14_2_chemical_incl_petrochemical', '14_3_non_ferrous_metals', '14_4_nonmetallic_mineral_products', '14_5_transportation_equipment',
'14_6_machinery', '14_7_mining_and_quarrying', '14_8_food_beverages_and_tobacco', '14_9_pulp_paper_and_printing', '14_10_wood_and_wood_products',
'14_11_construction', '14_12_textiles_and_leather', '14_13_nonspecified_industry']
transport_agg = ['15_1_domestic_air_transport', '15_2_road', '15_3_rail', '15_4_domestic_navigation', '15_5_pipeline_transport', '15_6_nonspecified_transport']
others_agg = ['16_1_commercial_and_public_services', '16_2_residential', '16_3_agriculture', '16_4_fishing', '16_5_nonspecified_others']
# Then first level
tpes_agg = ['1_indigenous_production', '2_imports', '3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers', '6_stock_change']
tfc_agg = ['14_industry_sector', '15_transport_sector', '16_other_sector', '17_nonenergy_use']
tfec_agg = ['14_industry_sector', '15_transport_sector', '16_other_sector']
# For dataframe finalising
key_variables = ['economy', 'fuel_code', 'item_code_new']
#######################################################################################################################
# REFERENCE
# Now aggregate data based on the mapping
# That is group by REGION, TECHNOLOGY and FUEL
# First create empty dataframe
ref_aggregate_df2 = pd.DataFrame()
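# The snippet is truncated here. A sketch of the step described above (an assumption, not the
# original code; the mapping-side merge columns are guesses and may differ in the real workbook):
# ref_grouped = ref_aggregate_df1.groupby(['REGION', 'TECHNOLOGY', 'FUEL']).sum().reset_index()
# ref_aggregate_df2 = ref_grouped.merge(Mapping_TFC_TPES, how='left', on=['TECHNOLOGY', 'FUEL'])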
'''
Created on May 16, 2018
@author: cef
significant scripts for calculating damage within the ABMRI framework
for secondary data loader scripts, see fdmg.datos.py
'''
#===============================================================================
# # IMPORT STANDARD MODS -------------------------------------------------------
#===============================================================================
import logging, os, time, re, math, copy, gc, weakref
"""
unused
sys, imp,
"""
import pandas as pd
import numpy as np
import scipy.integrate
#===============================================================================
# shortcuts
#===============================================================================
#from collections import OrderedDict
from hp.dict import MyOrderedDict as OrderedDict
from weakref import WeakValueDictionary as wdict
from weakref import proxy
from hp.basic import OrderedSet
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
import hp.plot
import hp.basic
import hp.pd
import hp.oop
import hp.data
import fdmg.datos as datos
import matplotlib.pyplot as plt
import matplotlib
#import matplotlib.animation as animation #load the animation module (with the new search path)
import udev.scripts
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initilized')
#===============================================================================
#module level defaults ------------------------------------------------------
#===============================================================================
#datapars_cols = [u'dataname', u'desc', u'datafile_tailpath', u'datatplate_tailpath', u'trim_row'] #headers in the data tab
datafile_types_list = ['.csv', '.xls']
idx = pd.IndexSlice
class Fdmg( #flood damage model
hp.sel.Sel_controller, #no init
hp.dyno.Dyno_wrap, #add some empty containers
hp.plot.Plot_o, #build the label
hp.sim.Sim_model, #Sim_wrap: attach the reset_d. Sim_model: inherit attributes
hp.oop.Trunk_o, #no init
#Parent_cmplx: attach empty kids_sd
#Parent: set some defaults
hp.oop.Child):
"""
#===========================================================================
# INPUTS
#===========================================================================
pars_path ==> pars_file.xls
main external parameter spreadsheet.
See description in file for each column
dataset parameters
tab = 'data'. expected columns: datapars_cols
session parameters
tab = 'gen'. expected rows: sessionpars_rows
"""
#===========================================================================
# program parameters
#===========================================================================
name = 'fdmg'
#list of attribute names to try and inherit from the session
try_inherit_anl = set(['ca_ltail', 'ca_rtail', 'mind', \
'dbg_fld_cnt', 'legacy_binv_f', 'gis_area_max', \
'fprob_mult', 'flood_tbl_nm', 'gpwr_aep', 'dmg_rat_f',\
'joist_space', 'G_anchor_ht', 'bsmt_opn_ht_code','bsmt_egrd_code', \
'damp_func_code', 'cont_val_scale', 'hse_skip_depth', \
'area_egrd00', 'area_egrd01', 'area_egrd02',
'fhr_nm', 'write_fdmg_sum', 'dfeat_xclud_price', 'write_fdmg_sum_fly'])
fld_aep_spcl = 100 #special flood to try and include in db runs
bsmt_egrd = 'wet' #default value for bsmt_egrd
    legacy_binv_f = True #flag to indicate that the binv is in legacy format (use indices rather than column labels)
    gis_area_max = 3500 #note: overridden by the gis_area_max assignment a few lines below
    #list of data object names expected on the fdmg tab
#state = 'na' #for tracking what flood aep is currently in the model
'consider allowing the user control of these'
gis_area_min = 5
gis_area_max = 5000
write_fdmg_sum_fly = False
write_dmg_fly_first = True #start off to signifiy first run
#===========================================================================
# debuggers
#===========================================================================
beg_hist_df = None
#===========================================================================
# user provided values
#===========================================================================
#legacy pars
floor_ht = 0.0
mind = '' #column to match between data sets and name the house objects
#EAD calc
    ca_ltail = 'flat'
    ca_rtail = 2 #aep at which zero value is assumed. 'none' uses lowest aep in flood set
#Floodo controllers
gpwr_aep = 100 #default max aep where gridpower_f = TRUE (when the power shuts off)
dbg_fld_cnt = 0
#area exposure
area_egrd00 = None
area_egrd01 = None
area_egrd02 = None
#Dfunc controllers
place_codes = None
dmg_types = None
flood_tbl_nm = None #name of the flood table to use
#timeline deltas
    'just keeping this on the fdmg for simplicity.. no need for flood level heterogeneity'
wsl_delta = 0.0
fprob_mult = 1.0 #needs to be a float for type matching
dmg_rat_f = False
#Fdmg.House pars
joist_space = 0.3
G_anchor_ht = 0.6
bsmt_egrd_code = 'plpm'
damp_func_code = 'seep'
bsmt_opn_ht_code = '*min(2.0)'
hse_skip_depth = -4 #depth to skip house damage calc
fhr_nm = ''
cont_val_scale = .25
write_fdmg_sum = True
dfeat_xclud_price = 0.0
#===========================================================================
# calculation parameters
#===========================================================================
res_fancy = None
gpwr_f = True #placeholder for __init__ calcs
fld_aep_l = None
dmg_dx_base = None #results frame for writing
plotr_d = None #dictionary of EAD plot workers
dfeats_d = dict() #{tag:dfeats}. see raise_all_dfeats()
fld_pwr_cnt = 0
seq = 0
#damage results/stats
dmgs_df = None
dmgs_df_wtail = None #damage summaries with damages for the tail logic included
ead_tot = 0
dmg_tot = 0
#===========================================================================
# calculation data holders
#===========================================================================
dmg_dx = None #container for full run results
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self,*vars, **kwargs):
logger = mod_logger.getChild('Fdmg')
#=======================================================================
# initilize cascade
#=======================================================================
super(Fdmg, self).__init__(*vars, **kwargs) #initilzie teh baseclass
#=======================================================================
# object updates
#=======================================================================
self.reset_d.update({'ead_tot':0, 'dmgs_df':None, 'dmg_dx':None,\
                              'wsl_delta':0}) #update the reset attributes
#=======================================================================
# pre checks
#=======================================================================
self.check_pars() #check the data loaded on your tab
if not self.session._write_data:
self.write_fdmg_sum = False
#=======================================================================
#setup functions
#=======================================================================
#par cleaners/ special loaders
logger.debug("load_hse_geo() \n")
self.load_hse_geo()
logger.info('load and clean dfunc data \n')
self.load_pars_dfunc(self.session.pars_df_d['dfunc']) #load the data functions to damage type table
logger.debug('\n')
self.setup_dmg_dx_cols()
logger.debug('load_submodels() \n')
self.load_submodels()
logger.debug('init_dyno() \n')
self.init_dyno()
#outputting setup
if self.write_fdmg_sum_fly:
self.fly_res_fpath = os.path.join(self.session.outpath, '%s fdmg_res_fly.csv'%self.session.tag)
if self.db_f:
if not self.model.__repr__() == self.__repr__():
raise IOError
logger.info('Fdmg model initialized as \'%s\' \n'%(self.name))
def check_pars(self): #check your data pars
df_raw = self.session.pars_df_d['datos']
#=======================================================================
# check mandatory data objects
#=======================================================================
if not 'binv' in df_raw['name'].tolist():
raise IOError
#=======================================================================
# check optional data objects
#=======================================================================
fdmg_tab_nl = ['rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl']
boolidx = df_raw['name'].isin(fdmg_tab_nl)
if not np.all(boolidx):
raise IOError #passed some unexpected data names
return
def load_submodels(self):
logger = self.logger.getChild('load_submodels')
self.state = 'load'
#=======================================================================
# data objects
#=======================================================================
        'this is the main loader that builds all the children as specified on the data tab'
logger.info('loading dat objects from \'fdmg\' tab')
logger.debug('\n \n')
#build datos from teh data tab
        'todo: hard code these class types (rather than reading from the control file)'
self.fdmgo_d = self.raise_children_df(self.session.pars_df_d['datos'], #df to raise on
kid_class = None) #should raise according to df entry
self.session.prof(state='load.fdmg.datos')
'WARNING: fdmgo_d is not set until after ALL the children on this tab are raised'
#attach special children
self.binv = self.fdmgo_d['binv']
"""NO! this wont hold resetting updates
self.binv_df = self.binv.childmeta_df"""
#=======================================================================
# flood tables
#=======================================================================
self.ftblos_d = self.raise_children_df(self.session.pars_df_d['flood_tbls'], #df to raise on
kid_class = datos.Flood_tbl) #should raise according to df entry
        'initial call which only updates the binv_df'
self.set_area_prot_lvl()
if 'fhr_tbl' in self.fdmgo_d.keys():
self.set_fhr()
#=======================================================================
# dfeats
#======================================================================
if self.session.load_dfeats_first_f & self.session.wdfeats_f:
logger.debug('raise_all_dfeats() \n')
self.dfeats_d = self.fdmgo_d['dfeat_tbl'].raise_all_dfeats()
#=======================================================================
# raise houses
#=======================================================================
logger.info('raising houses')
logger.debug('\n')
self.binv.raise_houses()
self.session.prof(state='load.fdmg.houses')
'calling this here so all of the other datos are raised'
#self.rfda_curve = self.fdmgo_d['rfda_curve']
"""No! we need to get this in before the binv.reset_d['childmeta_df'] is set
self.set_area_prot_lvl() #apply the area protectino from teh named flood table"""
logger.info('loading floods')
logger.debug('\n \n')
self.load_floods()
self.session.prof(state='load.fdmg.floods')
logger.debug("finished with %i kids\n"%len(self.kids_d))
return
def setup_dmg_dx_cols(self): #get teh columns to use for fdmg results
"""
This is setup to generate a unique set of ordered column names with this logic
take the damage types
add mandatory fields
add user provided fields
"""
logger = self.logger.getChild('setup_dmg_dx_cols')
#=======================================================================
#build the basic list of column headers
#=======================================================================
#damage types at the head
        col_os = OrderedSet(self.dmg_types) #put the damage types first
#basic add ons
_ = col_os.update(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el'])
#=======================================================================
# special logic
#=======================================================================
if self.dmg_rat_f:
for dmg_type in self.dmg_types:
_ = col_os.add('%s_rat'%dmg_type)
if not self.wsl_delta==0:
col_os.add('wsl_raw')
"""This doesnt handle runs where we start with a delta of zero and then add some later
for these, you need to expplicitly call 'wsl_raw' in the dmg_xtra_cols_fat"""
#ground water damage
if 'dmg_gw' in self.session.outpars_d['Flood']:
col_os.add('gw_f')
#add the dem if necessary
if 'gw_f' in col_os:
col_os.add('dem_el')
#=======================================================================
# set pars based on user provided
#=======================================================================
#s = self.session.outpars_d[self.__class__.__name__]
#extra columns for damage resulst frame
if self.db_f or self.session.write_fdmg_fancy:
logger.debug('including extra columns in outputs')
            #clean the extra cols
'todo: move this to a helper'
if hasattr(self.session, 'xtra_cols'):
try:
dc_l = eval(self.session.xtra_cols) #convert to a list
except:
logger.error('failed to convert \'xtra_cols\' to a list. check formatting')
raise IOError
else:
dc_l = ['wsl_raw', 'gis_area', 'hse_type', 'B_f_height', 'BS_ints','gw_f']
if not isinstance(dc_l, list): raise IOError
col_os.update(dc_l) #add these
self.dmg_df_cols = col_os
logger.debug('set dmg_df_cols as: %s'%self.dmg_df_cols)
return
def load_pars_dfunc(self, df_raw=None): #build a df from the dfunc tab
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_pars_dfunc')
dfunc_ecols = ['place_code','dmg_code','dfunc_type','anchor_ht_code']
if df_raw is None:
df_raw = self.session.pars_df_d['dfunc']
logger.debug('from df %s: \n %s'%(str(df_raw.shape), df_raw))
#=======================================================================
# clean
#=======================================================================
df1 = df_raw.dropna(axis='columns', how='all')
df2 = df1.dropna(axis='index', how='all') #drop rows with all na
#column check
if not hp.pd.header_check(df2, dfunc_ecols, logger=logger):
raise IOError
#=======================================================================
# custom columns
#=======================================================================
df3 = df2.copy(deep=True)
df3['dmg_type'] = df3['place_code'] + df3['dmg_code']
df3['name'] = df3['dmg_type']
#=======================================================================
# data loading
#=======================================================================
if 'tailpath' in df3.columns:
boolidx = ~pd.isnull(df3['tailpath']) #get dfuncs with data requests
self.load_raw_dfunc(df3[boolidx])
df3 = df3.drop(['headpath', 'tailpath'], axis = 1, errors='ignore') #drop these columns
#=======================================================================
# garage checking
#=======================================================================
boolidx = np.logical_and(df3['place_code'] == 'G', df3['dfunc_type'] == 'rfda')
if np.any(boolidx):
logger.error('got dfunc_type = rfda for a garage curve (no such thing)')
raise IOError
#=======================================================================
# get special lists
#=======================================================================
#dmg_types
self.dmg_types = df3['dmg_type'].tolist()
#damage codes
boolidx = df3['place_code'].str.contains('total')
self.dmg_codes = df3.loc[~boolidx, 'dmg_code'].unique().tolist()
#place_codes
place_codes = df3['place_code'].unique().tolist()
if 'total' in place_codes: place_codes.remove('total')
self.place_codes = place_codes
self.session.pars_df_d['dfunc'] = df3
logger.debug('dfunc_df with %s'%str(df3.shape))
#=======================================================================
# get slice for houses
#=======================================================================
#identify all the entries except total
boolidx = df3['place_code'] != 'total'
self.house_childmeta_df = df3[boolidx] #get this trim
"""
hp.pd.v(df3)
"""
def load_hse_geo(self): #special loader for hse_geo dxcol (from tab hse_geo)
logger = self.logger.getChild('load_hse_geo')
#=======================================================================
# load and clean the pars
#=======================================================================
df_raw = hp.pd.load_xls_df(self.session.parspath,
sheetname = 'hse_geo', header = [0,1], logger = logger)
df = df_raw.dropna(how='all', axis = 'index')
self.session.pars_df_d['hse_geo'] = df
#=======================================================================
# build a blank starter for each house to fill
#=======================================================================
omdex = df.columns #get the original mdex
'probably a cleaner way of doing this'
lvl0_values = omdex.get_level_values(0).unique().tolist()
lvl1_values = omdex.get_level_values(1).unique().tolist()
lvl1_values.append('t')
newcols = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['place_code','finish_code'])
geo_dxcol = pd.DataFrame(index = df.index, columns = newcols) #make the frame
self.geo_dxcol_blank = geo_dxcol
if self.db_f:
if np.any(pd.isnull(df)):
raise IOError
l = geo_dxcol.index.tolist()
if not l == [u'area', u'height', u'per', u'inta']:
raise IOError
return
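#=======================================================================
# hedged sketch: blank dxcol construction (illustration only)
#=======================================================================
# A minimal standalone sketch of the blank geo dxcol built above with
# pd.MultiIndex.from_product. The 'B'/'M' place codes and 'f'/'u'
# finish codes are assumptions for illustration, not values read from
# the pars file.
#
#   import pandas as pd
#   lvl0_values = ['B', 'M']                 #hypothetical place codes
#   lvl1_values = ['f', 'u'] + ['t']         #finish codes with 't' appended, as above
#   newcols = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
#                                        names=['place_code', 'finish_code'])
#   geo_dxcol = pd.DataFrame(index=['area', 'height', 'per', 'inta'], columns=newcols)
#   #individual cells are later filled per house, e.g.:
#   #geo_dxcol.loc['area', ('B', 'f')] = 100.0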
def load_raw_dfunc(self, meta_df_raw): #load raw data for dfuncs
logger = self.logger.getChild('load_raw_dfunc')
logger.debug('with df \'%s\''%(str(meta_df_raw.shape)))
d = dict() #empty container
meta_df = meta_df_raw.copy()
#=======================================================================
# loop through each row and load the data
#=======================================================================
for indx, row in meta_df.iterrows():
inpath = os.path.join(row['headpath'], row['tailpath'])
df = hp.pd.load_smart_df(inpath,
index_col =None,
logger = logger)
d[row['name']] = df.dropna(how = 'all', axis = 'index') #store this into the dictionary
logger.info('finished loading raw dcurve data on %i dcurves: %s'%(len(d), d.keys()))
self.dfunc_raw_d = d
return
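#=======================================================================
# hedged sketch: per-row raw curve loading (illustration only)
#=======================================================================
# Each meta row supplies a head/tail path and a name, and the loaded
# frame is stored in a dict keyed by that name, as load_raw_dfunc()
# does above. The file names and the plain pandas reader used here are
# assumptions (the model itself goes through hp.pd.load_smart_df).
#
#   import os
#   import pandas as pd
#   meta_df = pd.DataFrame({'name':     ['BS', 'MS'],
#                           'headpath': ['C:/curves', 'C:/curves'],
#                           'tailpath': ['bs.csv', 'ms.csv']})   #hypothetical paths
#   d = dict()
#   for _, row in meta_df.iterrows():
#       fp = os.path.join(row['headpath'], row['tailpath'])
#       d[row['name']] = pd.read_csv(fp).dropna(how='all', axis='index')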
def load_floods(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_floods')
logger.debug('setting floods df \n')
self.set_floods_df()
df = self.floods_df
logger.debug('raising floods \n')
d = self.raise_children_df(df, #build flood children
kid_class = Flood,
dup_sibs_f= True,
container = OrderedDict) #pass attributes from one to the next
#=======================================================================
# ordered by aep
#=======================================================================
fld_aep_od = OrderedDict()
for childname, childo in d.iteritems():
if hasattr(childo, 'ari'):
fld_aep_od[childo.ari] = childo
else: raise IOError
logger.info('raised and bundled %i floods by aep'%len(fld_aep_od))
self.fld_aep_od = fld_aep_od
return
def set_floods_df(self): #build the flood meta data
logger = self.logger.getChild('set_floods_df')
df_raw = self.session.pars_df_d['floods']
df1 = df_raw.sort_values('ari').reset_index(drop=True)
df1['ari'] = df1['ari'].astype(np.int)
#=======================================================================
# slice for debug set
#=======================================================================
if self.db_f & (not self.dbg_fld_cnt == 'all'):
#check that we even have enough to do the slicing
if len(df1) < 2:
logger.error('too few floods for debug slicing. pass dbg_fld_cnt == all')
raise IOError
df2 = pd.DataFrame(columns = df1.columns) #make blank starter frame
dbg_fld_cnt = int(self.dbg_fld_cnt)
logger.info('db_f=TRUE. selecting %i (of %i) floods'%(dbg_fld_cnt, len(df1)))
#===================================================================
# try to pull out and add the 100yr
#===================================================================
try:
boolidx = df1.loc[:,'ari'] == self.fld_aep_spcl
if not boolidx.sum() == 1:
logger.debug('failed to locate 1 flood')
raise IOError
df2 = df2.append(df1[boolidx]) #add this row to the end
df1 = df1[~boolidx] #slice out this row
dbg_fld_cnt = max(0, dbg_fld_cnt - 1) #reduce the loop count by 1
dbg_fld_cnt = min(dbg_fld_cnt, len(df1)) #double check in case we are given a very short set
logger.debug('added the %s year flood to the list with dbg_fld_cnt %i'%(self.fld_aep_spcl, dbg_fld_cnt))
except:
logger.debug('failed to extract the special %i flood'%self.fld_aep_spcl)
df2 = df1.copy()
#===================================================================
# build list of extreme (low/high) floods
#===================================================================
evn_cnt = 0
odd_cnt = 0
for cnt in range(0, dbg_fld_cnt, 1):
if cnt % 2 == 0: #evens. pull from front
idxr = evn_cnt
evn_cnt += 1
else: #odds. pull from end
idxr = len(df1) - odd_cnt - 1
odd_cnt += 1
logger.debug('pulling flood with indexer %i'%(idxr))
ser = df1.iloc[idxr, :] #make this slice
df2 = df2.append(ser) #append this to the end
#clean up
df = df2.drop_duplicates().sort_values('ari').reset_index(drop=True)
logger.debug('built extremes flood df with %i aeps: %s'%(len(df), df.loc[:,'ari'].values.tolist()))
if not len(df) == int(self.dbg_fld_cnt):
raise IOError
else:
df = df1.copy()
if not len(df) > 0: raise IOError
self.floods_df = df
return
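#=======================================================================
# hedged sketch: debug flood selection (illustration only)
#=======================================================================
# The debug slice above alternately pulls rows from the front (small
# ari) and the back (large ari) of the ari-sorted table until
# dbg_fld_cnt rows are selected. The ari values here are illustrative.
#
#   import pandas as pd
#   df1 = pd.DataFrame({'ari': [2, 5, 10, 50, 100, 500]})
#   dbg_fld_cnt, evn_cnt, odd_cnt = 4, 0, 0
#   picks = []
#   for cnt in range(dbg_fld_cnt):
#       if cnt % 2 == 0:                          #evens: pull from the front
#           idxr = evn_cnt; evn_cnt += 1
#       else:                                     #odds: pull from the end
#           idxr = len(df1) - odd_cnt - 1; odd_cnt += 1
#       picks.append(df1.iloc[idxr]['ari'])
#   #picks -> [2, 500, 5, 100]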
def set_area_prot_lvl(self): #assign the area_prot_lvl to the binv based on your tab
#logger = self.logger.getChild('set_area_prot_lvl')
"""
TODO: Consider moving this onto the binv and making the binv dynamic...
Calls:
handles for flood_tbl_nm
"""
logger = self.logger.getChild('set_area_prot_lvl')
logger.debug('assigning \'area_prot_lvl\' for \'%s\''%self.flood_tbl_nm)
#=======================================================================
# get data
#=======================================================================
ftbl_o = self.ftblos_d[self.flood_tbl_nm] #get the activated flood table object
ftbl_o.apply_on_binv('aprot_df', 'area_prot_lvl')
"""
hp.pd.v(binv_df)
type(df.iloc[:, 0])
"""
return True
def set_fhr(self): #assign the fhz bfe and zone from the fhr_tbl data
logger = self.logger.getChild('set_fhr')
logger.debug('assigning for \'fhz\' and \'bfe\'')
#get the data for this fhr set
fhr_tbl_o = self.fdmgo_d['fhr_tbl']
try:
df = fhr_tbl_o.d[self.fhr_nm]
except:
if not self.fhr_nm in fhr_tbl_o.d.keys():
logger.error('could not find selected fhr_nm \'%s\' in the loaded rule sets: \n %s'
%(self.fhr_nm, fhr_tbl_o.d.keys()))
raise IOError
#=======================================================================
# loop through each series and apply
#=======================================================================
"""
not the most generic way of handling this...
todo:
add generic method to the binv
can take ser or df
updates the childmeta_df if before init
updates the children if after init
"""
for hse_attn in ['fhz', 'bfe']:
ser = df[hse_attn]
if not self.session.state == 'init':
#=======================================================================
# tell the binv to update its houses
#=======================================================================
self.binv.set_all_hse_atts(hse_attn, ser = ser)
else:
logger.debug('set column \'%s\' onto the binv_df'%hse_attn)
self.binv.childmeta_df.loc[:,hse_attn] = ser #set this column in the binv_df
"""I dont like this
fhr_tbl_o.apply_on_binv('fhz_df', 'fhz', coln = self.fhr_nm)
fhr_tbl_o.apply_on_binv('bfe_df', 'bfe', coln = self.fhr_nm)"""
return True
def get_all_aeps_classic(self): #get the list of flood aeps from the classic flood table format
'kept this special syntax reader separate in case we want to change the format of the flood tables'
flood_pars_df = self.session.pars_df_d['floods'] #load the data from the flood table
fld_aep_l = flood_pars_df.loc[:, 'ari'].values #pull the ari values as an array
return fld_aep_l
def run(self, **kwargs): #placeholder for simulation runs
logger = self.logger.getChild('run')
logger.debug('on run_cnt %i'%self.run_cnt)
self.run_cnt += 1
self.state='run'
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if not isinstance(self.outpath, basestring):
raise IOError
logger.info('\n fdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmg')
logger.info('for run_cnt %i'%self.run_cnt)
self.calc_fld_set(**kwargs)
return
def setup_res_dxcol(self, #setup the results frame
fld_aep_l = None,
#dmg_type_list = 'all',
bid_l = None):
#=======================================================================
# defaults
#=======================================================================
if bid_l == None: bid_l = self.binv.bid_l
if fld_aep_l is None: fld_aep_l = self.fld_aep_od.keys() #just get all the keys from the dictionary
#if dmg_type_list=='all': dmg_type_list = self.dmg_types
#=======================================================================
# setup the dxind for writing
#=======================================================================
lvl0_values = fld_aep_l
lvl1_values = self.dmg_df_cols #include extra reporting columns
#fold these into a mdex (each flood_aep has all dmg_types)
columns = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['flood_aep','hse_atts'])
dmg_dx = pd.DataFrame(index = bid_l, columns = columns).sort_index() #make the frame
self.dmg_dx_base = dmg_dx.copy()
if self.db_f:
logger = self.logger.getChild('setup_res_dxcol')
if not self.beg_hist_df == False:
fld_aep_l.sort()
columns = pd.MultiIndex.from_product([fld_aep_l, ['bsmt_egrd', 'cond']],
names=['flood_aep','bsmt_egrd'])
self.beg_hist_df = pd.DataFrame(index=bid_l, columns = columns)
logger.info('recording bsmt_egrd history with %s'%str(self.beg_hist_df.shape))
else:
self.beg_hist_df = None
"""
dmg_dx.columns
"""
return
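#=======================================================================
# hedged sketch: results dxcol layout (illustration only)
#=======================================================================
# Level 0 of the columns built above is the flood aep and level 1 holds
# the per-house result columns, so a whole flood's dmg_df can be
# dropped in with dmg_dx[aep] = dmg_df (as calc_fld_set() does) and
# cross-sections pulled with .xs(). Labels are illustrative.
#
#   import pandas as pd
#   fld_aep_l = [10, 100]
#   res_cols = ['BS', 'MS', 'total']              #hypothetical subset of dmg_df_cols
#   columns = pd.MultiIndex.from_product([fld_aep_l, res_cols],
#                                        names=['flood_aep', 'hse_atts'])
#   dmg_dx = pd.DataFrame(index=[1, 2, 3], columns=columns).sort_index()
#   dmg_df = pd.DataFrame(0.0, index=[1, 2, 3], columns=res_cols)
#   dmg_dx[10] = dmg_df                           #store one flood's results
#   totals = dmg_dx.xs('total', axis=1, level=1)  #one 'total' column per aep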
def calc_fld_set(self, #calc flood damage for the flood set
fld_aep_l = None, #list of flood aeps to calculate
#dmg_type_list = 'all', #list of damage types to calculate
bid_l = None, #list of building names to calculate
wsl_delta = None, #delta value to add to all wsl
wtf = None, #optional flag to control writing of dmg_dx (otherwise session.write_fdmg_set_dx is used)
**run_fld): #kwargs to send to run_fld
'we could separate the object creation and the damage calculation'
"""
#=======================================================================
# INPUTS
#=======================================================================
fld_aep_l: list of floods to calc
this can be a custom list built by the user
extracted from the flood table (see session.get_ftbl_aeps)
loaded from the legacy rfda pars (session.rfda_pars.fld_aep_l)
bid_l: list of ids (matching the mind variable set under Fdmg)
#=======================================================================
# OUTPUTS
#=======================================================================
dmg_dx: dxcol of flood damage across all dmg_types and floods
mdex
lvl0: flood aep
lvl1: dmg_type + extra cols
I wanted to have this flexible, so the dfunc could pass up extra headers,
but couldn't get it to work. instead used a global list and a check;
new headers must be added to the global list and Dfunc.
index
bldg_id
#=======================================================================
# TODO:
#=======================================================================
setup to calc across binvs as well
"""
#=======================================================================
# defaults
#=======================================================================
start = time.time()
logger = self.logger.getChild('calc_fld_set')
if wtf is None: wtf = self.session.write_fdmg_set_dx
if wsl_delta is None: wsl_delta= self.wsl_delta
#=======================================================================
# setup and load the results frame
#=======================================================================
#check to see that all of these conditions pass
if not np.all([bid_l is None, fld_aep_l is None]):
logger.debug('non default run. rebuild the dmg_dx_base')
#non default run. rebuild the frame
self.setup_res_dxcol( fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l)
elif self.dmg_dx_base is None: #probably the first run
if not self.run_cnt == 1: raise IOError
logger.debug('self.dmg_dx_base is None. rebuilding')
self.setup_res_dxcol(fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l) #set it up with the defaults
dmg_dx = self.dmg_dx_base.copy() #just start with a copy of the base
#=======================================================================
# finish defaults
#=======================================================================
'these are all mostly for reporting'
if fld_aep_l is None: fld_aep_l = self.fld_aep_od.keys() #just get all the keys from the dictionary
""" leaving these as empty kwargs and letting floods handle
if bid_l == None: bid_l = binv_dato.bid_l
if dmg_type_list=='all': dmg_type_list = self.dmg_types """
"""
lvl0_values = dmg_dx.columns.get_level_values(0).unique().tolist()
lvl1_values = dmg_dx.columns.get_level_values(1).unique().tolist()"""
logger.info('calc flood damage (%i) floods w/ wsl_delta = %.2f'%(len(fld_aep_l), wsl_delta))
logger.debug('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \n')
#=======================================================================
# loop and calc each flood
#=======================================================================
fcnt = 0
first = True
for flood_aep in fld_aep_l: #loop through and build each flood
#self.session.prof(state='%s.fdmg.calc_fld_set.%i'%(self.get_id(), fcnt)) #memory profiling
self.state = flood_aep
'useful for keeping track of what the model is doing'
#get the flood
flood_dato = self.fld_aep_od[flood_aep] #pull this from the dictionary
logger.debug('getting dmg_df for %s'%flood_dato.name)
#===================================================================
# run sequence
#===================================================================
#get damage for these depths
dmg_df = flood_dato.run_fld(**run_fld) #add the damage df to this slice
if dmg_df is None: continue #skip this one
#===================================================================
# wrap up
#===================================================================
dmg_dx[flood_aep] = dmg_df #store into the frame
fcnt += 1
logger.debug('for flood_aep \'%s\' on fcnt %i got dmg_df %s \n'%(flood_aep, fcnt, str(dmg_df.shape)))
#===================================================================
# checking
#===================================================================
if self.db_f:
#check that the floods are increasing
if first:
first = False
last_aep = None
else:
if not flood_aep > last_aep:
raise IOError
last_aep = flood_aep
#=======================================================================
# wrap up
#=======================================================================
self.state = 'na'
if wtf:
filetail = '%s %s %s %s res_fld'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp.pd.write_to_file(filepath, dmg_dx, overwrite=True, index=True) #send for writing
self.dmg_dx = dmg_dx
stop = time.time()
logger.info('in %.4f secs calcd damage on %i of %i floods'%(stop - start, fcnt, len(fld_aep_l)))
return
def get_results(self): #called by Timestep.run_dt()
self.state='wrap'
logger = self.logger.getChild('get_results')
#=======================================================================
# optionals
#=======================================================================
s = self.session.outpars_d[self.__class__.__name__]
if (self.session.write_fdmg_fancy) or (self.session.write_fdmg_sum):
logger.debug("calc_summaries \n")
dmgs_df = self.calc_summaries()
self.dmgs_df = dmgs_df.copy()
else: dmgs_df = None
if ('ead_tot' in s) or ('dmg_df' in s):
logger.debug('\n')
self.calc_annulized(dmgs_df = dmgs_df, plot_f = False)
'this will also run calc_summaries if it has not happened yet'
if 'dmg_tot' in s:
#get a cross section of the 'total' column across all flood_aeps and sum for all entries
self.dmg_tot = self.dmg_dx.xs('total', axis=1, level=1).sum().sum()
if ('bwet_cnt' in s) or ('bdamp_cnt' in s) or ('bdry_cnt' in s):
logger.debug('get_fld_begrd_cnt')
self.get_fld_begrd_cnt()
if 'fld_pwr_cnt' in s:
logger.debug('calc_fld_pwr_cnt \n')
cnt = 0
for aep, obj in self.fld_aep_od.iteritems():
if obj.gpwr_f: cnt +=1
self.fld_pwr_cnt = cnt
self.binv.calc_binv_stats()
if self.session.write_fdmg_fancy:
self.write_res_fancy()
if self.write_fdmg_sum_fly: #write the results after each run
self.write_dmg_fly()
if self.db_f:
self.check_dmg_dx()
logger.debug('finished \n')
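#=======================================================================
# hedged sketch: dmg_tot cross-section (illustration only)
#=======================================================================
# The dmg_tot calculation above uses .xs() to pull the 'total' column
# out of every flood_aep block and a double .sum() to collapse houses,
# then floods, to a single scalar. Data are illustrative.
#
#   import pandas as pd
#   cols = pd.MultiIndex.from_product([[10, 100], ['BS', 'total']],
#                                     names=['flood_aep', 'hse_atts'])
#   dmg_dx = pd.DataFrame([[1.0, 3.0, 2.0, 5.0]], index=[1], columns=cols)
#   dmg_tot = dmg_dx.xs('total', axis=1, level=1).sum().sum()   #3.0 + 5.0 = 8.0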
def calc_summaries(self, #annualize the damages
fsts_l = ['gpwr_f', 'dmg_sw', 'dmg_gw'], #list of additional flood attributes to report in the summary
dmg_dx=None,
plot=False, #flag to execute plot_dmgs() at the end. better to do this explicitly with an outputr
wtf=None):
"""
basically dropping dimensions on the outputs and adding annualized damages
#=======================================================================
# OUTPUTS
#=======================================================================
DROP BINV DIMENSION
dmgs_df: df with
columns: raw damage types, and annualized damage types
index: each flood
entries: total damage for binv
DROP FLOODS DIMENSION
aad_sum_ser
DROP ALL DIMENSIONS
ead_tot
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_summaries')
if dmg_dx is None: dmg_dx = self.dmg_dx.copy()
if plot is None: plot = self.session._write_figs
if wtf is None: wtf = self.write_fdmg_sum
#=======================================================================
# #setup frame
#=======================================================================
#get the columns
dmg_types = self.dmg_types + ['total']
#=======================================================================
# #build the annualized damage type names
#=======================================================================
admg_types = []
for entry in dmg_types: admg_types.append(entry+'_a')
cols = dmg_types + ['prob', 'prob_raw'] + admg_types + fsts_l
self.dmg_df_cols
"""
hp.pd.v(dmg_dx)
"""
dmgs_df = pd.DataFrame(columns = cols)
dmgs_df['ari'] = dmg_dx.columns.get_level_values(0).unique()
dmgs_df = dmgs_df.sort_values('ari').reset_index(drop=True)
#=======================================================================
# loop through and fill out the data
#=======================================================================
for index, row in dmgs_df.iterrows(): #loop through and fill out
dmg_df = dmg_dx[row['ari']] #get the fdmg for this aep
#sum all the damage types
for dmg_type in dmg_types:
row[dmg_type] = dmg_df[dmg_type].sum() #sum them all up
#calc the probability
row['prob_raw'] = 1/float(row['ari']) #inverse of aep
row['prob'] = row['prob_raw'] * self.fprob_mult #apply the multiplier
#calculate the annualized damages
for admg_type in admg_types:
dmg_type = admg_type[:-2] #drop the '_a' suffix
row[admg_type] = row[dmg_type] * row['prob']
#===================================================================
# get stats from the floodo
#===================================================================
floodo = self.fld_aep_od[row['ari']]
for attn in fsts_l:
row[attn] = getattr(floodo, attn)
#===================================================================
# #add this row back into the frame
#===================================================================
dmgs_df.loc[index,:] = row
#=======================================================================
# get series totals
#=======================================================================
dmgs_df = dmgs_df.sort_values('prob').reset_index(drop=True)
#=======================================================================
# closeout
#=======================================================================
logger.debug('annualized %i damage types for %i floods'%(len(dmg_types), len(dmgs_df)))
if wtf:
filetail = '%s dmg_sumry'%(self.session.state)
filepath = os.path.join(self.outpath, filetail)
hp.pd.write_to_file(filepath, dmgs_df, overwrite=True, index=False) #send for writing
logger.debug('set data with %s and cols: %s'%(str(dmgs_df.shape), dmgs_df.columns.tolist()))
if plot:
self.plot_dmgs(wtf=wtf)
#=======================================================================
# post check
#=======================================================================
if self.db_f:
#check for sort logic
if not dmgs_df.loc[:,'prob'].is_monotonic:
raise IOError
if not dmgs_df['total'].iloc[::-1].is_monotonic: #flip the order
logger.warning('bigger floods are not causing more damage')
'some of the flood tables seem bad...'
#raise IOError
#all probabilities should be larger than zero
if not np.all(dmgs_df.loc[:,'prob'] > 0):
raise IOError
return dmgs_df
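#=======================================================================
# hedged sketch: annualizing a damage total (illustration only)
#=======================================================================
# Each flood's summed damage is multiplied by its event probability
# (prob = fprob_mult / ari) to give the '_a' columns above. The numbers
# here are illustrative.
#
#   ari, total, fprob_mult = 100, 2.0e6, 1.0
#   prob_raw = 1.0 / ari                 #0.01
#   prob = prob_raw * fprob_mult         #0.01
#   total_a = total * prob               #20,000 $/yr contribution to the EAD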
def calc_annulized(self, dmgs_df = None,
ltail = None, rtail = None, plot_f=None,
dx = 0.001): #get the area under the damage curve
"""
#=======================================================================
# INPUTS
#=======================================================================
ltail: left tail treatment code (low prob high damage)
flat: extend the max damage to the zero probability event
'none': don't extend the tail
rtail: right trail treatment (high prob low damage)
'none': don't extend
'2year': extend to zero damage at the 2 year aep
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_annulized')
if ltail is None: ltail = self.ca_ltail
if rtail is None: rtail = self.ca_rtail
'plotter ignores passed kwargs here'
if plot_f is None: plot_f= self.session._write_figs
#=======================================================================
# get data
#=======================================================================
if dmgs_df is None:
dmgs_df = self.calc_summaries()
#df_raw = self.data.loc[:,('total', 'prob', 'ari')].copy().reset_index(drop=True)
'only slicing columns for testing'
df = dmgs_df.copy().reset_index(drop=True)
if len(df) == 1:
logger.warning('only got one flood entry. skipping')
self.ead_tot = 0
self.dmgs_df_wtail = df
return
logger.debug("with ltail = \'%s\', rtail = \'%s\' and df %s"%(ltail, rtail, str(df.shape)))
if self.db_f:
if len(df) <2:
logger.error('did not get enough flood entries to calculate EAD')
raw_input('press enter to continue any way....')
#=======================================================================
# left tail treatment
#=======================================================================
if ltail == 'flat':
#zero probability
'assume 1000yr flood is the max damage'
max_dmg = df['total'].max()*1.0001
df.loc[-1, 'prob'] = 0
df.loc[-1, 'ari'] = 999999
df.loc[-1, 'total'] = max_dmg
logger.debug('ltail == flat. duplicated damage %.2f at prob 0'%max_dmg)
elif ltail == 'none':
pass
else: raise IOError
'todo: add option for value multiplier'
#=======================================================================
# right tail
#=======================================================================
if rtail == 'none':
pass
elif hp.basic.isnum(rtail):
rtail_yr = float(rtail)
rtail_p = 1.0 / rtail_yr
max_p = df['prob'].max()
#floor check
if rtail_p < max_p:
logger.error('rtail_p (%.2f) < max_p (%.2f)'%(rtail_p, max_p))
raise IOError
#same
elif rtail_p == max_p:
logger.debug("rtail_p == min(xl. no changes made")
else:
logger.debug("adding zero damage for aep = %.1f"%rtail_yr)
#zero damage
'assume no damage occurs at the passed rtail_yr'
loc = len(df)
df.loc[loc, 'prob'] = rtail_p
df.loc[loc, 'ari'] = 1.0/rtail_p
df.loc[loc, 'total'] = 0
"""
hp.pd.view_web_df(self.data)
"""
else: raise IOError
#=======================================================================
# clean up
#=======================================================================
df = df.sort_index() #resort the index
if self.db_f:
'these should still hold'
if not df.loc[:,'prob'].is_monotonic:
raise IOError
"""see above
if not df['total'].iloc[::-1].is_monotonic:
raise IOError"""
x, y = df['prob'].values.tolist(), df['total'].values.tolist()
#=======================================================================
# find area under curve
#=======================================================================
try:
#ead_tot = scipy.integrate.simps(y, x, dx = dx, even = 'avg')
'this was giving some weird results'
ead_tot = scipy.integrate.trapz(y, x, dx = dx)
except:
logger.warning('scipy.integrate.trapz failed. setting ead_tot to zero')
ead_tot = 0
raise IOError
logger.info('found ead_tot = %.2f $/yr from %i points with tail_codes: \'%s\' and \'%s\''
%(ead_tot, len(y), ltail, rtail))
self.ead_tot = ead_tot
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if pd.isnull(ead_tot):
raise IOError
if not isinstance(ead_tot, float):
raise IOError
if ead_tot <=0:
raise IOError
#=======================================================================
# update data with tails
#=======================================================================
self.dmgs_df_wtail = df.sort_index().reset_index(drop=True)
#=======================================================================
# generate plot
#=======================================================================
if plot_f:
self.plot_dmgs(right_nm = None, xaxis = 'prob', logx = False)
return
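#=======================================================================
# hedged sketch: EAD as area under the damage-probability curve
#=======================================================================
# With the (prob, total) pairs sorted, the expected annual damage is
# the area under the damage-probability curve; the model integrates
# with scipy.integrate.trapz, which np.trapz mirrors here. Numbers are
# illustrative.
#
#   import numpy as np
#   prob  = np.array([0.0, 0.01, 0.1, 0.5])       #aep = 1/ari, plus tail points
#   total = np.array([5.0e6, 4.0e6, 1.0e6, 0.0])  #summed damage per flood
#   ead_tot = np.trapz(total, prob)               #~0.47e6 $/yr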
def get_fld_begrd_cnt(self): #tabulate the bsmt_egrd counts from each flood
logger = self.logger.getChild('get_fld_begrd_cnt')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
#lvl1_values = dmg_dx.columns.get_level_values(0).unique().tolist()
#get all the basement exposure grade types
df1 = dmg_dx.loc[:,idx[:, 'bsmt_egrd']] #get a slice by the level 1 values
#get occurances by value
d = hp.pd.sum_occurances(df1, logger=logger)
#=======================================================================
# loop and calc
#=======================================================================
logger.debug('looping through %i bsmt_egrds: %s'%(len(d), d.keys()))
for bsmt_egrd, cnt in d.iteritems():
attn = 'b'+bsmt_egrd +'_cnt'
logger.debug('for \'%s\' got %i'%(attn, cnt))
setattr(self, attn, cnt)
logger.debug('finished \n')
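#=======================================================================
# hedged sketch: counting bsmt_egrd values across floods
#=======================================================================
# pd.IndexSlice (the 'idx' used above) pulls the 'bsmt_egrd' column
# from every flood_aep block; the occurrences can then be counted.
# Labels are illustrative and value_counts() stands in for
# hp.pd.sum_occurances.
#
#   import pandas as pd
#   idx = pd.IndexSlice
#   cols = pd.MultiIndex.from_product([[10, 100], ['bsmt_egrd', 'total']])
#   dmg_dx = pd.DataFrame([['wet', 1.0, 'dry', 0.0]], index=[1], columns=cols)
#   df1 = dmg_dx.loc[:, idx[:, 'bsmt_egrd']]             #one column per flood
#   d = pd.Series(df1.values.ravel()).value_counts()     #e.g. {'wet': 1, 'dry': 1}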
def check_dmg_dx(self): #check logical consistency of the damage results
logger = self.logger.getChild('check_dmg_dx')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
mdex = dmg_dx.columns
aep_l = mdex.get_level_values(0).astype(int).unique().values.tolist()
aep_l.sort()
#=======================================================================
# check that each flood increases in damage
#=======================================================================
total = None
aep_last = None
for aep in aep_l:
#get this slice
df = dmg_dx[aep]
if total is None:
boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
total = df.loc[:,boolcol].sum().sum()
if not aep == min(aep_l):
raise IOError
else:
newtot = df.loc[:,boolcol].sum().sum()
if not newtot >= total:
logger.warning('aep %s tot %.2f < aep %s %.2f'%(aep, newtot, aep_last, total))
#raise IOError
#print 'new tot %.2f > oldtot %.2f'%(newtot, total)
total = newtot
aep_last = aep
return
def wrap_up(self):
#=======================================================================
# update asset containers
#=======================================================================
"""
#building inventory
'should be flagged for updating during House.notify()'
if self.binv.upd_kid_f:
self.binv.update()"""
"""dont think we need this here any more.. only on udev.
keeping it just to be save"""
self.last_tstep = copy.copy(self.time)
self.state='close'
def write_res_fancy(self, #for saving results in xls per tab. called as a special outputr
dmg_dx=None,
include_ins = False,
include_raw = False,
include_begh = False):
"""
#=======================================================================
# INPUTS
#=======================================================================
include_ins: whether to add inputs as tabs.
I've left this separate from the 'copy_inputs' flag as it is not a true file copy of the inputs
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('write_res_fancy')
if dmg_dx is None: dmg_dx = self.dmg_dx
if dmg_dx is None:
logger.warning('got no dmg_dx. skipping')
return
#=======================================================================
# setup
#=======================================================================
od = OrderedDict()
#=======================================================================
# add the parameters
#=======================================================================
#get the blank frame
df = pd.DataFrame(columns = ['par','value'] )
df['par'] = list(self.try_inherit_anl)
for indx, row in df.iterrows():
df.iloc[indx, 1] = getattr(self, row['par']) #set this value
od['pars'] = df
#=======================================================================
# try and add damage summary
#=======================================================================
if not self.dmgs_df is None:
od['dmg summary'] = self.dmgs_df
#=======================================================================
# #get the dmg_dx decomposed
#=======================================================================
od.update(hp.pd.dxcol_to_df_set(dmg_dx, logger=self.logger))
#=======================================================================
# #add dmg_dx as a raw tab
#=======================================================================
if include_raw:
od['raw_res'] = dmg_dx
#=======================================================================
# add inputs
#=======================================================================
if include_ins:
for dataname, dato in self.kids_d.iteritems():
if hasattr(dato, 'data') & hp.pd.isdf(dato.data):
od[dataname] = dato.data
#=======================================================================
# add debuggers
#=======================================================================
if include_begh:
if not self.beg_hist_df is None:
od['beg_hist'] = self.beg_hist_df
#=======================================================================
# #write to excel
#=======================================================================
filetail = '%s %s %s %s fancy_res'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp.pd.write_dfset_excel(od, filepath, engine='xlsxwriter', logger=self.logger)
return
def write_dmg_fly(self): #write damage results after each run
logger = self.logger.getChild('write_dmg_fly')
dxcol = self.dmg_dx #results
#=======================================================================
# build the results summary series
#=======================================================================
#get all the flood aeps
lvl0vals = dxcol.columns.get_level_values(0).unique().astype(int).tolist()
#blank holder
res_ser = pd.Series(index = lvl0vals)
#loop and calc sums for each flood
for aep in lvl0vals:
res_ser[aep] = dxcol.loc[:,(aep,'total')].sum()
#add extras
if not self.ead_tot is None:
res_ser['ead_tot'] = self.ead_tot
res_ser['dt'] = self.tstep_o.year
res_ser['sim'] = self.simu_o.ind
lindex = '%s.%s'%(self.simu_o.name, self.tstep_o.name)
hp.pd.write_fly_df(self.fly_res_fpath,res_ser, lindex = lindex,
first = self.write_dmg_fly_first, tag = 'fdmg totals',
db_f = self.db_f, logger=logger) #write results on the fly
self.write_dmg_fly_first = False
return
def get_plot_kids(self): #raise kids for plotting the damage summaries
logger = self.logger.getChild('get_plot_kids')
#=======================================================================
# get slice of aad_fmt_df matching the aad cols
#=======================================================================
aad_fmt_df = self.session.pars_df_d['dmg_sumry_plot'] #pull the formatter pars from the tab
dmgs_df = self.dmgs_df
self.data = dmgs_df
boolidx = aad_fmt_df.loc[:,'name'].isin(dmgs_df.columns) #get just those formatters with data in the aad
aad_fmt_df_slice = aad_fmt_df[boolidx] #get this slice
"""
hp.pd.view_web_df(self.data)
hp.pd.view_web_df(df)
hp.pd.view_web_df(aad_fmt_df_slice)
aad_fmt_df_slice.columns
"""
#=======================================================================
# formatter kids setup
#=======================================================================
"""need to run this every time so the data is updated
TODO: allow some updating here so we don't have to rebuild each time
if self.plotter_kids_dict is None:"""
self.plotr_d = self.raise_children_df(aad_fmt_df_slice, kid_class = hp.data.Data_o)
logger.debug('finished \n')
def plot_dmgs(self, wtf=None, right_nm = None, xaxis = 'ari', logx = True,
ylims = None, #tuple of min/max values for the y-axis
): #plot curve of aad
"""
see tab 'aad_fmt' to control what is plotted and formatting
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('plot_dmgs')
if wtf == None: wtf = self.session._write_figs
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if self.dmgs_df is None:
raise IOError
#=======================================================================
# setup
#=======================================================================
if not ylims is None:
try:
ylims = eval(ylims)
except:
pass
#get the plot workers
if self.plotr_d is None:
self.get_plot_kids()
kids_d = self.plotr_d
title = '%s-%s-%s EAD-ARI plot on %i objs'%(self.session.tag, self.simu_o.name, self.name, len(self.binv.childmeta_df))
logger.debug('with \'%s\''%title)
if not self.tstep_o is None:
title = title + ' for %s'%self.tstep_o.name
#=======================================================================
# update plotters
#=======================================================================
logger.debug('updating plotters with my data')
#get data
data_og = self.data.copy() #store this for later
if self.dmgs_df_wtail is None:
df = self.dmgs_df.copy()
else:
df = self.dmgs_df_wtail.copy()
df = df.sort_values(xaxis, ascending=True)
#reformat data
df.set_index(xaxis, inplace = True)
#re set
self.data = df
#tell kids to refresh their data from here
for gid, obj in kids_d.iteritems(): obj.data = obj.loadr_vir()
self.data = data_og #reset the data
#=======================================================================
# get annotation
#=======================================================================
val_str = '$' + "{:,.2f}".format(self.ead_tot/1e6)
#val_str = "{:,.2f}".format(self.ead_tot)
"""
txt = 'total aad: $%s \n tail kwargs: \'%s\' and \'%s\' \n'%(val_str, self.ca_ltail, self.ca_rtail) +\
'binv.cnt = %i, floods.cnt = %i \n'%(self.binv.cnt, len(self.fld_aep_od))"""
txt = 'total EAD = %s'%val_str
#=======================================================================
#plot the workers
#=======================================================================
#twinx
if not right_nm is None:
logger.debug('twinning axis with name \'%s\''%right_nm)
title = title + '_twin'
# sort children into left/right buckets by name to plot on each axis
right_pdb_d, left_pdb_d = self.sort_buckets(kids_d, right_nm)
if self.db_f:
if len (right_pdb_d) <1: raise IOError
#=======================================================================
# #send for plotting
#=======================================================================
'this plots both bundles by their data indexes'
ax1, ax2 = self.plot_twinx(left_pdb_d, right_pdb_d,
logx=logx, xlab = xaxis, title=title, annot = txt,
wtf=False)
'cannot figure out why the annot is plotting twice'
ax2.set_ylim(0, 1) #prob limits
legon = False
else:
logger.debug('single axis')
try:
del kids_d['prob']
except:
pass
pdb = self.get_pdb_dict(kids_d.values())
ax1 = self.plot_bundles(pdb,
logx=logx, xlab = 'ARI', ylab = 'damage ($ 10^6)', title=title, annot = txt,
wtf=False)
legon=True
#hatch
#=======================================================================
# post formatting
#=======================================================================
#set axis limits
if xaxis == 'ari': ax1.set_xlim(1, 1000) #aep limits
elif xaxis == 'prob': ax1.set_xlim(0, .6)
if not ylims is None:
ax1.set_ylim(ylims[0], ylims[1])
#ax1.set_ylim(0, ax1.get_ylim()[1]) #$ limits
#=======================================================================
# format y axis labels
#=======================================================================
old_tick_l = ax1.get_yticks() #get the old tick values
# build the new ticks
l = []
for value in old_tick_l:
new_v = '$' + "{:,.0f}".format(value/1e6)
l.append(new_v)
#apply the new labels
ax1.set_yticklabels(l)
"""
#add thousands comma
ax1.get_yaxis().set_major_formatter(
#matplotlib.ticker.FuncFormatter(lambda x, p: '$' + "{:,.2f}".format(x/1e6)))
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))"""
if xaxis == 'ari':
ax1.get_xaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
if wtf:
fig = ax1.figure
savepath_raw = os.path.join(self.outpath,title)
flag = hp.plot.save_fig(self, fig, savepath_raw=savepath_raw, dpi = self.dpi, legon=legon)
if not flag: raise IOError
#plt.close()
return
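#=======================================================================
# hedged sketch: dollar-million axis labels via FuncFormatter
#=======================================================================
# An alternative to the set_yticklabels() loop above: a FuncFormatter
# rewrites the ticks on draw, so the labels stay correct if the limits
# change. This is a sketch, not the model's code; the data are
# illustrative.
#
#   import matplotlib.pyplot as plt
#   import matplotlib.ticker
#   fig, ax = plt.subplots()
#   ax.plot([1, 10, 100, 1000], [0.5e6, 2e6, 6e6, 9e6])
#   ax.set_xscale('log')
#   ax.yaxis.set_major_formatter(
#       matplotlib.ticker.FuncFormatter(lambda x, p: '$' + "{:,.1f}".format(x / 1e6)))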
class Flood(
hp.dyno.Dyno_wrap,
hp.sim.Sim_o,
hp.oop.Parent, #flood object worker
hp.oop.Child):
#===========================================================================
# program pars
#===========================================================================
gpwr_f = False #grid power flag placeholder
#===========================================================================
# user defined pars
#===========================================================================
ari = None
#loaded from flood table
#area exposure grade. controls the area's depth decision algorithm based on the performance of macro structures (e.g. dykes).
area_egrd00 = ''
area_egrd01 = ''
area_egrd02 = ''
area_egrd00_code = None
area_egrd01_code = None
area_egrd02_code = None
#===========================================================================
# calculated pars
#===========================================================================
hdep_avg = 0 #average house depth
#damage properties
total = 0
BS = 0
BC = 0
MS = 0
MC = 0
dmg_gw = 0
dmg_sw = 0
dmg_df_blank =None
wsl_avg = 0
#===========================================================================
# data containers
#===========================================================================
hdmg_cnt = 0
dmg_df = None
dmg_res_df = None
#bsmt_egrd counters. see get_begrd_cnt()
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self, parent, *vars, **kwargs):
logger = mod_logger.getChild('Flood')
logger.debug('start _init_')
#=======================================================================
# #attach custom vars
#=======================================================================
self.inherit_parent_ans=set(['mind', 'dmg_types'])
#=======================================================================
# initilize cascade
#=======================================================================
super(Flood, self).__init__(parent, *vars, **kwargs) #initialize the baseclass
#=======================================================================
# common setup
#=======================================================================
if self.sib_cnt == 0:
#update the resets
pass
#=======================================================================
# unique setup
#=======================================================================
""" handled by the outputr
self.reset_d.update({'hdmg_cnt':0})"""
self.ari = int(self.ari)
self.dmg_res_df = pd.DataFrame() #set as an empty frame for output handling
#=======================================================================
# setup functions
#=======================================================================
self.set_gpwr_f()
logger.debug('set_dmg_df_blank()')
self.set_dmg_df_blank()
logger.debug('get your water levels from the selected wsl table \n')
self.set_wsl_frm_tbl()
logger.debug('set_area_egrd()')
self.set_area_egrd()
logger.debug('get_info_from_binv()')
df = self.get_info_from_binv() #initial run to set blank frame
self.set_wsl_from_egrd(df)
""" moved into set_wsl_frm_tbl()
logger.debug('\n')
self.setup_dmg_df()"""
self.init_dyno()
self.logger.debug('__init___ finished \n')
def set_dmg_df_blank(self):
logger = self.logger.getChild('set_dmg_df_blank')
binv_df = self.model.binv.childmeta_df
colns = OrderedSet(self.model.dmg_df_cols.tolist() + ['wsl', 'area_prot_lvl'])
'wsl should be redundant'
#get boolean
self.binvboolcol = binv_df.columns.isin(colns) #store this for get_info_from_binv()
#get the blank frame
self.dmg_df_blank = pd.DataFrame(columns = colns, index = binv_df.index) #get the blank frame
'this still needs the wsl levels attached based on your area exposure grade'
logger.debug('set dmg_df_blank with %s'%(str(self.dmg_df_blank.shape)))
return
def set_gpwr_f(self): #set your power flag
if self.is_frozen('gpwr_f'): return True #shortcut for frozen
logger = self.logger.getChild('set_gpwr_f')
#=======================================================================
# get based on aep
#=======================================================================
min_aep = int(self.model.gpwr_aep)
if self.ari < min_aep: gpwr_f = True
else: gpwr_f = False
logger.debug('for min_aep = %i, set gpwr_f = %s'%(min_aep, gpwr_f))
#update handler
self.handle_upd('gpwr_f', gpwr_f, proxy(self), call_func = 'set_gpwr_f')
return True
def set_wsl_frm_tbl(self, #build the raw wsl data from the passed flood table
flood_tbl_nm = None, #name of flood table to pull raw data from
#bid_l=None,
):
"""
here we get the raw values
these are later modified by the area_egrd with self.get_wsl_from_egrd()
#=======================================================================
# INPUTS
#=======================================================================
flood_tbl_df_raw: raw df of the classic flood table
columns: count, aep, aep, aep, aep....
real_columns: bldg_id, CPID, depth, depth, depth, etc...
index: unique arbitrary
wsl_ser: series of wsl for this flood on each bldg_id
#=======================================================================
# calls
#=======================================================================
dynp handles Fdmg.flood_tbl_nm
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_frm_tbl')
if flood_tbl_nm is None: flood_tbl_nm = self.model.flood_tbl_nm
#=======================================================================
# get data
#=======================================================================
#pull the raw flood tables
ftbl_o = self.model.ftblos_d[flood_tbl_nm]
wsl_d = ftbl_o.wsl_d
df = pd.DataFrame(index = wsl_d.values()[0].index) #blank frame from the first entry
#=======================================================================
# loop and apply for each flood type
#=======================================================================
for ftype, df1 in wsl_d.iteritems():
#=======================================================================
# data checks
#=======================================================================
if self.db_f:
if not ftype in ['wet', 'dry', 'damp']:
raise IOError
df_raw =df1.copy()
if not self.ari in df_raw.columns:
logger.error('the flood provided on the \'floods\' tab (\'%s\') does not have a match in the flood table: \n %s'%
(self.ari, self.model.ftblos_d[flood_tbl_nm].filepath))
raise IOError
#=======================================================================
# slice for this flood
#=======================================================================
boolcol = df1.columns == self.ari #slice for this aep
#get the series for this
wsl_ser = df1.loc[:, boolcol].iloc[:,0].astype(float)
#wsl_ser = wsl_ser.rename(ftype) #rename with the aep
'binv slicing moved to Flood_tbl.clean_data()'
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if len(wsl_ser) <1:
raise IOError
""" allowing
#check for nuls
if np.any(pd.isnull(wsl_ser2)):
raise IOError"""
#=======================================================================
# wrap up report and attach
#=======================================================================
df[ftype] = wsl_ser
logger.debug('from \'%s\' for \'%s\' got wsl_ser %s for aep: %i'
%(flood_tbl_nm, ftype, str(wsl_ser.shape), self.ari))
self.wsl_df = df #set this
'not using dynps'
if self.session.state == 'init':
self.reset_d['wsl_df'] = df.copy()
return True
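#=======================================================================
# hedged sketch: slicing one aep from a wide flood table
#=======================================================================
# The raw flood table is wide (one column per ari, one row per
# building); the column matching this flood's ari is pulled as the wsl
# series, as done above. The values are illustrative.
#
#   import pandas as pd
#   wsl_wet = pd.DataFrame({10: [1.2, 0.8], 100: [1.9, 1.4]},
#                          index=[101, 102])      #bldg_id index, ari columns
#   ari = 100
#   boolcol = wsl_wet.columns == ari               #slice for this aep
#   wsl_ser = wsl_wet.loc[:, boolcol].iloc[:, 0].astype(float)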
def set_area_egrd(self): #pull your area exposure grade from somewhere
"""
#=======================================================================
# calls
#=======================================================================
self.__init__()
dynp handles: Fdmg.flood_tbl_nm (just in case we are pulling from there)
"""
#=======================================================================
# dependency check
#=======================================================================
if not self.session.state=='init':
dep_l = [([self.model], ['set_area_prot_lvl()'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_area_egrd'):
return False
logger = self.logger.getChild('set_area_egrd')
#=======================================================================
# pull the egrd from another source if asked
#=======================================================================
for cnt in range(0,3,1): #loop through each one
attn = 'area_egrd%02d'%cnt
area_egrd_code = getattr(self, attn + '_code')
if area_egrd_code in ['dry', 'damp', 'wet']:
area_egrd = area_egrd_code
#===================================================================
# pull from the flood table
#===================================================================
elif area_egrd_code == '*ftbl':
ftbl_o = self.model.ftblos_d[self.model.flood_tbl_nm] #get the flood table object
area_egrd = getattr(ftbl_o, attn) #get from the table
#===================================================================
# pull from the model
#===================================================================
elif area_egrd_code == '*model':
area_egrd = getattr(self.model, attn) #get from the model
else:
logger.error('for \'%s\' got unrecognized area_egrd_code: \'%s\''%(attn, area_egrd_code))
raise IOError
#===================================================================
# set these
#===================================================================
self.handle_upd(attn, area_egrd, weakref.proxy(self), call_func = 'set_area_egrd')
'this should trigger generating a new wsl set to the blank_dmg_df'
logger.debug('set \'%s\' from \'%s\' as \'%s\''
%(attn, area_egrd_code,area_egrd))
if self.db_f:
if not area_egrd in ['dry', 'damp', 'wet']:
raise IOError
return True
def set_wsl_from_egrd(self, df = None): #calculate the wsl based on teh area_egrd
"""
This is a partial results retrieval for non damage function results
TODO:
consider checking for dependency on House.area_prot_lvl
#=======================================================================
# calls
#=======================================================================
self.__init__
dynp handles Flood.area_egrd##
"""
#=======================================================================
# check dependencies and frozen
#=======================================================================
if not self.session.state=='init':
dep_l = [([self], ['set_area_egrd()', 'set_wsl_frm_tbl()'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_wsl_from_egrd'):
return False
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_from_egrd')
#if wsl_delta is None: wsl_delta = self.model.wsl_delta
#=======================================================================
# get data
#=======================================================================
if df is None: df = self.get_info_from_binv()
'need to have updated area_prot_lvls'
#=======================================================================
# precheck
#=======================================================================
if self.db_f:
if not isinstance(df, pd.DataFrame): raise IOError
if not len(df) > 0: raise IOError
#=======================================================================
# add the wsl for each area_egrd
#=======================================================================
for prot_lvl in range(0,3,1): #loop through each one
#get your grade for this prot_lvl
attn = 'area_egrd%02d'%prot_lvl
area_egrd = getattr(self, attn)
#identify the houses for this protection level
boolidx = df.loc[:,'area_prot_lvl'] == prot_lvl
if boolidx.sum() == 0: continue
#give them the wsl corresponding to this grade
df.loc[boolidx, 'wsl'] = self.wsl_df.loc[boolidx,area_egrd]
#set a tag for the area_egrd
if 'area_egrd' in df.columns:
df.loc[boolidx, 'area_egrd'] = area_egrd
logger.debug('for prot_lvl %i, set %i wsl from \'%s\''%(prot_lvl, boolidx.sum(), area_egrd))
#=======================================================================
# set this
#=======================================================================
self.dmg_df_blank = df
#=======================================================================
# post check
#=======================================================================
logger.debug('set dmg_df_blank with %s'%str(df.shape))
if self.session.state=='init':
self.reset_d['dmg_df_blank'] = df.copy()
if self.db_f:
if np.any(pd.isnull(df['wsl'])):
logger.error('got some wsl nulls')
raise IOError
return True
"""
hp.pd.v(df)
hp.pd.v(self.dmg_df_blank)
"""
def run_fld(self, **kwargs): #shortcut to collect all the functions for a simulation run
self.run_cnt += 1
dmg_df_blank = self.get_info_from_binv()
dmg_df = self.get_dmg_set(dmg_df_blank, **kwargs)
if self.db_f: self.check_dmg_df(dmg_df)
'leaving this here for simplicity'
self.calc_statres_flood(dmg_df)
return dmg_df
def get_info_from_binv(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_info_from_binv')
binv_df = self.model.binv.childmeta_df
#pull static values
binvboolcol = self.binvboolcol
df = self.dmg_df_blank.copy()
'this should have wsl added to it from set_wsl_from_egrd()'
if self.db_f:
if not len(binvboolcol) == len(binv_df.columns):
logger.warning('got length mismatch between binvboolcol (%i) and the binv_df columns (%i)'%
(len(binvboolcol), len(binv_df.columns)))
'pandas will handle this mismatch.. just ignores the end'
#=======================================================================
# #update with values from the binv
#=======================================================================
df.update(binv_df.loc[:,binvboolcol], overwrite=True) #update from all the values in the binv
logger.debug('retrieved %i values from the binv_df on: %s'
%(binv_df.loc[:,binvboolcol].count().count(), binv_df.loc[:,binvboolcol].columns.tolist()))
#=======================================================================
# macro calcs
#=======================================================================
if 'hse_depth' in df.columns:
df['hse_depth'] = df['wsl'] - df['anchor_el']
#ground water damage flag
if 'gw_f' in df.columns:
df.loc[:,'gw_f'] = df['dem_el'] > df['wsl'] #water is below grade
if self.db_f:
if 'bsmt_egrd' in binv_df.columns:
raise IOError
return df
def get_dmg_set(self, #calcluate the damage for each house
dmg_df, #empty frame for filling with damage results
#dmg_type_list='all',
#bid_l = None,
#wsl_delta = None,
dmg_rat_f =None, #include the damage ratio in results
):
"""
#=======================================================================
# INPUTS
#=======================================================================
depth_ser: series of depths (for this flood) with index = bldg_id
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_dmg_set(%s)'%self.get_id())
if dmg_rat_f is None: dmg_rat_f = self.model.dmg_rat_f
hse_od = self.model.binv.hse_od #ordered dictionary by bid: hse_dato
""" see get_wsl_from_egrd()
#=======================================================================
# build the dmg_df
#=======================================================================
bid_ar = self.model.binv.data.loc[:,self.mind].values.astype(np.int) #get everything from teh binv
dmg_df = pd.DataFrame(index = bid_ar, columns = self.model.dmg_df_cols)"""
#=======================================================================
# pre checks
#=======================================================================
if self.db_f:
if not isinstance(dmg_df, pd.DataFrame):
raise IOError
boolidx = dmg_df.index.isin(hse_od.keys())
if not np.all(boolidx):
logger.error('some of the bldg_ids in the wsl_ser were not found in the binv: \n %s'
%dmg_df.index[~boolidx])
raise IOError
#check the damage columns are empty
boolcol = np.isin(dmg_df.columns, ['MS', 'MC', 'BS', 'BC', 'GS', 'total']) #identify damage columns
if not np.all(pd.isnull(dmg_df.loc[:,boolcol])):
raise IOError
#=======================================================================
# frame setup
#=======================================================================
#identify columns containing damage results
dmgbool = np.logical_or(dmg_df.columns.isin(self.model.dmg_types), #damages
pd.Series(dmg_df.columns).str.contains('_rat').values) #damage ratios
#=======================================================================
# get the damage for each house
#=======================================================================
logger.debug('getting damage for %s entries'%(str(dmg_df.shape)))
"""
to improve performance, we're only looping through those entries with real flood depths (skin_df)
however, the full results frame is still used (non_real entries should equal zero)
"""
"""generally no memory added during these
self.session.prof(state='%s.get_dmg_set.loop'%(self.name)) #memory profiling"""
cnt = 0
first = True
for index, row in dmg_df.iterrows(): #loop through each row
#===================================================================
# pre-printouts
#===================================================================
#self.session.prof(state='%s.get_dmg_set.%i'%(self.name, cnt)) #memory profiling
cnt +=1
if cnt%self.session._logstep == 0: logger.info(' (%i/%i)'%(cnt, len(dmg_df)))
#===================================================================
# retrieve info
#===================================================================
hse_obj = hse_od[index] #get this house object by bldg_id
hse_obj.floodo = self #let the house know who is flooding it
logger.debug('on hse \'%s\' \n'%hse_obj.name)
#===================================================================
# add damage results
#===================================================================
if row['hse_depth'] < self.model.hse_skip_depth:
logger.debug('depth below hse_obj.vuln_el for bldg_id: %i. setting fdmg=0'%index)
row[dmgbool] = 0.0 #set all damage to zero
#depth significant. calc it
else:
#run the house
logger.debug('running house \n')
dmg_ser = hse_obj.run_hse(row['wsl'], dmg_rat_f = dmg_rat_f)
row.update(dmg_ser) #add all these entries
#===================================================================
# extract extra attributes from the house
#===================================================================
#find the entries to skip attribute in filling
if first:
boolar1 = ~np.isin(row.index, ['total'])
boolar2 = pd.isnull(row)
boolar = np.logical_and(boolar1, boolar2)
first = False
#fill these
for attn, v in row[boolar].iteritems():
row[attn] = getattr(hse_obj, attn)
#===================================================================
# wrap up
#===================================================================
dmg_df.loc[index,:] = row #store this row back into the full results frame
#=======================================================================
# macro stats
#=======================================================================
#total
boolcol = dmg_df.columns.isin(self.model.dmg_types)
dmg_df['total'] = dmg_df.iloc[:,boolcol].sum(axis = 1) #get the sum
#=======================================================================
# closeout and reporting
#=======================================================================
#print out summaries
if not self.db_f:
logger.info('finished for %i houses'%(len(dmg_df.index)))
else:
totdmg = dmg_df['total'].sum()
totdmg_str = '$' + "{:,.2f}".format(totdmg)
logger.info('got totdmg = %s for %i houses'%(totdmg_str,len(dmg_df.index)))
if np.any(pd.isnull(dmg_df)):
raise IOError
for dmg_type in self.model.dmg_types:
dmg_tot = dmg_df[dmg_type].sum()
dmg_tot_str = '$' + "{:,.2f}".format(dmg_tot)
logger.debug('for dmg_type \'%s\' dmg_tot = %s'%(dmg_type, dmg_tot_str))
return dmg_df
def check_dmg_df(self, df):
logger = self.logger.getChild('check_dmg_df')
#=======================================================================
# check totals
#=======================================================================
boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
if not round(df['total'].sum(),2) == round(df.loc[:, boolcol].sum().sum(), 2):
logger.error('total sum did not match sum from damages')
raise IOError
def calc_statres_flood(self, df): #calculate your statistics
'running this always'
logger = self.logger.getChild('calc_statres_flood')
s = self.session.outpars_d[self.__class__.__name__]
"""needed?
self.outpath = os.path.join(self.model.outpath, self.name)"""
#=======================================================================
# total damage
#=======================================================================
for dmg_code in self.model.dmg_types + ['total']:
#loop through and see if the user asked for this output
'e.g. MC, MS, BC, BS, total'
if dmg_code in s:
v = df[dmg_code].sum()
setattr(self, dmg_code, v)
logger.debug('set \'%s\' to %.2f'%(dmg_code, v))
#=======================================================================
# by flood type
#=======================================================================
if 'dmg_sw' in s:
self.dmg_sw = df.loc[~df['gw_f'], 'total'].sum() #sum all those with surface water
if 'dmg_gw' in s:
            self.dmg_gw = df.loc[df['gw_f'], 'total'].sum() #sum all those with groundwater
#=======================================================================
# number of houses with damage
#=======================================================================
if 'hdmg_cnt' in s:
boolidx = df.loc[:, 'total'] > 0
self.hdmg_cnt = boolidx.sum()
#=======================================================================
# average house depth
#=======================================================================
if 'hdep_avg' in s:
self.hdep_avg = np.mean(df.loc[:,'hse_depth'])
#=======================================================================
# wsl average
#=======================================================================
if 'wsl_avg' in s:
self.wsl_avg = np.mean(df.loc[:,'wsl'])
#=======================================================================
# basement exposure grade counts
#=======================================================================
        'just calculating all if any of them are requested'
boolar = np.isin(np.array(['bwet_cnt', 'bdamp_cnt', 'bdry_cnt']),
np.array(s))
if np.any(boolar): self.get_begrd_cnt()
#=======================================================================
# plots
#=======================================================================
if 'dmg_res_df' in s:
self.dmg_res_df = df
"""
hp.pd.v(df)
"""
return
def get_begrd_cnt(self):
logger = self.logger.getChild('get_begrd_cnt')
df = self.dmg_res_df
#=======================================================================
# #get egrades
# try:
# ser = df.loc[:,'bsmt_egrd'] #make the slice of interest
# except:
# df.columns.values.tolist()
# raise IOError
#=======================================================================
ser = df.loc[:,'bsmt_egrd'] #make the slice of interest
begrd_l = ser.unique().tolist()
logger.debug('looping through %i bsmt_egrds: %s'%(len(begrd_l), begrd_l))
for bsmt_egrd in begrd_l:
att_n = 'b'+bsmt_egrd+'_cnt'
            #count the number of occurrences
boolar = ser == bsmt_egrd
setattr(self, att_n, int(boolar.sum()))
logger.debug('setting \'%s\' = %i'%(att_n, boolar.sum()))
logger.debug('finished \n')
return
def plot_dmg_pie(self, dmg_sum_ser_raw = None,
exp_str = 1, title = None, wtf=None): #generate a pie chart for the damage
"""
#=======================================================================
# INPUTS
#=======================================================================
dmg_sum_ser: series of damage values (see calc_summary_ser)
index: dmg_types
values: fdmg totals for each type for this flood
        exp_str: amount to explode structural damage values by
"""
#=======================================================================
# set defaults
#=======================================================================
logger = self.logger.getChild('plot_dmg_pie')
        if title is None: title = self.session.tag + ' '+self.name+' ' + 'dmgpie_plot'
        if wtf is None: wtf = self.session._write_figs
        if dmg_sum_ser_raw is None: #just calculate
dmg_sum_ser_raw = self.dmg_res_df[self.dmg_types].sum()
#dmg_sum_ser_raw = self.calc_summary_ser()
logger.debug('with dmg_sum_ser_raw: \n %s'%dmg_sum_ser_raw)
#=======================================================================
# data cleaning
#=======================================================================
#drop na
dmg_sum_ser1 = dmg_sum_ser_raw.dropna()
#drop zero values
boolidx = dmg_sum_ser1 == 0
dmg_sum_ser2 = dmg_sum_ser1[~boolidx]
if np.all(boolidx):
            logger.warning('got zero damages. no pie plot generated')
return
if boolidx.sum() > 0:
logger.warning('dmg_pie dropped %s zero totals'%dmg_sum_ser1.index[boolidx].tolist())
dmg_sum_ser = dmg_sum_ser2
#=======================================================================
# get data
#=======================================================================
#shortcuts
dmg_types = dmg_sum_ser.index.tolist()
labels = dmg_types
sizes = dmg_sum_ser.values.tolist()
#=======================================================================
# #get properties list from the dfunc tab
#=======================================================================
colors = []
explode_list = []
wed_lab_list = []
dfunc_df = self.session.pars_df_d['dfunc']
for dmg_type in dmg_types:
boolidx = dfunc_df['dmg_type'] == dmg_type #id this dmg_type
#color
color = dfunc_df.loc[boolidx,'color'].values[0]
colors.append(color) #add to the list
#explode
explode = dfunc_df.loc[boolidx,'explode'].values[0]
explode_list.append(explode) #add to the list
            #wedge_label
wed_lab = '$' + "{:,.2f}".format(dmg_sum_ser[dmg_type])
wed_lab_list.append(wed_lab)
plt.close()
fig, ax = plt.subplots()
wedges = ax.pie(sizes, explode=explode_list, labels=labels, colors = colors,
autopct=hp.plot.autopct_dollars(sizes),
shadow=True, startangle=90)
ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
ax.set_title(title)
if wtf: #write to file
filetail = self.session.name + ' '+self.name+' ' + 'dmgpie_plot'
filename = os.path.join(self.model.outpath, filetail)
hp.plot.save_fig(self, fig, savepath_raw = filename)
return ax
def plot_dmg_scatter(self, #scatter plot of damage for each house
dmg_df_raw=None, yvar = 'hse_depth', xvar = 'total', plot_zeros=True,
title=None, wtf=None, ax=None,
linewidth = 0, markersize = 3, marker = 'x',
**kwargs):
"""
for complex figures, axes should be passed and returned
#=======================================================================
# INPUTS
#=======================================================================
should really leave this for post processing
plot_zeros: flag to indicate whether entries with x value = 0 should be included
#=======================================================================
# TODO
#=======================================================================
redo this with the plot worker
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('plot_dmg_scatter')
        if title is None: title = self.session.tag + ' '+self.name + ' dmg_scatter_plot'
        if wtf is None: wtf = self.session._write_figs
        if dmg_df_raw is None:
            dmg_res_df_raw = self.dmg_res_df #just use the attached one
        else:
            dmg_res_df_raw = dmg_df_raw
        if not hp.pd.isdf(dmg_res_df_raw): raise IOError
#=======================================================================
# manipulate data for plotting
#=======================================================================
if plot_zeros:
dmg_df = dmg_res_df_raw
else:
#exclude those entries with zero value on the xvar
boolidx = dmg_res_df_raw[xvar] == 0
dmg_df = dmg_res_df_raw[~boolidx]
self.logger.warning('%s values = zero (%i) excluded from plot'%(xvar, boolidx.sum()))
#=======================================================================
# setup data plot
#=======================================================================
x_ar = dmg_df[xvar].values.tolist() #damage
xlab = 'damage($)'
'could make this more dynamic'
if sum(x_ar) <=0:
logger.warning('got no damage. no plot generated')
return
y_ar = dmg_df[yvar].values.tolist() #depth
#=======================================================================
        # Setup defaults
        #=======================================================================
        if ax is None:
plt.close('all')
fig = plt.figure(2)
fig.set_size_inches(9, 6)
ax = fig.add_subplot(111)
ax.set_title(title)
ax.set_ylabel(yvar + '(m)')
ax.set_xlabel(xlab)
#set limits
#ax.set_xlim(min(x_ar), max(x_ar))
#ax.set_ylim(min(y_ar), max(y_ar))
else:
fig = ax.figure
label = self.name + ' ' + xvar
#=======================================================================
        # send the data for plotting
#=======================================================================
pline = ax.plot(x_ar,y_ar,
label = label,
linewidth = linewidth, markersize = markersize, marker = marker,
**kwargs)
#=======================================================================
# post formatting
#=======================================================================
ax.get_xaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
"""
plt.show()
"""
        if wtf: #trigger for saving the figure
filetail = title
filename = os.path.join(self.model.outpath, filetail)
hp.plot.save_fig(self, fig, savepath_raw = filename, logger=logger)
return pline
class Binv( #class object for a building inventory
hp.data.Data_wrapper,
hp.plot.Plot_o,
hp.sim.Sim_o,
hp.oop.Parent,
hp.oop.Child):
#===========================================================================
# program pars
#===========================================================================
# legacy index numbers
legacy_ind_d = {0:'ID',1:'address',2:'CPID',10:'class', 11:'struct_type', 13:'gis_area', \
18:'bsmt_f', 19:'ff_height', 20:'xcoord',21:'ycoord', 25:'dem_el'}
    #column index where the legacy binv transitions to the new binv
legacy_break_ind = 26
#column names expected in the cleaned binv
exepcted_coln = ['gis_area', 'bsmt_f', 'ff_height',\
'dem_el', 'value', 'ayoc', 'B_f_height',\
'bkflowv_f','sumpump_f', 'genorat_f', 'hse_type', \
'name', 'anchor_el', 'parcel_area']
hse_type_list = ['AA', 'AD', 'BA', 'BC', 'BD', 'CA', 'CC', 'CD'] #classification of building types
#===========================================================================
# user provided
#===========================================================================
legacy_binv_f = True
#===========================================================================
# calculated pars
#===========================================================================
#===========================================================================
# data holders
#===========================================================================
#cnt = 0
hnew_cnt = 0
hAD_cnt = 0
def __init__(self, *vars, **kwargs):
logger = mod_logger.getChild('Binv')
logger.debug('start _init_')
"""Im explicitly attaching the child datobuilder here
dont want to change the syntax of the binv
inspect.isclass(self.kid_class)
"""
self.inherit_parent_ans=set(['mind', 'legacy_binv_f', 'gis_area_max'])
        super(Binv, self).__init__(*vars, **kwargs) #initialize the baseclass
#=======================================================================
# special inheritance
#=======================================================================
#self.model = self.parent
self.kid_class = House
self.reset_d.update({'hnew_cnt':0, 'hAD_cnt':0})
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if not self.kid_class == House:
raise IOError
if not isinstance(self.reset_d, dict):
raise IOError
if self.model is None:
raise IOError
if not self.model.name == self.parent.name:
raise IOError
#=======================================================================
# special inits
#=======================================================================
self.exepcted_coln = set(self.exepcted_coln + [self.mind]) #expect the mind in the column names as well
self.load_data()
        logger.debug('finished _init_ \n')
return
def load_data(self): #custom data loader
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_data')
#test pars
if self.session._parlo_f:
test_trim_row = self.test_trim_row
else: test_trim_row = None
#=======================================================================
# load the file
#=======================================================================
self.filepath = self.get_filepath()
logger.debug('from filepath: %s'%self.filepath)
#load from file
df_raw = hp.pd.load_xls_df(self.filepath, logger=logger, test_trim_row = test_trim_row,
header = 0, index_col = None)
#=======================================================================
# send for cleaning
#=======================================================================
df1 = hp.pd.clean_datapars(df_raw, logger = logger)
"""
hp.pd.v(df3)
"""
#=======================================================================
        # clean per the legacy binv
#=======================================================================
if self.legacy_binv_f:
df2 = self.legacy_clean_df(df1)
else:
df2 = df1
#=======================================================================
# standard clean
#=======================================================================
df3 = self.clean_inv_df(df2)
#=======================================================================
# macro data manipulations
#=======================================================================
#add names column
if not 'name' in df3.columns:
df3['name'] = 'h' + df3.loc[:, self.mind].astype(np.string_) #format as strings
#add anchor el
if not 'anchor_el' in df3.columns:
df3['anchor_el'] = df3['dem_el'] + df3['ff_height']
df3['anchor_el'] = df3['anchor_el'].astype(np.float)
#=======================================================================
# checking
#=======================================================================
if self.db_f: self.check_binv_df(df3)
#=======================================================================
# wrap up
#=======================================================================
self.childmeta_df = df3.copy()
#shortcut lists
self.bid_l = df3[self.mind].astype(np.int).values.tolist()
self.hse_types_l = df3['hse_type'].unique().tolist()
logger.info('attached binv_df with %s'%str(df3.shape))
return
"""
hp.pd.v(df3)
"""
def legacy_clean_df(self, df_raw): #compile data from legacy (rfda) inventory syntax
"""
pulling column headers from the dictionary of location keys
creating some new headers as combinations of this
"""
#=======================================================================
# setup
#=======================================================================
logger = self.logger.getChild('legacy_clean_df')
d = self.legacy_ind_d
#=======================================================================
# split the df into legacy and non
#=======================================================================
df_leg_raw = df_raw.iloc[:,0:self.legacy_break_ind]
df_new = df_raw.iloc[:,self.legacy_break_ind+1:]
#=======================================================================
# clean the legacy frame
#=======================================================================
#change all the column names
df_leg1 = df_leg_raw.copy()
""" couldnt get this to work
df_leg1.rename(mapper=d, index = 'column')"""
for colind, coln in enumerate(df_leg_raw.columns):
if not colind in d.keys():continue
df_leg1.rename(columns = {coln:d[colind]}, inplace=True)
logger.debug('renamed \'%s\' to \'%s\''%(coln,d[colind] ))
#trim down to these useful columns
boolcol = df_leg1.columns.isin(d.values()) #identify columns in the translation dictionary
df_leg2 = df_leg1.loc[:,boolcol]
logger.debug('trimmed legacy binv from %i to %i cols'%(len(df_leg_raw.columns), boolcol.sum()))
#=======================================================================
# add back the new frame
#=======================================================================
df_merge = df_leg2.join(df_new)
#=======================================================================
        # house type
#=======================================================================
df_merge.loc[:,'hse_type'] = df_leg2.loc[:,'class'] + df_leg2.loc[:,'struct_type']
logger.debug('cleaned the binv from %s to %s'%(str(df_raw.shape), str(df_merge.shape)))
if self.db_f:
if not len(df_merge) == len(df_raw):
raise IOError
if np.any(pd.isnull(df_merge['hse_type'])):
raise IOError
return df_merge
"""
hp.pd.v(df_leg_raw)
hp.pd.v(df_merge)
hp.pd.v(df_raw)
"""
def clean_inv_df(self, df_raw): #placeholder for custom cleaning
logger = self.logger.getChild('clean_inv_df')
#clean with kill_flags
'this makes it easy to trim the data'
df1 = hp.pd.clean_kill_flag(df_raw, logger = logger)
#=======================================================================
# format boolean columns
#=======================================================================
df1 = hp.pd.format_bool_cols(df1, logger = logger)
#=======================================================================
# #reformat
#=======================================================================
# the MIND as integer
df1.loc[:, self.mind] = df1.loc[:, self.mind].astype(np.int) #reset as an integer
        #ayoc as an integer
df1['ayoc'] = df1['ayoc'].astype(np.int)
#df1['hse_type'] = df1['hse_type'].astype(np.string_)
#=======================================================================
# #reindex by a sorted mind (and keep the column)
#=======================================================================
df2 = df1.set_index(self.mind, drop=False).sort_index()
#=======================================================================
# trim to the desired columns
#=======================================================================
boolcol = df2.columns.isin(self.exepcted_coln)
df3 = df2.loc[:,boolcol]
return df3
"""
df1.columns.str.strip()
df2.columns[~boolcol]
hp.pd.v(df2)
"""
def check_binv_df(self, df):
logger = self.logger.getChild('check_binv_df')
'todo: add some template check'
if not hp.pd.isdf(df):
raise IOError
if np.any(pd.isnull(df)):
raise IOError
#=======================================================================
# check all the expected columns are there
#=======================================================================
boolcol = np.isin(list(self.exepcted_coln), df.columns)
if not np.all(boolcol):
logger.error('could not find \'%s\' in the binv_df'
%np.array(list(self.exepcted_coln))[~boolcol])
"""
hp.pd.v(df)
"""
raise IOError
#=======================================================================
# check area column
#=======================================================================
boolidx = df.loc[:,'gis_area']< self.model.gis_area_min
if np.any(boolidx):
            logger.error('got %i binv entries with area < %.2f'%(boolidx.sum(), self.model.gis_area_min))
raise IOError
boolidx = df.loc[:,'gis_area']> self.model.gis_area_max
if np.any(boolidx):
logger.error('got %i binv entries with area > %.2f'%(boolidx.sum(), self.model.gis_area_max))
raise IOError
if 'bsmt_egrd' in df:
raise IOError
return
#===============================================================================
# def Xget_childmetadf(self): #custom childmeta builder
# """
# this should overwrite hte default function and be called from raise_children
# Here we add the hierarchy info to the bldg inventory
# so the children can be raised
# """
# logger = self.logger.getChild('get_childmetadf')
# df1 = self.data.copy()
#
# logger.debug('with data %s'%(str(df1.shape)))
# #=======================================================================
# # macro data manipulations
# #=======================================================================
#
# #add names column
# if not 'name' in df1.columns:
# df1['name'] = 'h' + df1.loc[:, self.mind].astype(np.string_) #format as strings
#
#
# #add anchor el
# if not 'anchor_el' in df1.columns:
# df1['anchor_el'] = df1['dem_el'] + df1['ff_height']
# df1['anchor_el'] = df1['anchor_el'].astype(np.float)
#
# """
# see House.set_hse_anchor()
# anchor_el = self.dem_el + float(self.ff_height) #height + surface elevation
# """
#
#
# #=======================================================================
# # wrap up
# #=======================================================================
# self.childmeta_df = df1
#
# """want to capture some of the edits made by House
# moved this to after raise_chidlren
# #add the df tot he rest list
# self.reset_d['childmeta_df'] = df2.copy()"""
#
# 'House makes some edits to this so we need to update this copy'
#
# """
# hp.pd.v(df1)
# hp.pd.v(df2)
# hp.pd.v(self.childmeta_df)
# hp.pd.v(self.data)
# """
#
# return
#
#===============================================================================
def raise_houses(self):
#=======================================================================
# setup
#=======================================================================
start = time.time()
logger = self.logger.getChild('raise_houses')
df = self.childmeta_df #build the childmeta intelligently
        'we could probably just pass the data directly'
if self.db_f:
if not hp.pd.isdf(df):
raise IOError
logger.info('executing with data %s'%str(df.shape))
        hse_n_d = self.raise_children_df(df, #run the generic child raiser
kid_class = self.kid_class,
dup_sibs_f = True)
"""
House.spc_inherit_anl
self.kid_class
"""
#=======================================================================
# add a custom sorted dictionary by name
#=======================================================================
#build a normal dictionary of this
d = dict()
for cname, childo in hse_n_d.iteritems():
d[childo.bldg_id] = weakref.proxy(childo)
#bundle this into a sorted ordered dict
self.hse_od = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
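        # hse_od maps bldg_id -> weakref proxy of the House, sorted by bldg_id,
        # e.g. OrderedDict([(101, <House>), (102, <House>)]) (ids illustrative)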
"""put this here so the edits made by House are captured"""
self.reset_d['childmeta_df'] = self.childmeta_df.copy()
logger.debug('calc_binv_stats() \n')
self.calc_binv_stats()
stop = time.time()
        logger.info('finished with %i houses in %.4f secs'%(len(d), stop - start))
return
def set_all_hse_atts(self, attn, #reset an attribute name/value pair to all houses in the binv
attv=None, #single value to apply to each house
ser=None, #series to pull data from indexed by the obj_key
obj_key = 'dfloc',
):
"""
NOTE: oop.attach_att_df() is similar, but doesnt handle the dynamic updating
udev.set_fhr is also similar
ToDo: consider moving this into dyno
"""
logger = self.logger.getChild('set_all_hse_atts')
#=======================================================================
# precheck
#=======================================================================
if self.db_f:
if not ser is None:
if not isinstance(ser, pd.Series):
raise IOError
if not len(self.hse_od) > 0:
raise IOError
if (attv is None) and (ser is None):
raise IOError #need at least one input
#=======================================================================
# loop and add to each house
#=======================================================================
logger.debug('dynamically updating %i houses on \'%s\''%(len(self.hse_od), attn))
for k, hse in self.hse_od.iteritems():
if not ser is None:
attv = ser[getattr(hse, obj_key)]
            #handle the updates on this house
hse.handle_upd(attn, attv, proxy(self), call_func = 'set_all_hse_atts')
return
"""
df = self.childmeta_df
df.columns
hp.pd.v(df)
"""
def calc_binv_stats(self): #calculate output stats on the inventory
"""
#=======================================================================
# CALLS
#=======================================================================
__init__
raise_children #after raising all the Houses
(Fdmg or Udev).get_restults()
#=======================================================================
# TODO
#=======================================================================
        fix this so it acts more like a dynp.update with queues triggered from changes on the House
#=======================================================================
# TESTING
#=======================================================================
hp.pd.v(df)
df.columns.values.tolist()
"""
#logger = self.logger.getChild('calc_binv_stats')
s = self.session.outpars_d[self.__class__.__name__]
"""using this in some annotations
if 'cnt' in s:"""
self.cnt = len(self.hse_od) #get the number of houses in the binv
if 'hAD_cnt' in s:
#house type counts
boolidx = self.childmeta_df.loc[:, 'hse_type'] == 'AD'
self.hAD_cnt = boolidx.sum()
if 'hnew_cnt' in s:
#new house counts
boolidx = self.childmeta_df.loc[:,'ayoc'] > self.session.year0
self.hnew_cnt = boolidx.sum()
return
def get_bsmt_egrds(self, set_f=False):
logger = self.logger.getChild('get_bsmt_egrds')
df = self.childmeta_df
if not 'bsmt_egrd' in df.columns:
#self.session.state
raise IOError
#basement exposure grade
logger.debug('getting bsmt_egrd stats on %s'%(str(df.shape)))
d = dict()
for grade in ['wet', 'dry', 'damp']: #loop through and count all the finds
#get count
boolidx = df.loc[:,'bsmt_egrd'] == grade
cnt = boolidx.sum()
d[grade] = cnt
#set as attribute
if set_f:
new_an = '%s_cnt'%grade
setattr(self, new_an, cnt)
logger.debug('for bsmt_egrd = \'%s\' found %i'%(grade,cnt))
return d
def write(self): #write the current binv to file
logger = self.logger.getChild('write')
df = self.childmeta_df
"""
hp.pd.v(df)
"""
filename = '%s binv'%(self.session.state)
filehead = self.model.tstep_o.outpath
filepath = os.path.join(filehead, filename)
hp.pd.write_to_file(filepath, df, logger=logger)
return
class House(
udev.scripts.House_udev,
hp.plot.Plot_o,
hp.dyno.Dyno_wrap,
hp.sim.Sim_o,
hp.oop.Parent, #building/asset objects
hp.oop.Child):
#===========================================================================
# program pars
#==========================================================================
    geocode_list = ['area', 'per', 'height', 'inta'] #suffixes of geometry attributes to search for (see set_geo)
finish_code_list = ['f', 'u', 't'] #code for finished or unfinished
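    # e.g. the binv attribute 'B_f_area' decomposes as place 'B', finish 'f', geo_code 'area'
    # (attribute names are assembled this way in set_geo_dxcol below)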
#===========================================================================
# debugging
#===========================================================================
last_floodo = None
#===========================================================================
# user provided pars
#===========================================================================
dem_el = None
hse_type = None # Class + Type categorizing the house
anchor_el = None # anchor elevation for house relative to datum (generally main floor el)
    gis_area = None #footprint area (generally from the binv)
bsmt_f = True
area_prot_lvl = 0 #level of area protection
B_f_height = None
#defaults passed from model
"""While the ICS for these are typically uniform and broadcast down by the model,
these need to exist on the House, so we can spatially limit our changes"""
    G_anchor_ht = None #default garage anchor height (chosen arbitrarily by IBI (2015))
joist_space = None #space between basement and mainfloor. used to set the
#===========================================================================
# calculated pars
#===========================================================================
floodo = None #flood object flooding the house
# #geometry placeholders
#geo_dxcol_blank = None #blank dxcol for houes geometry
geo_dxcol = None
'keeping just this one for reporting and dynp'
boh_min_val = None #basement open height minimum value
#===========================================================================
# B_f_area = None #basement finished (liveable) area
# B_f_per = None #perimeter
# B_f_inta = None
#
# B_u_area = None
# B_u_per = None
# B_u_inta = None
#
# M_f_area = None
# M_f_per = None
# M_f_inta = None
#
# M_u_area = None #mainfloor non-finisehd area
# M_u_per = None
# M_u_inta = None
#
# """
# For garages, the assessment records have the area under
# BLDG_TOTAL_NONLIV_AREA_ABOVE and P2.
# average = 48m2.
# for the legacy rfda dmg_feat_tables, we don't know what the base area was for the garage
# lets assume 50m2
# also, these are usually pretty square
# """
# G_f_area = None
# G_f_per = None
# G_f_inta = None
#
# G_u_area = None
# G_u_per = None
# G_u_inta = None
#
# #heights
# """these are interior aeras, Bheight + joist space = B_anchor_ht
# assumed some typical values from the internet.
# really shouldnt use the NONE values here.. these are just placeholders"""
# M_f_height = None #default mainfloor height
# B_f_height = None
# G_f_height = None
#
# M_u_height = None
# B_u_height = None
# G_u_height = None
#===========================================================================
# #anchoring
"""
Im keeping anchor heights separate from geometry attributes as these could still apply
even for static dmg_feats
"""
bsmt_opn_ht = 0.0 #height of lowest basement opening
damp_spill_ht = 0.0
vuln_el = 9999 #starter value
# personal property protection
bkflowv_f = False #flag indicating the presence of a backflow valve on this property
sumpump_f = False
genorat_f = False
bsmt_egrd = ''
#statistics
BS_ints = 0.0 #some statistic of the weighted depth/damage of the BS dfunc
max_dmg = 0.0 #max damage possible for this house
#===========================================================================
# data containers
#===========================================================================
dd_df = None #df results of total depth damage
def __init__(self, *vars, **kwargs):
logger = mod_logger.getChild('House')
logger.debug('start _init_')
#=======================================================================
# attach pre init atts
#=======================================================================
#self.model = self.parent.model #pass the Fdmg model down
'put this here just to keep the order nice and avoid the unresolved import error'
self.inherit_parent_ans=set(['mind', 'model'])
#=======================================================================
        # #initialize the baseclass
#=======================================================================
super(House, self).__init__(*vars, **kwargs)
if self.db_f:
if self.model is None: raise IOError
#=======================================================================
#common setup
#=======================================================================
if self.sib_cnt == 0:
logger.debug("sib_cnt=0. setting atts")
self.kid_class = Dfunc
self.childmeta_df = self.model.house_childmeta_df #dfunc meta data
self.joist_space = self.model.joist_space
self.G_anchor_ht = self.model.G_anchor_ht
#=======================================================================
        # unique setup
#=======================================================================
self.bldg_id = int(getattr(self, self.mind ))
#self.ayoc = int(self.ayoc)
#self.area_prot_lvl = int(self.area_prot_lvl)
self.bsmt_f = hp.basic.str_to_bool(self.bsmt_f, logger=self.logger)
if not 'B' in self.model.place_codes:
self.bsmt_f = False
'these need to be unique. calculated during init_dyno()'
self.post_upd_func_s = set([self.calc_statres_hse])
"""ahndled by dyno
self.reset_d['hse_type'] = self.hse_type
'using this for type change checking'
self.kid_class
"""
logger.debug('building the house \n')
self.build_house()
logger.debug('raising my dfuncs \n')
self.raise_dfuncs()
logger.debug('init_dyno \n')
self.init_dyno()
#=======================================================================
        # checking
#=======================================================================
if self.db_f: self.check_house()
logger.debug('_init_ finished as %i \n'%self.bldg_id)
return
def check_house(self):
logger = self.logger.getChild('check_house')
if not self.model.__repr__() == self.parent.parent.__repr__():
raise IOError
#=======================================================================
# garage area check
#=======================================================================
g_area = self.geo_dxcol.loc['area',('G','u')]
if g_area > self.gis_area:
logger.error('got garage area greater than foot print for the house!')
"""if we use the legacy areas for the garage curves this will often be the case
raise IOError"""
return
    def build_house(self): #build yourself from the building inventory
"""
#=======================================================================
# CALLS
#=======================================================================
binv.raise_children()
spawn_child()
"""
logger = self.logger.getChild('build_house')
#=======================================================================
# custom loader functions
#=======================================================================
#self.set_binv_legacy_atts() #compile data from legacy (rfda) inventory syntax
logger.debug('\n')
self.set_geo_dxcol() #calculate the geometry (defaults) of each floor
logger.debug('\n')
self.set_hse_anchor()
""" a bit redundant, but we need to set the bsmt egrade regardless for reporting consistency
'these should be accessible regardless of dfeats as they only influence the depth calc'"""
self.set_bsmt_egrd()
if self.bsmt_f:
logger.debug('\n')
self.set_bsmt_opn_ht()
logger.debug('set_damp_spill_ht() \n')
self.set_damp_spill_ht()
#=======================================================================
# value
#=======================================================================
'need a better way to do this'
self.cont_val = self.value * self.model.cont_val_scale
if self.db_f:
if self.gis_area < self.model.gis_area_min:
raise IOError
if self.gis_area > self.model.gis_area_max: raise IOError
logger.debug('finished as %s \n'%self.hse_type)
def raise_dfuncs(self): #build dictionary with damage functions for each dmg_type
"""
2018 06 05: This function isnt setup very well
called by spawn_child and passing childmeta_df (from dfunc tab. see above)
this allows each dfunc object to be called form the dictionary by dmg_type
dfunc_df is sent as the childmeta_df (attached during __init__)
#=======================================================================
# INPUTS
#=======================================================================
dfunc_df: df with headers:
these are typically assigned from the 'dfunc' tab on the pars.xls
#=======================================================================
# TESTING
#=======================================================================
hp.pd.v(childmeta_df)
"""
#=======================================================================
        # #defaults
#=======================================================================
logger = self.logger.getChild('raise_dfuncs')
logger.debug('starting')
#self.kids_d = dict() #reset this just incase
df = self.childmeta_df
'this is a slice from the dfunc tab made by Fdmg.load_pars_dfunc'
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if not hp.pd.isdf(df): raise IOError
if len(df) == 0: raise IOError
if not self.kid_class == Dfunc:
raise IOError
if len(self.kids_d) > 0: raise IOError
#=======================================================================
# compile for each damage type
#=======================================================================
self.dfunc_d = self.raise_children_df(df,
kid_class = self.kid_class,
dup_sibs_f = True)
#=======================================================================
# closeout and wrap up
#=======================================================================
logger.debug('built %i dfunc children: %s'%(len(self.dfunc_d), self.dfunc_d.keys()))
return
def set_hse_anchor(self):
'pulled this out so updates can be made to dem_el'
if self.is_frozen('anchor_el'): return True
anchor_el = self.dem_el + float(self.ff_height) #height + surface elevation
#set the update
self.handle_upd('anchor_el', anchor_el, proxy(self), call_func = 'set_hse_anchor')
return True
def set_bsmt_opn_ht(self):
"""
        bsmt_opn_ht is used by dfuncs with bsmt_egrd == 'damp' and damp_func_code == 'spill'
for low water floods
"""
#=======================================================================
# shortcuts
#=======================================================================
if not self.bsmt_f: return True
#=======================================================================
# check dependencies and frozen
        #=======================================================================
if not self.session.state=='init':
if self.is_frozen('bsmt_opn_ht'): return True
dep_l = [([self], ['set_hse_anchor()', 'set_geo_dxcol()'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_bsmt_opn_ht'):
return False
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_bsmt_opn_ht')
#=======================================================================
# from user provided minimum
#=======================================================================
if self.model.bsmt_opn_ht_code.startswith('*min'):
#first time calcs
if self.boh_min_val is None:
'this means we are non dynamic'
s_raw = self.model.bsmt_opn_ht_code
s = re.sub('\)', '',s_raw[5:])
self.boh_min_val = float(s) #pull the number out of the brackets
min_val = self.boh_min_val
# get the basement anchor el
B_f_height = float(self.geo_dxcol.loc['height',('B','t')]) #pull from frame
bsmt_anchor_el = self.anchor_el - B_f_height - self.joist_space#basement curve
#get the distance to grade
bsmt_to_dem = self.dem_el - bsmt_anchor_el
#take the min of all three
bsmt_opn_ht = min(B_f_height, bsmt_to_dem, min_val)
if self.db_f:
#detailed output
boolar = np.array([B_f_height, bsmt_to_dem, min_val]) == bsmt_opn_ht
selected = np.array(['B_f_height', 'bsmt_to_dem', 'min_val'])[boolar]
logger.debug('got bsmt_opn_ht = %.2f from \'%s\''%(bsmt_opn_ht, selected[0]))
else:
                logger.debug('got bsmt_opn_ht = %.2f'%bsmt_opn_ht)
#=======================================================================
# from user provided float
#=======================================================================
else:
bsmt_opn_ht = float(self.model.bsmt_opn_ht_code)
#=======================================================================
# wrap up
#=======================================================================
self.handle_upd('bsmt_opn_ht', bsmt_opn_ht, proxy(self), call_func = 'set_bsmt_opn_ht')
if self.db_f:
if not bsmt_opn_ht > 0:
raise IOError
return True
def set_damp_spill_ht(self):
damp_spill_ht = self.bsmt_opn_ht / 2.0
self.handle_upd('damp_spill_ht', damp_spill_ht, proxy(self), call_func = 'set_damp_spill_ht')
return True
def set_bsmt_egrd(self): #calculate the basement exposure grade
"""
bkflowv_f sumpump_f genorat_f
        There is also a global flag to indicate whether bsmt_egrd should be considered or not
for the implementation of the bsmt_egrd in determining damages, see Dfunc.get_dmg_wsl()
#=======================================================================
# CALLS
#=======================================================================
this is now called during every get_dmgs_wsls()... as gpwr_f is a function of the Flood object
consider only calling w
"""
#=======================================================================
# shortcuts
#=======================================================================
if self.is_frozen('bsmt_egrd'):return
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_bsmt_egrd')
if self.bsmt_f:
#=======================================================================
# from plpms
#=======================================================================
if self.model.bsmt_egrd_code == 'plpm':
cond = 'plpm'
#=======================================================================
# get the grid power state
#=======================================================================
if self.session.state == 'init':
gpwr_f = self.model.gpwr_f
cond = cond + '.init'
else:
gpwr_f = self.floodo.gpwr_f
cond = '%s.%s'%(cond, self.floodo.name)
#=======================================================================
# grid power is on
#=======================================================================
if gpwr_f:
cond = cond + '.on'
if self.bkflowv_f and self.sumpump_f:
bsmt_egrd = 'dry'
elif self.bkflowv_f or self.sumpump_f:
bsmt_egrd = 'damp'
else:
bsmt_egrd = 'wet'
#=======================================================================
# grid power is off
#=======================================================================
else:
cond = cond + '.off'
if self.bkflowv_f and self.sumpump_f and self.genorat_f:
bsmt_egrd = 'dry'
elif self.bkflowv_f or (self.sumpump_f and self.genorat_f):
bsmt_egrd = 'damp'
else: bsmt_egrd = 'wet'
self.gpwr_f = gpwr_f #set this
logger.debug('set bsmt_egrd = %s (from \'%s\') with grid_power_f = %s'%(bsmt_egrd,self.bsmt_egrd, gpwr_f))
#=======================================================================
# ignore bsmt_egrd
#=======================================================================
elif self.model.bsmt_egrd_code == 'none':
cond = 'none'
bsmt_egrd = 'wet'
#=======================================================================
# allow the user to override all
#=======================================================================
elif self.model.bsmt_egrd_code in ['wet', 'damp', 'dry']:
cond = 'global'
bsmt_egrd = self.model.bsmt_egrd_code
else:
raise IOError
else:
cond = 'nobsmt'
bsmt_egrd = 'nobsmt'
#=======================================================================
# wrap up
#=======================================================================
self.bsmt_egrd = bsmt_egrd
"""report/collect on the flood
self.parent.childmeta_df.loc[self.dfloc,'bsmt_egrd'] = bsmt_egrd"""
"""
if self.db_f:
if not self.session.state == 'init':
#===============================================================
                # write the beg history
#===============================================================
if self.model.beg_hist_df is None:
self.model.beg_hist_df.loc[self.dfloc, self.floodo.name] = '%s_%s'%(bsmt_egrd, cond)"""
return cond
def set_geo_dxcol(self): #calculate the geometry of each floor based on the geo_build_code
"""
builds a dxcol with all the geometry attributes of this house
called by load_data when self.session.wdfeats_f = True
#=======================================================================
# KEY VARS
#=======================================================================
geo_build_code: code to indicate what geometry to use for the house. see the dfunc tab
'defaults': see House.get_default_geo()
'from_self': expect all geo atts from the binv.
'any': take what you can from the binv, everything else use defaults.
'legacy': use gis area for everything
gbc_override: used to override the geo_build_code
geo_dxcol: house geometry
#=======================================================================
# UDPATES
#=======================================================================
when a specific geometry attribute of the house is updated (i.e. B_f_height)
this dxcol needs to be rebuilt
and all the dfuncs need to run build_dd_ar()
"""
logger = self.logger.getChild('set_geo_dxcol')
if self.is_frozen('geo_dxcol', logger=logger):
return True
pars_dxcol = self.session.pars_df_d['hse_geo'] #pull the pars frame
#=======================================================================
# get default geometry for this house
#=======================================================================
self.defa = self.gis_area #default area
if self.defa <=0:
logger.error('got negative area = %.2f'%self.defa)
raise IOError
self.defp = 4*math.sqrt(self.defa)
#=======================================================================
# setup the geo_dxcol
#=======================================================================
        dxcol = self.model.geo_dxcol_blank.copy() #get a copy of the blank one
        'I need to place the reference here so that geometry attributes have access to each other'
#self.geo_dxcol = dxcol
place_codes = dxcol.columns.get_level_values(0).unique().tolist()
#finish_codes = dxcol.columns.get_level_values(1).unique().tolist()
#geo_codes = dxcol.index
logger.debug("for hse_type \'%s\' from geo_dxcol_blank %s filling:"%(self.hse_type, str(dxcol.shape)))
#=======================================================================
# #loop through each place code and compile the appropriate geometry
#=======================================================================
for place_code in place_codes:
geo_df = dxcol[place_code] #geometry for just this place
pars_df = pars_dxcol[place_code]
#logger.debug('filling geo_df for place_code: \'%s\' '%(place_code))
#===================================================================
# #loop through and build the geometry by each geocode
#===================================================================
for geo_code, row in geo_df.iterrows():
for finish_code, value in row.iteritems():
#===========================================================
# total column
#===========================================================
if finish_code == 't':
uval = dxcol.loc[geo_code, (place_code, 'u')]
fval = dxcol.loc[geo_code, (place_code, 'f')]
if self.db_f:
if np.any(pd.isnull([uval, fval])):
raise IOError
if geo_code == 'height': #for height, take the maximum
att_val = max(uval, fval)
else: #for other geometry, take the total
att_val = uval + fval
#===========================================================
# finish/unfinished
#===========================================================
else:
#get the user passed par for this
gbc = pars_df.loc[geo_code, finish_code]
try:gbc = float(gbc)
except: pass
#===========================================================
# #assemble per the geo_build_code
#===========================================================
#user specified code
if isinstance(gbc, basestring):
gbc = str(gbc)
if gbc == '*binv':
att_name = place_code +'_'+finish_code+'_'+ geo_code #get the att name for this
att_val = getattr(self, att_name) #get this attribute from self
elif gbc == '*geo':
att_val = self.calc_secondary_geo(place_code, finish_code, geo_code, dxcol=dxcol) #calculate the default value
elif gbc.startswith('*tab'):
#get the pars
                                tabn = re.sub('\)',"",gbc[5:]) #remove the end parenthesis
df = self.session.pars_df_d[tabn]
att_name = place_code +'_'+finish_code+'_'+ geo_code #get the att name for this
att_val = self.get_geo_from_other(df, att_name)
else:
att_val = getattr(self, gbc)
                        #user specified value
elif isinstance(gbc, float): #just use the default value provided in the pars
att_val = gbc
else: raise IOError
logger.debug('set %s.%s.%s = %.2f with gbc \'%s\''%(place_code,finish_code,geo_code, att_val, gbc))
#===========================================================
# value checks
#===========================================================
if self.db_f:
if not isinstance(att_val, float):
raise IOError
if pd.isnull(att_val):
raise IOError
if att_val < 0:
raise IOError
if att_val is None:
raise IOError
#===========================================================
# set the value
#===========================================================
dxcol.loc[geo_code, (place_code, finish_code)] = att_val
#row[finish_code] = att_val #update the ser
#logger.debug('set \'%s\' as \'%s\''%(att_name, att_val))
#=======================================================================
# special attribute setting
#=======================================================================
'need this as an attribute for reporting'
B_f_height = dxcol.loc['height', ('B', 'f')]
#===============================================================
# POST
#===============================================================
#logger.debug('built house_geo_dxcol %s'%str(dxcol.shape))
self.handle_upd('geo_dxcol', dxcol, weakref.proxy(self), call_func = 'set_geo_dxcol')
self.handle_upd('B_f_height', B_f_height, weakref.proxy(self), call_func = 'set_geo_dxcol')
return True
    def calc_secondary_geo(self, #set the default geometry for this attribute
place_code, finish_code, geo_code,
dxcol = None):
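        # Worked example (illustrative numbers): for area = 100.0 the assumed-square
        # perimeter is 4*sqrt(100) = 40.0, and 'inta' = area + height*per.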
        logger = self.logger.getChild('calc_secondary_geo')
#=======================================================================
# get primary geometrty from frame
#=======================================================================
if dxcol is None: dxcol = self.geo_dxcol
area = dxcol.loc['area',(place_code, finish_code)]
height = dxcol.loc['height',(place_code, finish_code)]
#=======================================================================
        # calculate the geometries
#=======================================================================
if geo_code == 'inta':
per = dxcol.loc['per',(place_code, finish_code)]
att_value = float(area + height * per)
elif geo_code == 'per':
per = 4*math.sqrt(area)
att_value = float(per)
else: raise IOError
logger.debug(" for \'%s\' found %.2f"%(geo_code, att_value))
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
for v in [area, height, per, att_value]:
if not isinstance(v, float): raise IOError
if pd.isnull(v):
raise IOError
if not v >= 0: raise IOError
return att_value
def get_geo_from_other(self, df_raw, attn_search): #set the garage area
"""
        we need this here to replicate the scaling done by the legacy curves on the garage dmg_feats
        assuming the first column is the cross reference data
"""
logger = self.logger.getChild('get_geo_from_other')
#=======================================================================
# find the cross reference row
#=======================================================================
cross_attn = df_raw.columns[0]
cross_v = getattr(self, cross_attn) #get our value for this
boolidx = df_raw.iloc[:,0] == cross_v #locate our cross reference
#=======================================================================
# find the search column
#=======================================================================
boolcol = df_raw.columns == attn_search
value_fnd = df_raw.loc[boolidx, boolcol].iloc[0,0] #just take the first
if self.db_f:
if not boolidx.sum() == 1:
raise IOError
            if not boolcol.sum() == 1:
                raise IOError
return value_fnd
def run_hse(self, *vargs, **kwargs):
'TODO: compile the total dfunc and use that instead?'
logger = self.logger.getChild('run_hse')
self.run_cnt += 1
#=======================================================================
# precheck
#=======================================================================
"""todo: check that floods are increasing
if self.db_f:
if self.last_floodo is None:
pass"""
#=======================================================================
# basement egrade reset check
#=======================================================================
if self.model.bsmt_egrd_code == 'plpm':
if self.run_cnt ==1:
cond = self.set_bsmt_egrd()
elif not self.gpwr_f == self.floodo.gpwr_f:
cond = self.set_bsmt_egrd()
else:
cond = 'nochng'
logger.debug('no change in gpwr_f. keeping bsmt egrd = %s'%self.bsmt_egrd)
else:
cond = 'no_plpm'
#===============================================================
        # write the beg history
#===============================================================
if not self.model.beg_hist_df is None:
self.model.beg_hist_df.loc[self.dfloc, (self.floodo.ari, 'bsmt_egrd')] = self.bsmt_egrd
self.model.beg_hist_df.loc[self.dfloc, (self.floodo.ari, 'cond')] = cond
logger.debug('returning get_dmgs_wsls \n')
results = self.get_dmgs_wsls(*vargs, **kwargs)
self.floodo = None #clear this
return results
def get_dmgs_wsls(self, #get damage at this depth from each Dfunc
wsl,
                      dmg_rat_f = False, #flag to include damage ratios in the outputs
#res_ser=None,
#dmg_type_list=None,
):
"""
#=======================================================================
# INPUTS
#=======================================================================
res_ser: shortcut so that damage are added to this series
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_dmgs_wsls')
#=======================================================================
# calculate damages by type
#=======================================================================
id_str = self.get_id()
#=======================================================================
# fast calc
#=======================================================================
if not dmg_rat_f:
dmg_ser = pd.Series(name = self.name, index = self.dfunc_d.keys())
"""
logger.debug('\'%s\' at wsl= %.4f anchor_el = %.4f for %i dfuncs bsmt_egrd \'%s\'\n'
%(id_str, wsl, self.anchor_el, len(dmg_ser), self.bsmt_egrd))"""
for dmg_type, dfunc in self.kids_d.iteritems():
logger.debug('getting damages for \'%s\' \n'%dmg_type)
                #get the damage
_, dmg_ser[dmg_type], _ = dfunc.run_dfunc(wsl)
dfunc.get_results() #store these outputs if told
#=======================================================================
# full calc
#=======================================================================
else:
raise IOError #check this
dmg_df = pd.DataFrame(index = self.dfunc_d.keys(), columns = ['depth', 'dmg', 'dmg_raw'])
dmg_ser = pd.Series()
logger.debug('\'%s\' at wsl= %.4f anchor_el = %.4f for %i dfuncs bsmt_egrd \'%s\'\n'
%(id_str, wsl, self.anchor_el, len(dmg_df), self.bsmt_egrd))
for indx, row in dmg_df.iterrows():
dfunc = self.kids_d[indx]
row['depth'], row['dmg'], row['dmg_raw'] = dfunc.run_dfunc(wsl)
dfunc.get_results() #store these outputs if told
#enter into series
dmg_ser[indx] = row['dmg']
dmg_ser['%s_rat'%indx] = row['dmg_raw']
#=======================================================================
# wrap up
#=======================================================================
logger.debug('at %s finished with %i dfuncs queried and res_ser: \n %s \n'
%(self.model.tstep_o.name, len(self.kids_d), dmg_ser.values.tolist()))
if self.db_f:
#check dfeat validity
if 'BS' in self.kids_d.keys():
dfunc = self.kids_d['BS']
d = dfunc.kids_d
for k, v in d.iteritems():
if not v.hse_type == self.hse_type:
logger.error('%s.%s hse_type \'%s\' does not match mine \'%s\''
%(v.parent.name, v.name, v.hse_type, self.hse_type))
raise IOError
return dmg_ser
def raise_total_dfunc(self, #compile the total dd_df and raise it as a child
dmg_codes = None, place_codes = None):
""" this is mostly used for debugging and comparing of curves form differnet methods
#=======================================================================
# todo
#=======================================================================
allow totaling by
possible performance improvement;
compile the total for all objects, then have Flood.get_dmg_set only run the totals
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('raise_total_dfunc')
        if dmg_codes is None: dmg_codes = self.model.dmg_codes
        if place_codes is None: place_codes = self.model.place_codes
        tot_name = self.get_tot_name(dmg_codes)
#=======================================================================
# get the metadata for the child
#=======================================================================
df_raw = self.session.pars_df_d['dfunc'] #start with the raw tab data
#search by placecode
        boolidx1 = df_raw['place_code'] == 'total' #identify the 'total' entries
#search by dmg_code where all strings in the list are a match
        boolidx2 = hp.pd.search_str_fr_list(df_raw['dmg_code'], dmg_codes, all_any='any') #find rows matching any dmg_code
if boolidx2.sum() <1:
logger.warning('unable to find a match in the dfunc tab for %s. using default'%tot_name)
boolidx2 = pd.Series(index = boolidx2.index, dtype = np.bool) #all true
'todo: add some logic for only finding one of the damage codes'
#get this slice
boolidx = np.logical_and(boolidx1, boolidx2)
if not boolidx.sum() == 1:
logger.error('childmeta search boolidx.sum() = %i'%boolidx.sum())
raise IOError
att_ser = df_raw[boolidx].iloc[0]
        'need to add the name here as we are not using the childname override'
logger.debug('for place_code: \'total\' and dmg_code: \'%s\' found child meta from dfunc_df'%(dmg_codes))
#=======================================================================
# raise the child
#=======================================================================
#set the name
child = self.spawn_child(att_ser = att_ser, childname = tot_name)
#=======================================================================
# #do custom edits for total
#=======================================================================
child.anchor_el = self.anchor_el
#set the dd_ar
dd_df = self.get_total_dd_df(dmg_codes, place_codes)
depths = dd_df['depth'].values - child.anchor_el #convert back to no datum
child.dd_ar = np.array([depths, dd_df['damage'].values])
        #add this to the dictionary
self.kids_d[child.name] = child
logger.debug('copied and edited a child for %s'%child.name)
return child
def get_total_dd_df(self, dmg_codes, place_codes): #get the total dd_df (across all dmg_types)
logger = self.logger.getChild('get_total_dd_df')
#=======================================================================
        # compile all the depth_damage entries
#=======================================================================
df_full = pd.DataFrame(columns = ['depth', 'damage_cum', 'source'])
# loop through and fill the df
cnt = 0
for datoname, dato in self.kids_d.iteritems():
if not dato.dmg_code in dmg_codes: continue #skip this one
if not dato.place_code in place_codes: continue
cnt+=1
#===================================================================
# get the adjusted dd
#===================================================================
df_dato = pd.DataFrame() #blank frame
df_dato['depth'] = dato.dd_ar[0]+ dato.anchor_el #adjust the dd to the datum
df_dato['damage_cum'] = dato.dd_ar[1]
"""the native format of the dmg_ar is cumulative damages
to sum these, we need to back compute to incremental
"""
df_dato['damage_inc'] = hp.pd.get_incremental(df_dato['damage_cum'], logger=logger)
df_dato['source'] = datoname
#append these to the full
df_full = df_full.append(df_dato, ignore_index=True)
        logger.debug('compiled all dd entries %s from %i dfuncs with dmg_codes: %s'
%(str(df_full.shape), cnt, dmg_codes))
df_full = df_full.sort_values('depth').reset_index(drop=True)
#=======================================================================
# harmonize this into a dd_ar
#=======================================================================
#get depths
depths_list = df_full['depth'].sort_values().unique().tolist()
#get starter frame
dd_df = pd.DataFrame(columns = ['depth', 'damage'])
dd_df['depth'] = depths_list #add in the depths
for index, row in dd_df.iterrows(): #sort through and sum by depth
boolidx = df_full['depth'] <= row['depth'] #identify all those entries in the full
row['damage'] = df_full.loc[boolidx, 'damage_inc'].sum() #add these as the sum
dd_df.iloc[index,:] = row #update the master
logger.debug('harmonized and compiled dd_df %s'%str(dd_df.shape))
self.dd_df = dd_df
return dd_df
    def get_tot_name(self, dmg_codes): #return the equivalent tot name
'not sure whats going on here'
new_str = 'total_'
for dmg_code in dmg_codes: new_str = new_str + dmg_code
return new_str
def calc_statres_hse(self): #calculate statistics for the house (outside of a run)
"""
#=======================================================================
# CALLS
#=======================================================================
this is always called with mypost_update() executing each command in self.post_upd_func_s()
mypost_update() is called:
init_dyno() #first call before setting the OG values
session.post_update() #called at the end of all the update loops
"""
logger = self.logger.getChild('calc_statres_hse')
s = self.session.outpars_d[self.__class__.__name__]
#=======================================================================
# BS_ints
#=======================================================================
if 'BS_ints' in s:
'I dont like this as it requires updating the child as well'
"""rfda curves also have this stat
if self.dfunc_type == 'dfeats':"""
            #update the kid
if not self.kids_d['BS'].calc_intg_stat(): raise IOError
self.BS_ints = self.kids_d['BS'].intg_stat
"""this is handled by set_og_vals()
if self.session.state == 'init':
self.reset_d['BS_ints'] = self.BS_ints"""
logger.debug('set BS_ints as %.4f'%self.BS_ints)
if 'vuln_el' in s:
self.set_vuln_el()
if 'max_dmg' in s:
self.max_dmg = self.get_max_dmg()
self.parent.childmeta_df.loc[self.dfloc, 'max_dmg'] = self.max_dmg #set into the binv_df
return True
    def set_vuln_el(self): #calculate the minimum vulnerability elevation
"""
#=======================================================================
# CALLS
#=======================================================================
TODO: consider including some logic for bsmt_egrade and spill type
"""
#=======================================================================
# check frozen and dependenceis
#=======================================================================
logger = self.logger.getChild('set_vuln_el')
"""this is a stat, not a dynamic par
if self.is_frozen('vuln_el', logger=logger): return True"""
vuln_el = 99999 #starter value
for dmg_type, dfunc in self.kids_d.iteritems():
vuln_el = min(dfunc.anchor_el, vuln_el) #update with new minimum
logger.debug('set vuln_el = %.2f from %i dfuncs'%(vuln_el, len(self.kids_d)))
self.vuln_el = vuln_el
return True
def get_max_dmg(self): #calculate the maximum damage for this house
#logger = self.logger.getChild('get_max_dmg')
ser = pd.Series(index = self.kids_d.keys())
#=======================================================================
# collect from each dfunc
#=======================================================================
for dmg_type, dfunc in self.kids_d.iteritems():
ser[dmg_type] = dfunc.dd_ar[1].max()
return ser.sum()
def plot_dd_ars(self, #plot each dfunc on a single axis
datum='house', place_codes = None, dmg_codes = None, plot_tot = False,
annot=True, wtf=None, title=None, legon=False,
ax=None,
transparent = True, #flag to indicate whether the figure should have a transparent background
**kwargs):
"""
#=======================================================================
# INPUTS
#=======================================================================
datum: code to indicate what datum to plot the depth series of each dd_ar
None: raw depths (all start at zero)
real: depths relative to the project datum
house: depths relative to the hse_obj anchor (generally Main = 0)
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('plot_dd_ars')
if wtf==None: wtf= self.session._write_figs
if dmg_codes is None: dmg_codes = self.model.dmg_codes
if place_codes is None: place_codes = self.model.place_codes
if title is None:
title = 'plot_dd_ars on %s for %s and %s'%(self.name, dmg_codes, place_codes)
if plot_tot: title = title + 'and T'
'this should let the first plotter setup the axis '
logger.debug('for \n dmg_codes: %s \n place_codes: %s'%(dmg_codes, place_codes))
#=======================================================================
# plot the dfuncs that fit the criteria
#=======================================================================
dfunc_nl = [] #list of dfunc names fitting criteria
for datoname, dato in self.dfunc_d.iteritems():
if not dato.dmg_code in dmg_codes: continue
if not dato.place_code in place_codes: continue
ax = dato.plot_dd_ar(ax=ax, datum = datum, wtf=False, title = title, **kwargs)
dfunc_nl.append(dato.name)
#=======================================================================
# add the total plot
#=======================================================================
if plot_tot:
#get the dato
tot_name = self.get_tot_name(dmg_codes)
if not tot_name in self.kids_d.keys(): #build it
'name searches should still work'
tot_dato = self.raise_total_dfunc(dmg_codes, place_codes)
else:
tot_dato = self.kids_d[tot_name]
#plot the dato
ax = tot_dato.plot_dd_ar(ax=ax, datum = datum, wtf=False, title = title, **kwargs)
#=======================================================================
# add annotation
#=======================================================================
if not annot is None:
if annot:
"""WARNING: not all attributes are generated for the differnt dfunc types
"""
B_f_height = float(self.geo_dxcol.loc['height',('B','f')]) #pull from frame
annot_str = 'hse_type = %s\n'%self.hse_type +\
' gis_area = %.2f m2\n'%self.gis_area +\
' anchor_el = %.2f \n'%self.anchor_el +\
' dem_el = %.2f\n'%self.dem_el +\
' B_f_height = %.2f\n'%B_f_height +\
' bsmt_egrd = %s\n'%self.bsmt_egrd +\
' AYOC = %i\n \n'%self.ayoc
#add info for each dfunc
for dname in dfunc_nl:
dfunc = self.dfunc_d[dname]
annot_str = annot_str + annot_builder(dfunc)
else: annot_str = annot
#=======================================================================
# Add text string 'annot' to lower left of plot
#=======================================================================
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
            x_text = xmin + (xmax - xmin)*.7 # 70% of the way along the x axis
            y_text = ymin + (ymax - ymin)*.01 # 1% above the bottom axis
anno_obj = ax.text(x_text, y_text, annot_str)
#=======================================================================
# save figure
#=======================================================================
if wtf:
"""
self.outpath
"""
fig = ax.figure
flag = hp.plot.save_fig(self, fig, dpi = self.dpi, legon=legon, transparent = transparent)
if not flag: raise IOError
logger.debug('finished as %s'%title)
return ax
    def write_all_dd_dfs(self, tailpath = None): #write all the children's dd_dfs
if tailpath is None: tailpath = os.path.join(self.outpath, self.name)
if not os.path.exists(tailpath): os.makedirs(tailpath)
for gid, childo in self.kids_d.iteritems():
            if not childo.dfunc_type == 'dfeats': continue #skip this one
filename = os.path.join(tailpath, childo.name + ' dd_df.csv')
childo.recompile_dd_df(outpath = filename)
class Dfunc(
hp.plot.Plot_o,
hp.dyno.Dyno_wrap,
        hp.sim.Sim_o, #damage function of a specific type. to be attached to a house
hp.oop.Parent,
hp.oop.Child):
'''
#===========================================================================
# architecture
#===========================================================================
    rfda per house predicts 4 damage types (MS, MC, BS, BC)
    do we want these damage types contained under one Dfunc class object? or separate?
    let's keep them separate. any combining can be handled in the House class
#===========================================================================
# main vars
#===========================================================================
dd_ar: main damage array (np.array([depth_list, dmg_list])) data
using np.array for efficiency
this is compiled based on dfunc_type see:
legacy: get_ddar_rfda
abmri: get_ddar_dfeats (this requires some intermittent steps)
'''
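    # Illustrative dd_ar layout only (made-up values): row 0 holds depths and row 1 the
    # corresponding damages, e.g.
    #   dd_ar = np.array([[0.0, 0.3, 0.6, 0.9],
    #                     [0.0, 1200.0, 4500.0, 9000.0]])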
#===========================================================================
# program pars
#===========================================================================
"""post_cmd_str_l = ['build_dfunc']
# object handling overrides
load_data_f = True
    raise_kids_f = True #called explicitly in load_data()"""
#raise_in_spawn_f = True #load all the children before moving on to the next sibling
db_f = False
"""
#===========================================================================
# #shadow kids
#===========================================================================
see note under Dfeats
"""
reset_shdw_kids_f = False #flag to install the shadow_kids_d during reset
shdw_kids_d = None #placeholder for the shadow kids
kid_cnt = 0 #number of kids you have
#===========================================================================
# passed pars from user
#===========================================================================
place_code = None
dmg_code = None
dfunc_type =''
bsmt_egrd_code = ''
anchor_ht_code = None
geo_build_code = None
rat_attn = '*none' #attribute name to scale by for relative damage functions
#===========================================================================
# calculation pars
#===========================================================================
dd_ar = None #2d array of depth (dd_ar[0])vs total damage (dd_ar[1]) values
dmg_type = None #type of damage predicted by this function
anchor_el = 0.0 #height from project datum to the start of the dd_ar (depth = 0)
#headers to keep in the dyn_dmg_df
dd_df_cols = ['name', 'base_price', 'depth', 'calc_price']
depth_allow_max = 10 #maximum depth to allow without raising an error with dg_f = True.
'10m seems reasonable for a 12ft basement and 1000 yr flood'
tag = None #type of dfeats curve
intg_stat = None #placeholder for this stat
def __init__(self, *vars, **kwargs):
logger = mod_logger.getChild('Dfunc')
logger.debug('start _init_')
#=======================================================================
# update program handlers
#=======================================================================
self.inherit_parent_ans=set(['mind', 'model'])
        super(Dfunc, self).__init__(*vars, **kwargs) #initialize the base class
#=======================================================================
#common setup
#=======================================================================
if self.sib_cnt == 0:
logger.debug('sib_cnt = 0. setting complex atts')
            self.kid_class = Dmg_feat #manually pass/attach this
self.hse_o = self.parent
'this should be a proxy'
#=======================================================================
# #unique
#=======================================================================
'for now, only using this on BS curves'
if self.name == 'BS':
self.post_upd_func_s = set([self.calc_statres_dfunc])
#misc
self.label = self.name + ' (%s) (%s)'%(self.dfunc_type, self.units)
""" keep as string
#relative curves
if self.rat_attn == '*none':
self.rat_attn = None"""
if not self.place_code == 'total':
#loaders
logger.debug('build_dfunc \n')
self.build_dfunc()
logger.debug('init_dyno \n')
self.init_dyno()
#=======================================================================
# checks
#=======================================================================
if self.db_f:
logger.debug("checking myself \n")
self.check_dfunc()
if hasattr(self, 'kids_sd'):
raise IOError
self.logger.debug('finished _init_ as \'%s\' \n'%(self.name))
return
def check_dfunc(self):
logger = self.logger.getChild('check_dfunc')
logger.debug('checking')
"""not using the dyno_d any more
if not self.gid in self.session.dyno_d.keys():
raise IOError"""
if not self.place_code == 'B':
pass
if (self.place_code == 'G') & (self.dfunc_type == 'rfda'):
raise IOError
if self.dfunc_type == 'rfda':
if not self.rat_attn == 'self.parent.gis_area':
logger.error('for RFDA, expected \'gis_area\' for rat_attn')
raise IOError
elif self.dfunc_type == 'dfeats':
if not self.rat_attn =='*none':
logger.error('expected \'*none\' for rat_attn on dfeats')
raise IOError
if not self.rat_attn =='*none':
try:
_ = eval(self.rat_attn)
except:
logger.error('failed to execute passed \'%s\''%self.rat_attn)
raise IOError
#=======================================================================
# total checks
#=======================================================================
if self.place_code == 'total':
if self.anchor_ht_code == '*hse':
raise IOError #hse not allowed for total curve
return
def build_dfunc(self): #execute all the commands to build the dfunc from scratch
"""
#=======================================================================
# CALLS
#=======================================================================
_init_
handles
"""
'todo: move these commands elsewhere'
id_str = self.get_id()
logger = self.logger.getChild('build_dfunc(%s)'%id_str)
"""leaving this to more specific functions
#=======================================================================
# dependency check
#=======================================================================
if not self.session.state == 'init':
dep_p = [([self.parent],['set_geo_dxcol()'] )] #dependency paring
if self.deps_is_dated(dep_p, method = 'force', caller = 'build_dfunc'):
raise IOError #because we are forcing this should alwys return FALSE"""
'need to clear this so the children will update'
self.del_upd_cmd(cmd_str = 'build_dfunc()')
self.del_upd_cmd(cmd_str = 'recompile_ddar()')
#=======================================================================
# custom loader funcs
#=======================================================================
logger.debug('set_dfunc_anchor() \n')
res1 = self.set_dfunc_anchor() #calculate my anchor elevation
logger.debug('build_dd_ar() \n')
res2 = self.build_dd_ar()
""" moved this into build_dd_ar
self.constrain_dd_ar()"""
#logger.debug('\n')
if self.session.state == 'init':
if self.dfunc_type == 'dfeats':
'add this here so the children have a chance to fill it out during load'
self.reset_d['childmeta_df'] = self.childmeta_df.copy()
else:
pass
"""some comands (constrain_dd_ar) we want to leave in the que
self.halt_update()"""
"""cleared this at the beginning
if len(self.upd_cmd_od) > 0:
self.del_upd_cmd(cmd_str = 'recompile_ddar()')"""
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
if len(self.upd_cmd_od) > 0:
                logger.warning('still have updates queued: \n %s'%self.upd_cmd_od.keys())
logger.debug('finished \n')
return True #never want to re-que this
    def build_dd_ar(self): #build the damage curve from codes passed on the 'dfunc' tab
"""
#=======================================================================
# CALLS
#=======================================================================
build_dfunc
(this could be used by some handles...but not a great case)
"""
#=======================================================================
# dependencies
#=======================================================================
"""leaving these to the type specific dd_ar builders
#state = self.session.state != 'update'
_ = self.depend_outdated(search_key_l = ['set_geo_dxcol()'], #see if the parent has these
force_upd = True, #force the parent to update if found
halt = False) #clear all of my updates (except this func)"""
logger = self.logger.getChild('build_dd_ar')
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if self.dfunc_type == 'rfda':
"""switched to hse_geo tab
if not self.geo_build_code == 'defaults':
logger.error('dfunc_type=rfda only uses gis_area. therefore geo_build_code must = defaults')
raise IOError"""
if not self.anchor_ht_code == '*rfda':
logger.debug('dfunc_type=rfda got anchor_ht_code != rfda_par.')
'as were keeping the contents rfda, need to allow cross anchoring types'
elif self.dfunc_type == 'dfeats':
if self.dmg_code == 'C':
logger.error('Contents damage not setup for dfunc_type = dfeats')
raise IOError
elif self.dfunc_type == 'depdmg':
                pass #not imposing any restrictions?
else: raise IOError
#=======================================================================
# get raw curve data
#=======================================================================
        if (self.place_code=='B') and (not self.parent.bsmt_f):
            logger.debug("this building doesn't have a basement. dummy Dfunc")
            self.dummy_f = True
            dd_ar = np.array([[], []]) #empty depth/damage placeholder (np.array() with no arguments raises TypeError)
        elif self.dfunc_type == 'rfda':#legacy
logger.debug('dfunc_type = rfda. building')
dd_ar = self.get_ddar_rfda() #build the dfunc from this house type
self.kids_d = wdict()#empty placeholder
elif self.dfunc_type == 'dfeats':
logger.debug('dfunc_type = dfeats. raising children')
#grow all the damage features
self.raise_dfeats()
#compile the damage array
dd_ar = self.get_ddar_dfeats()
elif self.dfunc_type == 'depdmg':
logger.debug('dfunc_type = depdmg. building array')
dd_ar = self.get_ddar_depdmg()
self.kids_d = wdict()#empty placeholder
else: raise IOError
#=======================================================================
# wrap up
#=======================================================================
'constrain will set another copy onto this'
self.dd_ar = dd_ar
#=======================================================================
# this seems overly complicated...
# if not self.anchor_el is None:
# """ set anchor is called by load_data after this"""
# logger.debug('for session state \'%s\' running constrain_dd_ar'%(self.session.state))
#=======================================================================
#=======================================================================
# constrain_dd_ar
#=======================================================================
"""even thourgh we may receive multiple ques, this should be called everytime.
build_dfunc() will clear the que"""
res = self.constrain_dd_ar()
"""cosntrain_dd_ar will execute this
self.handle_upd('dd_ar', dd_ar, proxy(self), call_func = 'build_dd_ar')
'this will que constrain_dd_ar for non init runs'"""
#=======================================================================
# get stats
#=======================================================================
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
if 'build_dd_ar()' in self.upd_cmd_od.keys(): raise IOError
if res:
if 'constrain_dd_ar()' in self.upd_cmd_od.keys(): raise IOError
"""
see note. not a strong case for queuing this command directly (with handles)
'because we are not using the update handler, just calling this'
self.del_upd_cmd(cmd_str = 'build_dd_ar') #delete yourself from the update command list"""
logger.debug('finished for dfunc_type \'%s\' and dd_ar %s \n'%(self.dfunc_type,str(self.dd_ar.shape)))
return True
def get_ddar_rfda(self): #build a specific curve from rfda classic
"""
#=======================================================================
# INPUTS
#=======================================================================
raw_dcurve_df: raw df from the standard rfda damage curve file
            I've left the sorting/cleaning to here
may be slightly more efficient (although more confusing) to clean this in the session
#=======================================================================
# OUTPUTS
#=======================================================================
dd_ar: depth damage per m2
NOTE: this is different than the dd_ar for the dyn_ddars
reasoning for this is I want the parent calls to all be in the run loop
(rather than multiplying the rfda $/m2 by the parents m2 during _init_)
#=======================================================================
# TODO:
#=======================================================================
consider precompiling all of these and making pulls to a shadow set instead
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_ddar_rfda')
self.dfunc_type = 'rfda' #set the dfunc type
dmg_type = self.dmg_type
hse_type = self.hse_o.hse_type
raw_dcurve_df = self.model.kids_d['rfda_curve'].data
'need this goofy reference as the fdmg_o has not fully loaded'
if self.db_f:
if not hp.pd.isdf(raw_dcurve_df):
raise IOError
"""for new houses we should run this mid session
if not self.session.state == 'init':
raise IOError"""
logger.debug('for dmg_type = %s, hse_type = %s and raw_dcurve_df %s'%(dmg_type, hse_type, str(raw_dcurve_df.shape)))
#=======================================================================
# prechecks
#=======================================================================
#if dmg_type is None: raise IOError
#=======================================================================
# get the raw data
#=======================================================================
#clean the data
df1 = raw_dcurve_df.dropna(how = 'all', axis='index') #drop rows where ALL values ar na
df2 = df1.dropna(how = 'any', axis='columns') #drop columns where ANY values are na
#find the rows for this hse_type
boolidx = df2.iloc[:,0].astype(str).str.contains(hse_type) #
df3 = df2[boolidx]
#narrow down to this dmg_type
boolidx = df3.iloc[:,-1].astype(str).str.contains(dmg_type)
df4 = df3[boolidx]
dcurve_raw_list = df4.iloc[0,:].values.tolist() #where both are true
#checks
if len(dcurve_raw_list) == 0: raise IOError
#=======================================================================
        # for this row, extract the damage curve
#=======================================================================
depth_list = []
dmg_list = []
for index, entry in enumerate(dcurve_raw_list): #loop through each entry
'the syntax of these curves is very strange'
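            # Illustrative raw row layout assumed by the even/odd split below (made-up values):
            #   [hse_code, label, d0, dmg0, d1, dmg1, ..., d10, dmg10, dmg_type]
            # i.e. after the first two entries, even positions are depths and odd positions damages.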
#===================================================================
# logic for non depth/damage entries
#===================================================================
if index <=1: continue #skip the first 2
if not hp.basic.isnum(entry): continue #skip non number
#===================================================================
# logic to sort depth from damage based on even/odd
#===================================================================
if index%2 == 0: depth_list.append(float(entry))
else: dmg_list.append(float(entry))
""" thsi even/odd index selectio may not work for non house type damage curves
"""
#=======================================================================
# Build array
#=======================================================================
dd_ar = np.sort(np.array([depth_list, dmg_list]), axis=1)
""" moved this to make parent reference more accessible
dd_ar[1] = dd_ar1[1] * self.parent.gis_area"""
#checks
if self.db_f:
logger.debug('got \n depth_list: %s \n dmg_list: %s'%(depth_list, dmg_list))
if not len(depth_list) == len(dmg_list): #check length
self.logger.error('depth/fdmg lists do not match')
""" these should both be 11
[0.0, 0.1, 0.30000000000000004, 0.6000000000000001, 0.9, 1.2, 1.5, 1.8, 2.1, 2.4, 2.7]
"""
raise IOError
if not len(depth_list) == 11:
raise IOError
if not dd_ar.shape[0] == 2:
self.logger.warning('got unexpected shape on damage array: %s'%str(dd_ar.shape))
'getting 3.208 at the end of the depth list somehow'
raise IOError
#=======================================================================
# closeout
#=======================================================================
#self.dd_ar = dd_ar
logger.debug('built damage array from rfda for hse_type \'%s\' and dmg_type \'%s\' as %s'
%(hse_type, dmg_type, str(dd_ar.shape)))
return dd_ar
def get_ddar_depdmg(self): #build the dd_ar from standard format depth damage tables
logger = self.logger.getChild('get_ddar_depdmg')
#=======================================================================
# get your data from the session
#=======================================================================
df = self.model.dfunc_raw_d[self.name]
dd_ar = np.sort(df.values, axis=1)
        logger.debug('built dd_ar from passed file with %s'%(str(dd_ar.shape)))
return dd_ar
def get_ddar_dfeats(self): #build the dd_ar from the dmg_feats
"""
#=======================================================================
# CALLS
#=======================================================================
build_dd_ar (for dfeats)
recompile_ddar (called by handles)
never directly called by handles
#=======================================================================
# OUTPUTS
#=======================================================================
dd_ar: depth/damage (total)
        NOTE: This is different from the dd_ar for rfda curves
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_ddar_dfeats')
        dd_df = self.childmeta_df #get the dynamic depth damage frame
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if not hp.pd.isdf(dd_df):
raise IOError
if np.any( | pd.isnull(dd_df['calc_price']) | pandas.isnull |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from time import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-q', action="store", dest="qrel_file", help="qrel train file")
parser.add_argument('-t', action="store", dest="top1000_file", help="top1000 train file")
parser.add_argument('-d', action="store", dest="data_file_small", help="query passage file (output)")
parser.add_argument('-i', action="store", dest="data_id_file_small", help="query_id passage_id file (output)")
parser.add_argument('-a', action="store", dest="data_file_large", help="query passage file (output)")
parser.add_argument('-b', action="store", dest="data_id_file_large", help="query_id passage_id file (output)")
results = parser.parse_args()
qrel_file = results.qrel_file
top1000_file = results.top1000_file
data_file_small = results.data_file_small
data_id_file_small = results.data_id_file_small
data_file_large = results.data_file_large
data_id_file_large = results.data_id_file_large
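# Note: the qrel file is read below as headerless tab-separated text; for MS MARCO-style
# qrels the four columns are typically (query_id, iteration, passage_id, relevance).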
df = | pd.read_csv(qrel_file, delimiter='\t', header=None) | pandas.read_csv |
import os
from typing import Any, Callable
import flask
import joblib
import pandas as pd
from sklearn.pipeline import Pipeline
def create_predict_handler(
path: str = os.getenv("MODEL_PATH", "data/pipeline.pkl"),
) -> Callable[[flask.Request], flask.Response]:
"""
This function loads a previously trained model and initialises response labels.
    It then wraps an 'inner' handler function (ensuring the above model and response labels
are in scope for the wrapped function, and that each is initialised exactly once at
runtime).
Parameters
----------
path: str
A path to the target model '.joblib' file.
Returns
    -------
    Callable[[flask.Request], flask.Response]
        A request handler that scores incoming JSON payloads with the loaded model.
    """
model: Pipeline = joblib.load(path)
statuses = {0: "clear", 1: "heart-disease"}
def handler(request: flask.Request) -> Any:
request_json = request.get_json()
df = | pd.DataFrame.from_records([request_json]) | pandas.DataFrame.from_records |
#!/usr/bin/env python3
import atddm
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pytz
# from datetime import time
from constants import COLORS, TZONES, CODES, BEGDT, ENDDT
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
r = robjects.r
TRUE = robjects.BoolVector([True])
FALSE = robjects.BoolVector([False])
pandas2ri.activate()
dgof = importr('dgof')
dweib = importr('DiscreteWeibull')
def format_time_interval(t1, t2):
return '{h1:02d}:{m1:02d}--{h2:02d}:{m2:02d}'.format(h1=t1.hour,
m1=t1.minute,
h2=t2.hour,
m2=t2.minute)
def formatter_float_n_digits(x, n):
return '{x:.{n}f}'.format(x=x, n=n)
def ff3(x):
return formatter_float_n_digits(x, 3)
def ifelse_formatter(x):
return formatter_float_n_digits(x, 2) if x >= 0.01 else '<0.01'
sns.set(style="whitegrid", context='paper')
BEGDT = pd.Timestamp(BEGDT)
ENDDT = pd.Timestamp(ENDDT)
INTERVAL = 10
NREPS = 300
dd = atddm.load(subset=CODES)
interarrivals = {}
# TIMES_LOC = [pd.Timestamp('07:00:00'),
# pd.Timestamp('10:00:00'),
# pd.Timestamp('13:00:00'),
# pd.Timestamp('16:00:00')]
# BEGTM_LOC = TIMES_LOC[:-1]
# ENDTM_LOC = TIMES_LOC[1:]
BEGTM_LOC = [ | pd.Timestamp('08:00:00') | pandas.Timestamp |
import flask
from flask import request, jsonify
import numpy as np
import pandas as pd
import json
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from newsapi import NewsApiClient
api = NewsApiClient(api_key='0924f039000046a99a08757a5b122a4c')
app = flask.Flask(__name__)
app.config["DEBUG"] = True
@app.route('/', methods=['GET'])
def home():
return "<h1>Distant Reading Archive</h1><p>This site is a prototype API for distant reading of science fiction novels.</p>"
@app.route('/stock/<ticker>', methods=['GET'])
def stock(ticker):
myList = api.get_everything(q=ticker)['articles'][:100]
news_titles = []
for x in myList:
news_titles.append(x['title'])
df = pd.DataFrame([], columns = ['TKR', 'Headline'])
df['Headline'] = news_titles
df['TKR'] = ticker
vader = SentimentIntensityAnalyzer()
scores = df['Headline'].apply(vader.polarity_scores).tolist()
scores_df = | pd.DataFrame(scores) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 12 12:29:19 2019
@author: sdenaro
"""
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime as dt
from datetime import timedelta
import numpy as np
import numpy.matlib as matlib
import seaborn as sns
from sklearn import linear_model
#from sklearn.metrics import mean_squared_error, r2_score
from scipy import stats
def r2(x, y):
return stats.pearsonr(x, y)[0] ** 2
#Set Preference Customers reduction percent (number)
custom_redux=0
# Yearly firm loads (aMW)
# upload BPA firm load column from file
df_load=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=0,skiprows=[0,1], usecols=[9])
#Save as Preference Firm (PF), Industrial Firm (IF) and Export (ET)
PF_load_y=df_load.loc[[13]].values - custom_redux*df_load.loc[[13]].values
IP_load_y=df_load.loc[[3]].values - custom_redux* df_load.loc[[3]].values
ET_load_y=df_load.loc[[14]]
# Hourly hydro generation from FCRPS stochastic simulation
#df_hydro=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/PNW_hydro/FCRPS/BPA_owned_dams.csv', header=None)
df_hydro=pd.read_csv('new_BPA_hydro_daily.csv', usecols=([1]))
BPA_hydro=pd.DataFrame(data=df_hydro.loc[0:365*1200-1,:].sum(axis=1)/24, columns=['hydro'])
BPA_hydro[BPA_hydro>45000]=45000
#Remove CAISO bad_years
BPA_hydro=pd.DataFrame(np.reshape(BPA_hydro.values, (365,1200), order='F'))
BPA_hydro.drop([82, 150, 374, 377, 540, 616, 928, 940, 974, 980, 1129, 1191],axis=1, inplace=True)
#reshuffle
#BPA_hydro[[1, 122, 364, 543]]=BPA_hydro[[16, 126, 368, 547]]
BPA_hydro=pd.DataFrame(np.reshape(BPA_hydro.values, (365*1188), order='F'))
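# Reshape pattern used throughout this script: view the flat daily series as a
# (365 days x N simulation years) matrix in Fortran (column-major) order, drop the
# excluded years as columns, then flatten back to a single long series.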
# Yearly resources other than hydro (aMW)
df_resources=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=1,skiprows=[0,1], usecols=[9])
Nuc_y=df_resources.loc[[7]]
Wind_y=df_resources.loc[[8]]
Purch_y=df_resources.loc[[10]]
# Yearly costs and monthly rates (Oct-Sep)
costs_y=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=3,skiprows=[0,3,4,5], usecols=[8])*pow(10,3)
PF_rates=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=4,skiprows=np.arange(13,31), usecols=[0,7])
PF_rates.columns=['month','2018']
IP_rates=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=5,skiprows=np.arange(13,31), usecols=[0,7])
IP_rates.columns=['month','2018']
#load BPAT hourly demand and wind and convert to daily
df_synth_load=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv', usecols=[1])
BPAT_load=pd.DataFrame(np.reshape(df_synth_load.values, (24*365,1200), order='F'))
base = dt(2001, 1, 1)
arr = np.array([base + timedelta(hours=i) for i in range(24*365)])
BPAT_load.index=arr
BPAT_load=BPAT_load.resample('D').mean()
BPAT_load.drop([82, 150, 374, 377, 540, 616, 928, 940, 974, 980, 1129, 1191],axis=1, inplace=True)
#reshuffle
#BPAT_load[[1, 122, 364, 543]]=BPAT_load[[16, 126, 368, 547]]
BPAT_load=pd.DataFrame(np.reshape(BPAT_load.values, (365*1188), order='F'))
df_synth_wind=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv', usecols=[1])
BPAT_wind=pd.DataFrame(np.reshape(df_synth_wind.values, (24*365,1200), order='F'))
BPAT_wind.index=arr
BPAT_wind=BPAT_wind.resample('D').mean()
BPAT_wind.drop([82, 150, 374, 377, 540, 616, 928, 940, 974, 980, 1129, 1191],axis=1, inplace=True)
#BPAT_wind[[1, 122, 364, 543]]=BPAT_wind[[16, 126, 368, 547]]
BPAT_wind=pd.DataFrame(np.reshape(BPAT_wind.values, (365*1188), order='F'))
# Calculate daily BPAT proportions for demand and wind
load_ratio=BPAT_load/BPAT_load.mean()
wind_ratio=BPAT_wind/BPAT_wind.mean()
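# Multiplying a yearly average (aMW) by these ratios spreads it across days in
# proportion to the simulated daily profile while preserving the annual mean.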
# Derive daily BPA loads and other resources
y=2018
PF_load=pd.DataFrame(PF_load_y*load_ratio)
PF_load_avg=(np.reshape(PF_load.values, (365,1188), order='F')).sum(axis=0).mean()
IP_load=pd.DataFrame(IP_load_y*load_ratio)
IP_load_avg=(np.reshape(IP_load.values, (365,1188), order='F')).sum(axis=0).mean()
ET_load=pd.DataFrame(ET_load_y.loc[14,y]*load_ratio)
Purch=pd.DataFrame(Purch_y.loc[10,y]*load_ratio)
Wind=pd.DataFrame(Wind_y.loc[8,y]*wind_ratio)
Nuc=pd.DataFrame(data=np.ones(len(Wind))*Nuc_y.loc[7,y], index=Wind.index)
# STOCHASTIC MIdC and California daily prices
#MidC=pd.read_csv('../../CAPOW/CAPOW_SD/UCED/LR/MidC_daily_prices.csv').iloc[:, 1:]
MidC=pd.read_csv('MidC_daily_prices_new.csv').iloc[:, 1]
MidC=pd.DataFrame(np.reshape(MidC.values, (365,1200), order='F'))
MidC.drop([82, 150, 374, 377, 540, 616, 928, 940, 974, 980, 1129, 1191],axis=1, inplace=True)
#reshuffle
#MidC[[1, 122, 364, 543]]=MidC[[16, 126, 368, 547]]
MidC=pd.DataFrame(np.reshape(MidC.values, (365*1188), order='F'))
CAISO=pd.read_csv('../../CAPOW/CAPOW_SD/UCED/LR/CAISO_daily_prices.csv').iloc[:, 1:]
#reshuffle
#CAISO[['1', '122', '364', '543']]=CAISO[['16', '126', '368', '547']]
CAISO=pd.DataFrame(np.reshape(CAISO.values, (365*1188), order='F'))
Wholesale_Mkt= | pd.concat([MidC,CAISO], axis=1) | pandas.concat |
from datetime import datetime, timedelta
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lrange, range, zip
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.resample import _get_period_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def _series_name():
return 'pi'
class TestPeriodIndex(object):
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + obj.index.freq).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self, series):
# test for fill value during resampling, issue 3715
s = series
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import subprocess
import os
import re
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from attrdict import AttrDict
from tqdm import tqdm
import argparse
import collections
import logging
import json
import re
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel
from externals.bert.pytorch_extract_features import InputExample, convert_examples_to_features
class BERTFeaturesV2(BaseEstimator, TransformerMixin):
def __init__(self, model='bert-large-uncased', use_cuda=True):
self.model = model
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
self.args = args = AttrDict({
'bert_model': self.model,
'do_lower_case': True,
'layers': "-1,-2,-3,-4",
'max_seq_length': 512,
'batch_size': 2,
'local_rank': -1,
'no_cuda': not use_cuda
})
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1)))
print('loading from model')
model = BertModel.from_pretrained('results/bert_finetuned/lm/', cache_dir='results/bert_finetuned/lm/')
print('loaded model')
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
model.eval()
self.device = device
self.model = model
def transform(self, X):
tokenizer = BertTokenizer.from_pretrained(self.args.bert_model, do_lower_case=self.args.do_lower_case, cache_dir='tmp/')
examples = []
for idx, row in X.iterrows():
examples.append(InputExample(unique_id=idx, text_a=row.text, text_b=None))
features = convert_examples_to_features(
examples=examples, seq_length=self.args.max_seq_length, tokenizer=tokenizer)
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
if self.args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
layer_indexes = [int(x) for x in self.args.layers.split(",")]
output = []
for input_ids, input_mask, example_indices in tqdm(eval_dataloader):
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
all_encoder_layers, _ = self.model(input_ids, token_type_ids=None, attention_mask=input_mask)
all_encoder_layers = all_encoder_layers
for b, example_index in enumerate(example_indices):
feature = features[example_index.item()]
unique_id = int(feature.unique_id)
tokens = []
layers = [[] for _ in layer_indexes]
all_out_features = []
for (i, token) in enumerate(feature.tokens):
for (j, layer_index) in enumerate(layer_indexes):
layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
layer_output = layer_output[b]
layers[j].append([round(x.item(), 6) for x in layer_output[i]])
tokens.append(token)
output.append([tokens, *layers])
output = pd.DataFrame(output, columns=['tokens', *['layer_{}'.format(idx) for idx in layer_indexes]])
res = []
for idx, row in X.iterrows():
res.append(self.get_sample_props(output.loc[idx], layer_indexes, **row)[1:])
res = pd.DataFrame(res, columns=['tokens', 'pronoun_offset_token',
'a_offset_token', 'b_offset_token', 'a_span',
'b_span', 'pronoun_token', 'a_tokens', 'b_tokens', 'bert', 'cls'])
cols = set(X.columns).difference(res.columns)
return {'X': pd.concat([X[cols], res], axis=1)}
def get_sample_props(self, features, layer_indexes, text, pronoun, a, b, pronoun_offset, a_offset, b_offset, **kwargs):
cls = [features['layer_{}'.format(idx)][0] for idx in layer_indexes]
tokens = features['tokens'][1:-1]
embs = [features['layer_{}'.format(idx)][1:-1] for idx in layer_indexes]
#assuming only whitespaces have been removed from text
# bert tokens have some hashes for word piece
assert len(''.join(tokens).replace('##', '')) == len(text.replace(' ', '')), ([token.replace('##', '') for token in tokens], text.split(' '))
idx = [0] + list(map(lambda x: len(x.replace('##', '')), tokens))
idx = np.cumsum(idx).tolist()
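        # Worked example (illustrative): for tokens ['my', 'na', '##me'] the cumulative
        # character lengths are [0, 2, 4, 6], so a whitespace-stripped character offset of 2
        # maps back to token index 1 via idx.index(2).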
a_end_idx = a_offset+len(a)
b_end_idx = b_offset+len(b)
pronoun_offset = idx.index(len(text[:pronoun_offset].replace(' ', '')))
pronoun_token = tokens[pronoun_offset]
a_offset = idx.index(len(text[:a_offset].replace(' ', '')))
token_end = idx.index(len(text[:a_end_idx].replace(' ', '')))-1
a_span = [a_offset, token_end]
a_tokens = tokens[a_offset:token_end+1]
b_offset = idx.index(len(text[:b_offset].replace(' ', '')))
token_end = idx.index(len(text[:b_end_idx].replace(' ', '')))-1
b_span = [b_offset, token_end]
b_tokens = tokens[b_offset:token_end+1]
return tokens, tokens, pronoun_offset, a_offset, b_offset, a_span, b_span, pronoun_token, a_tokens, b_tokens, embs, cls
class BERTFeatures(BaseEstimator, TransformerMixin):
def __init__(self, model='uncased_L-12_H-768_A-12'):
self.model = model
def transform(self, X):
def cleanser(row):
pronoun_offset = row.pronoun_offset
a_offset = row.a_offset
b_offset = row.b_offset
text = row.text.replace("`", "'")
matches = re.findall('\([^)]*\)', text)
for match in matches:
if row.a in match or row.b in match or row.pronoun in match:
continue
if text.index(match) < pronoun_offset:
pronoun_offset -= len(match)
if text.index(match) < a_offset:
a_offset -= len(match)
if text.index(match) < b_offset:
b_offset -= len(match)
text = text.replace(match, '', 1)
return text, pronoun_offset, a_offset, b_offset
# X[['text', 'pronoun_offset', 'a_offset', 'b_offset']] = X.apply(cleanser, axis=1, result_type='expand')
# X.text.to_csv('tmp/input.txt', index = False, header = False, quoting=csv.QUOTE_NONE)
with open('tmp/input.txt', 'w') as f:
f.write('\n'.join(X.text.values.tolist()))
cmd = "cd bert && python3 extract_features.py \
--input_file=../tmp/input.txt \
--output_file=../tmp/output.jsonl \
--vocab_file={0}/vocab.txt \
--bert_config_file={0}/bert_config.json \
--init_checkpoint={0}/bert_model.ckpt \
--layers=-1 \
--max_seq_length=512 \
--batch_size=32".format(self.model)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
for line in iter(p.stdout.readline, ''):
print(line)
retval = p.wait()
bert_output = pd.read_json("tmp/output.jsonl", lines = True)
res = []
for idx, row in X.iterrows():
features = | pd.DataFrame(bert_output.loc[idx,"features"]) | pandas.DataFrame |
"""
KAMA: Kaufman's Adaptive Moving Average.
"""
import pyximport; pyximport.install()
from datautils import gen_closes
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import Series
def kama(x, n=10, pow1=2, pow2=30):
"""KAMA: Kaufmans Adaptive Moving Average.
Params:
x (Series): Time series data such as close prices.
n (int): number of periods for the Efficiency Ratio (ER).
pow1 (int): number of periods for the fastest EMA constant.
pow2 (int): number of periods for the slowest EMA constant.
Returns:
Series: Kaufmans adaptive moving average of x.
"""
nan_count = x[pd.isnull(x)].size
x = Series(x.dropna().values, name = x.name, index = x.index)
change = (x - x.shift(n)).abs()
volatility = (x - x.shift(1)).abs().rolling(window=n).sum()
er = (change / volatility).values
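    # Kaufman's smoothing constant: blend the fastest EMA constant 2/(pow1+1) with the
    # slowest 2/(pow2+1) according to the efficiency ratio er, then square the result.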
sc = (er * (2.0 / (pow1 + 1.0) - 2.0 / (pow2 + 1.0)) + 2.0 / (pow2 + 1.0)) ** 2.0
values = x.values
kama = [np.nan] * sc.size
first_value = True
for i in range(len(kama)):
if not pd.isnull(sc[i]):
if first_value:
kama[i] = values[i]
first_value = False
else:
kama[i] = kama[i-1] + sc[i] * (values[i] - kama[i-1])
return Series(data = [np.nan] * nan_count + kama, name = "kama(%d,%d,%d)" % (n, pow1, pow2), index = x.index)
def test_kama(closes):
"""KAMA test function."""
kama10 = kama(closes, 10, 2, 30)
data = | pd.concat([closes, kama10], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
def test_tz_aware_scalar_comparison(self, timestamps):
# Test for issue #15966
df = pd.DataFrame({'test': timestamps})
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH#19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
class TestFrameMulDiv(object):
"""Tests for DataFrame multiplication and division"""
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
result = df % df
tm.assert_frame_equal(result, expected)
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values % df.values
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_int(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df % 0
expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') % 0
result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_series_does_not_commute(self):
# GH#3590, modulo as ints
# not commutative with series
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser % df
res2 = df % ser
assert not res.fillna(0).equals(res2.fillna(0))
# ------------------------------------------------------------------
# Division By Zero
def test_df_div_zero_df(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / df
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
tm.assert_frame_equal(result, expected)
def test_df_div_zero_array(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
with np.errstate(all='ignore'):
arr = df.values.astype('float') / df.values
result = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_df_div_zero_int(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / 0
expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
expected.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') / 0
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_div_zero_series_does_not_commute(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser / df
res2 = df / ser
assert not res.fillna(0).equals(res2.fillna(0))
class TestFrameArithmetic(object):
@pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano',
strict=True)
def test_df_sub_datetime64_not_ns(self):
df = pd.DataFrame(pd.date_range('20130101', periods=3))
dt64 = np.datetime64('2013-01-01')
assert dt64.dtype == 'datetime64[D]'
res = df - dt64
expected = pd.DataFrame([pd.Timedelta(days=0), pd.Timedelta(days=1),
pd.Timedelta(days=2)])
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize('data', [
[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT],
['x', 'y', 1]])
@pytest.mark.parametrize('dtype', [None, object])
def test_df_radd_str_invalid(self, dtype, data):
df = pd.DataFrame(data, dtype=dtype)
with pytest.raises(TypeError):
'foo_' + df
@pytest.mark.parametrize('dtype', [None, object])
def test_df_with_dtype_radd_int(self, dtype):
df = pd.DataFrame([1, 2, 3], dtype=dtype)
expected = pd.DataFrame([2, 3, 4], dtype=dtype)
result = 1 + df
tm.assert_frame_equal(result, expected)
result = df + 1
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_df_with_dtype_radd_nan(self, dtype):
df = pd.DataFrame([1, 2, 3], dtype=dtype)
expected = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
result = np.nan + df
tm.assert_frame_equal(result, expected)
result = df + np.nan
tm.assert_frame_equal(result, expected)
def test_df_radd_str(self):
df = pd.DataFrame(['x', np.nan, 'x'])
tm.assert_frame_equal('a' + df, | pd.DataFrame(['ax', np.nan, 'ax']) | pandas.DataFrame |
from os import listdir
from os.path import isfile, join
import re
import nltk
from nltk.corpus import stopwords
from string import punctuation
import pymorphy2
import pandas
from collections import Counter
from collections import defaultdict, OrderedDict
import math
import numpy
# nltk.download("stopwords") # used only for first time
russian_stopwords = stopwords.words("russian")
morgh = pymorphy2.MorphAnalyzer()
# def to_normal_form(word):
# p = morgh.parse(word)[0]
# print(p.normal_form)
# return p.normal_form
# Compute the tf (term frequency) of each term
def computeTF(wordDict, bow):
tfDict = {}
bowCount = len(bow)
for word, count in wordDict.items():
tfDict[word] = count/float(bowCount)
return tfDict
# Compute the idf (inverse document frequency)
def computeIDF(docList):
import math
idfDict = {}
N = len(docList)
idfDict = dict.fromkeys(docList[0].keys(), 0)
for doc in docList:
for word, val in doc.items():
if val > 0:
idfDict[word] += 1
for word, val in idfDict.items():
idfDict[word] = math.log10(N / float(val))
return idfDict
# Compute tf-idf
def computeTFIDF(tfBow, idfs):
tfidf = {}
for word, val in tfBow.items():
tfidf[word] = val*idfs[word]
return tfidf
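# Worked example (illustrative; not part of the original script): with a shared
# vocabulary and two token lists A = ["cat", "dog", "cat"] and B = ["dog", "bird"],
#   tf_A["cat"] = 2/3, tf_B["dog"] = 1/2
#   idf["cat"] = log10(2/1) ~= 0.301, idf["dog"] = log10(2/2) = 0
#   tfidf_A["cat"] = 2/3 * 0.301 ~= 0.20, while "dog" scores 0 in both documents.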
files_path = '../files'
files = [f for f in listdir(files_path) if isfile(join(files_path, f))]
print(files)
files_words = []
for file_name in files:
file = open(files_path + '/' + file_name, "r", encoding="utf-8")
file_content = file.read().replace('<b>', ' ')
sentence = re.sub(r"[\n\s.,:–\\?—\-!()/«»'#№{}\[\]→%|+®©\"]+", " ", file_content, flags=re.UNICODE).lower()
sentence = re.sub(r"[\d+]", "", sentence, flags=re.UNICODE)
tokens = [token for token in sentence.split(" ") if token not in russian_stopwords \
and token != " " \
and token.strip() not in punctuation]
files_words.append(tokens)
wordSet = set([item for sublist in files_words for item in sublist])
fileWordDictionaries = []
for i in range(len(files_words)):
fileWordDictionaries.append(dict.fromkeys(wordSet,0))
for word in files_words[i]:
fileWordDictionaries[i][word] += 1
df = pandas.DataFrame(fileWordDictionaries)
tfDictionaries = []
for i in range(len(fileWordDictionaries)):
tfDictionaries.append(computeTF(fileWordDictionaries[i],files_words[i]))
df_TF = pandas.DataFrame(tfDictionaries)
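# Illustrative continuation (an assumption; the original script is cut off here):
# apply the helpers defined above to get IDF weights and per-document TF-IDF scores.
idfs = computeIDF(fileWordDictionaries)
tfidfDictionaries = [computeTFIDF(tfDict, idfs) for tfDict in tfDictionaries]
df_TFIDF = pandas.DataFrame(tfidfDictionaries)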
from time import time
from os import path, listdir
from datetime import timedelta
from datetime import date as dt_date
from datetime import datetime as dt
from numpy import cumprod
from pandas import DataFrame, read_sql_query, read_csv, concat
from functions import psqlEngine
class Investments():
def __init__(self, path = '../investments/', name = 'get_investments', **kwargs):
self.kwargs = kwargs
self.path = path
self.hyperparameters()
self.get_engine()
self.get_dollar()
self.get_all_assets()
self.domestic_bond_returns()
self.get_benchmarks()
self.portfolio_domestic_stocks = self.get_quotas('domestic_stocks')
self.portfolio_international_stocks = self.get_quotas('international_stocks')
self.portfolio_crypto = self.get_quotas('crypto')
# self.portfolio_domestic_options = self.get_quotas('domestic_options')
self.portfolio_domestic_funds = self.get_quotas('domestic_funds')
self.get_portfolio()
self.get_aggregate()
self.get_time_series()
self.dispose_engine()
def __call__(self, flag = 'assets'):
if flag == 'dollar':
return self.dollar
if flag == 'bonds':
return self.domestic_bonds, self.interests
if flag == 'stocks':
return self.domestic_tickers, self.international_tickers
if flag == 'crypto':
return self.crypto, self.fractions
if flag == 'portfolio':
return self.portfolio, self.portfolio_aggregate.round(2)
if flag == 'save':
rounded = self.portfolio.round(2)
rounded2 = self.portfolio_aggregate.round(2)
engine = psqlEngine(self.database)
connection = engine.connect()
rounded.to_sql('portfolio', connection, if_exists = 'replace', index = False)
rounded2.to_sql('aggregate', connection, if_exists = 'replace', index = False)
connection.close()
engine.dispose()
if flag == 'time_series':
return self.portfolio_time_series.round(2)
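    # Hypothetical usage sketch (not part of the original module; names and paths
    # are assumptions, the flags come from __call__ above):
    #   inv = Investments(path='../investments/', database='database.ini')
    #   portfolio, aggregate = inv('portfolio')   # per-asset and aggregated views
    #   series = inv('time_series')               # daily portfolio time series
    #   inv('save')                               # persist both tables via psqlEngine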
def hyperparameters(self):
self.database = self.kwargs.get('database', 'database.ini')
self.benchmark_database = self.kwargs.get('benchmarks_database', 'benchmarks')
self.domestic_stocks_database = self.kwargs.get('domestic_database', 'brazil_stocks')
self.domestic_options_database = self.kwargs.get('domestic_database', 'brazil_options')
self.international_database = self.kwargs.get('international_database', 'usa_stocks')
self.currency_database = self.kwargs.get('currency_database', 'currencies')
self.domestic_bonds_path = '{}bonds/'.format(self.path)
self.crypto_path = '{}crypto/'.format(self.path)
self.domestic_stocks_path = '{}stocks/domestic/'.format(self.path)
self.international_stocks_path = '{}stocks/international/'.format(self.path)
self.domestic_options_path = '{}options/domestic/'.format(self.path)
self.domestic_funds_path = '{}funds/domestic/'.format(self.path)
self.list_paths = [
self.domestic_bonds_path,
self.crypto_path,
self.domestic_stocks_path,
self.international_stocks_path,
self.domestic_options_path,
self.domestic_funds_path,
]
self.dates_min = DataFrame()
def get_engine(self):
self.engine = psqlEngine(self.database)
self.connection = self.engine.connect()
def dispose_engine(self):
self.connection.close()
self.engine.dispose()
def get_dollar(self):
currency = 'BRLUSD'
self.dollar = float(read_sql_query("SELECT * FROM {} WHERE ticker = '{}'".format(self.benchmark_database, currency), self.connection).iloc[0].close)
self.dollar_full = read_sql_query("SELECT date, close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.benchmark_database, currency), self.connection)
self.dollar_full.drop_duplicates('date', inplace = True)
self.dollar_full = self.insert_weekends(self.dollar_full)
self.dollar_full.rename(columns = {'close': 'dollar_close'}, inplace = True)
self.dollar_full['dollar_close'] = self.dollar_full.dollar_close.astype('float')
def get_benchmarks(self):
self.spy = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'SPY' ORDER BY date".format(self.benchmark_database), self.connection)
self.bova = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'BOVA11' ORDER BY date".format(self.benchmark_database), self.connection)
self.spy.drop_duplicates('date', inplace = True)
self.bova.drop_duplicates('date', inplace = True)
self.spy = self.insert_weekends(self.spy)
self.spy['close'] = self.spy.close.astype('float')
self.bova = self.insert_weekends(self.bova)
self.bova = self.bova.merge(self.dollar_full, on = 'date')
self.bova['close'] = self.bova.close.astype('float')
self.bova['close_dollar'] = (self.bova.close * self.bova.dollar_close).to_list()
def get_all_assets(self):
self.interests, self.fractions = list(), list()
self.domestic_tickers, self.international_tickers = list(), list()
self.domestic_options_tickers = list()
self.domestic_funds_tickers = list()
for directory in self.list_paths:
list_files = list()
for filename in listdir(directory):
if filename.endswith('.csv'):
list_files.append(path.join(directory, filename))
if directory == self.domestic_bonds_path:
self.interests.append(filename.replace('.csv', '').upper())
if directory == self.crypto_path:
self.fractions.append(filename.replace('.csv', '').upper())
if directory == self.domestic_stocks_path:
self.domestic_tickers.append(filename.replace('.csv', '').upper())
if directory == self.international_stocks_path:
self.international_tickers.append(filename.replace('.csv', '').upper())
if directory == self.domestic_options_path:
self.domestic_options_tickers.append(filename.replace('.csv', '').upper())
if directory == self.domestic_funds_path:
self.domestic_funds_tickers.append(filename.replace('.csv', '').upper())
dictionary = dict()
if directory == self.domestic_bonds_path:
for filename, interest in zip(list_files, self.interests):
df = read_csv(filename)
dictionary[interest] = df
if dictionary:
self.domestic_bonds = concat(dictionary)
self.domestic_bonds = self.domestic_bonds.rename(columns = {'pct_cdi': 'share'})
self.domestic_bonds = self.domestic_bonds.merge(self.dollar_full, on = 'date')
self.domestic_bonds['purchase_price_dollar'] = (self.domestic_bonds.purchase_price.astype('float') * self.domestic_bonds.dollar_close.astype('float')).to_list()
else:
if directory == self.crypto_path:
symbols = self.fractions
if directory == self.domestic_stocks_path:
symbols = self.domestic_tickers
if directory == self.international_stocks_path:
symbols = self.international_tickers
if directory == self.domestic_options_path:
symbols = self.domestic_options_tickers
if directory == self.domestic_funds_path:
symbols = self.domestic_funds_tickers
for filename, ticker in zip(list_files, symbols):
df = read_csv(filename)
if ticker in self.domestic_funds_tickers:
df.set_index('date', inplace = True)
df['purchase_price'] = df.purchase_price.diff()
df = df.dropna()
df.reset_index(inplace = True)
if (ticker in self.domestic_tickers) or (ticker in self.domestic_options_tickers) or (ticker in self.domestic_funds_tickers):
df = df.merge(self.dollar_full, on = 'date')
df['purchase_price'] = df.purchase_price.astype('float') * df.dollar_close.astype('float')
dictionary[ticker] = df
df['cum_share'] = df.share.cumsum()
df['price_share'] = (df.purchase_price / df.share)
df['cum_price_share'] = df.price_share.expanding().mean()
dictionary[ticker] = df
if dictionary:
self.stocks = concat(dictionary)
if directory == self.crypto_path:
self.crypto = concat(dictionary)
if directory == self.domestic_stocks_path:
self.domestic_stocks = concat(dictionary)
if directory == self.international_stocks_path:
self.international_stocks = concat(dictionary)
if directory == self.domestic_options_path:
self.domestic_options = concat(dictionary)
if directory == self.domestic_funds_path:
self.domestic_funds = concat(dictionary)
def get_quotas(self, asset):
quotas = dict()
domestic = False
if asset == 'crypto':
list_tickers = self.fractions
db = self.currency_database
if asset == 'domestic_stocks':
list_tickers = self.domestic_tickers
db = self.domestic_stocks_database
domestic = True
if asset == 'international_stocks':
list_tickers = self.international_tickers
db = self.international_database
if asset == 'domestic_options':
list_tickers = self.domestic_options_tickers
db = self.domestic_options_database
domestic = True
if asset == 'domestic_funds':
list_tickers = self.domestic_funds_tickers
domestic = True
for ticker in list_tickers:
key = ticker.upper()
if asset == 'crypto':
quotas[key] = self.crypto.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_stocks':
quotas[key] = self.domestic_stocks.loc[ticker].cum_share.iloc[-1]
if asset == 'international_stocks':
quotas[key] = self.international_stocks.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_options':
quotas[key] = self.domestic_options.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_funds':
quotas[key] = 1.
portfolio = DataFrame({
'asset': list(quotas.keys()),
'quotas': list(quotas.values())
})
portfolio.sort_values(by = ['asset'], inplace = True)
if asset == 'domestic_funds':
value_usd, value_brl = list(), list()
for asset in list_tickers:
close_price = read_csv(self.domestic_funds_path + '{}.csv'.format(asset.lower())).share.iloc[-1]
value_usd.append(close_price * quotas.get(asset) * self.dollar)
value_brl.append(close_price * quotas.get(asset))
portfolio['value_usd'] = value_usd
portfolio['value_brl'] = value_brl
else:
if domestic == False:
close_price = read_sql_query("SELECT date, ticker, close FROM (SELECT date, ticker, close, MAX(date) OVER (PARTITION BY ticker) AS max_date FROM {}) x WHERE date = max_date".format(db), self.connection)
else:
close_price = read_sql_query("SELECT date, ticker, close FROM (SELECT date, ticker, adjusted_close as close, MAX(date) OVER (PARTITION BY ticker) AS max_date FROM {}) x WHERE date = max_date".format(db), self.connection)
close_price['close'] = close_price.close.astype('float')
close_price = close_price.loc[close_price.ticker.isin(portfolio.asset.to_list())]
self.dates_min = self.dates_min.append(close_price[['date', 'ticker']])
close_price['quota'] = close_price.ticker.apply(lambda x: quotas.get(x))
if domestic == False:
portfolio['value_usd'] = (close_price.close * close_price.quota).to_list()
portfolio['value_brl'] = (close_price.close * close_price.quota / self.dollar).to_list()
else:
portfolio['value_usd'] = (close_price.close * close_price.quota * self.dollar).to_list()
portfolio['value_brl'] = (close_price.close * close_price.quota).to_list()
portfolio.sort_values(by = ['value_usd'], ascending = False, inplace = True)
return portfolio
def get_portfolio(self):
self.portfolio = dict()
self.portfolio['domestic bonds'] = self.portfolio_bonds
self.portfolio['domestic stocks'] = self.portfolio_domestic_stocks
self.portfolio['international stocks'] = self.portfolio_international_stocks
self.portfolio['crypto'] = self.portfolio_crypto
# self.portfolio['domestic options'] = self.portfolio_domestic_options
self.portfolio['domestic funds'] = self.portfolio_domestic_funds
self.portfolio = concat(self.portfolio)
self.portfolio = self.portfolio.loc[self.portfolio.quotas >= 1e-10]
def get_aggregate(self):
assets = list(self.portfolio.index.unique(level = 0))
value_brl, value_usd = list(), list()
for asset in assets:
value_brl.append(self.portfolio.loc[asset].sum().value_brl)
value_usd.append(self.portfolio.loc[asset].sum().value_usd)
self.portfolio_aggregate = DataFrame({
'asset': assets,
'value_brl': value_brl,
'value_usd': value_usd,
})
def insert_weekends(self, df, asset = 'stock'):
df.set_index('date', inplace = True)
start, end = df.index[0], df.index[-1]
start = dt.strptime(start, '%Y-%m-%d').date()
end = dt.strptime(end, '%Y-%m-%d').date()
dates = [str(start + timedelta(days = x)) for x in range(0, (end - start).days + 1, 1)]
df = df.reindex(dates, fill_value = 0)
df.reset_index(inplace = True)
close = list()
if asset == '6040':
for value in df.interest:
if value != 0:
close.append(value)
if value == 0:
close.append(1.)
df['interest'] = close
if asset == 'bond':
for value in df.portfolio:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['portfolio'] = close
if asset == 'crypto':
for value in df.close:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['close'] = close
if asset == 'stock':
for value in df.close:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['close'] = close
return df
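    # Example of what insert_weekends does (illustrative, not from the original
    # source): a frame with rows for 2020-01-03 (Fri, close=10) and 2020-01-06
    # (Mon, close=11) is reindexed to also include 01-04 and 01-05, and the
    # missing closes are filled with the last observed value (10); for the
    # '6040' flag, missing interest values are filled with 1.0 instead.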
def get_concat_dataframe(self, columns, options = True):
columns_bonds = list()
for elem in columns:
if elem == 'share':
columns_bonds.append('purchase_price')
elif elem == 'purchase_price':
columns_bonds.append('purchase_price_dollar')
else:
columns_bonds.append(elem)
domestic_bonds = dict()
domestic_bonds['CDB'] = self.domestic_bonds[columns_bonds].rename(columns = {'purchase_price_dollar': 'purchase_price'})
domestic_bonds = concat(domestic_bonds)
if options == True:
            df = concat([domestic_bonds, self.domestic_stocks[columns], self.international_stocks[columns], self.crypto[columns], self.domestic_funds[columns], self.domestic_options[columns]])
#! /bin/python3
# compute_PCs.py: compute principal components from per-chromosome methylation data
# RUN ON MASTODON (it does not hit memory issues there)
# Imputation strategies to support:
#   - local mean using a number of non-missing values in either direction
#   - local mean with a max number of positions to search in either direction
#   - global mean
#   - global mean by category
# Filter out positions where there is no methylation (0 or 1 methylated values) and lots of missing values
import pandas as pd
import argparse
import os
from sklearn.decomposition import IncrementalPCA
def read_data(file_path):
    '''
    Parameters
    ----------
    file_path : str
        relative path to one chromosome

    Returns
    -------
    pandas.DataFrame
        positions x (measure, sample) pivot of methylation estimates and coverage
    '''
df_raw = pd.read_csv(file_path, sep = "\t")
if 'methylation_estimate' not in df_raw:
# point estimate for methylationg
df_raw['methylation_estimate'] = df_raw['methylated'] / df_raw['coverage']
df_raw.drop(columns=['methylated','chr', 'unmethylated'], inplace = True)
df_raw = df_raw.astype({'sample': 'uint8', 'methylation_estimate': 'float32', 'coverage': 'uint8'})
df = (df_raw.pivot_table(index=['pos'], columns=['sample'], values=['methylation_estimate', 'coverage']))
return df
def filter_too_many_nulls(df):
    """Drops positions with too many missing samples or too little total signal.

    Note: ``num_na_cut`` and ``mean_cut`` are referenced as module-level
    thresholds; they are not defined in this fragment.
    """
    num_na = df.isnull().sum(axis=1)
    mean_val = df.sum(axis=1)
    ix = (num_na < num_na_cut) & (mean_val > mean_cut)
    return df[ix]
def impute_local_mean(df, group_ix, ws=50):
    # Designed with methylated reads and coverage in mind...
    '''Imputes missing values with a local (rolling) mean, borrowing across groups.

    Args:
        df: a data frame of positions (rows) by samples (columns)
        group_ix: group labels, same length as the number of columns in df (currently unused)
        ws: rolling window size, in positions
    '''
    # Minimum periods := at least 10 observed positions, or ws/10 if that is larger
    mp = max(10, int(ws / 10))
    # The original body discarded the rolling result and returned None; filling
    # NaNs from the centred rolling mean is one reasonable completion (assumption).
    local_mean = df.rolling(window=ws, min_periods=mp, center=True).mean()
    return df.fillna(local_mean)
def run_pca(X, num_components = 2, is_incremental=True):
    '''Computes principal components incrementally (IncrementalPCA).
    '''
#TODO: allow for normal PCA
ipca = IncrementalPCA(n_components = num_components, batch_size=10000)
X_ipca = ipca.fit_transform(X)
return X_ipca, ipca.explained_variance_ratio_
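# Illustrative usage sketch (an assumption; not in the original script). The cutoff
# names mirror the module-level thresholds referenced in filter_too_many_nulls:
#   num_na_cut, mean_cut = 50, 1
#   df = read_data('../../data/cov-meth/chr22.tsv')
#   meth = filter_too_many_nulls(df['methylation_estimate'])
#   X = meth.fillna(meth.mean()).T.values          # samples x positions
#   pcs, explained = run_pca(X, num_components=2)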
if __name__ == "__main__":
# argparsing,...
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--ifile', default = '../../data/cov-meth/chr22.tsv') #TODO: change to CSV in extract...
parser.add_argument('--odir', default = '../../data/prin-comps-array-samples/')
parser.add_argument('--filter_samples', action = 'store_true')
parser.add_argument('--filter_file', default = '../../data/meta/array-samples.csv')
args = parser.parse_args()
if not os.path.exists(args.odir):
os.makedirs(args.odir)
if args.filter_samples:
        tmp = pd.read_csv(args.filter_file)
# Name : <NAME>
# Roll Number : 101903508
import pandas as pd
import os
import sys
def main():
if len(sys.argv) != 5:
print("ERROR : incorrect number of parameters")
sys.exit(1)
elif not os.path.isfile(sys.argv[1]):
        print(f"ERROR : {sys.argv[1]} doesn't exist!!")
sys.exit(1)
elif ".csv" != (os.path.splitext(sys.argv[1]))[1]:
        print(f"ERROR : '{sys.argv[1]}' is not a csv file!!")
sys.exit(1)
else:
dataset, temp_dataset = pd.read_csv(
sys.argv[1]), pd.read_csv(sys.argv[1])
nCol = len(temp_dataset.columns.values)
        # fewer than 3 columns in the input dataset
        if nCol < 3:
            print("ERROR : less than 3 columns")
            exit(1)
        # Handling non-numeric values
        for i in range(1, nCol):
            dataset.iloc[:, i] = pd.to_numeric(dataset.iloc[:, i], errors='coerce')  # coerce non-numeric cells to NaN
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
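        # Note: "column_prefix_map" maps a placeholder prefix to a concrete one, so
        # with {"--": "va"} a target such as "--r1" resolves to the column "var1",
        # as the prefix-map assertions below rely on.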
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2023'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_is_incomplete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": [ '2021', '2021', '2099'],
"var2": [ "1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var1"})
.equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var2"})
.equals(pandas.Series([False, False, False])))
def test_is_complete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ["2021", "2021", "2099"],
"var2": ["1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var1"})
.equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var2"})
.equals(pandas.Series([True, True, True])))
def test_is_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([True, True, True, True])))
def test_is_not_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([False, False, False, False])))
def test_is_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_not_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_unique_relationship(self):
"""
        Test validates a one-to-one relationship against a dataset.
        One-to-one means that a pair of values may repeat across rows,
        but the mapping between the two columns must never be violated.
"""
one_to_one_related_df = pandas.DataFrame.from_dict(
{
"STUDYID": [1, 2, 3, 1, 2],
"USUBJID": ["TEST", "TEST-1", "TEST-2", "TEST-3", "TEST-4", ],
"STUDYDESC": ["Russia", "USA", "China", "Russia", "USA", ],
}
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYID", "comparator": "STUDYDESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYDESC", "comparator": "STUDYID"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--ID", "comparator": "--DESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--DESC", "comparator": "--ID"}
).equals(pandas.Series([True, True, True, True, True]))
)
df_violates_one_to_one = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"TESTID": [1, 2, 1, 3],
"TESTNAME": ["Functional", "Stress", "Functional", "Stress", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTID", "comparator": "TESTNAME"}).equals(pandas.Series([True, False, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTNAME", "comparator": "TESTID"}).equals(pandas.Series([True, False, True, False]))
)
def test_is_not_unique_relationship(self):
"""
        Test validates detection of violated one-to-one relationships in a dataset.
        One-to-one means that a pair of values may repeat across rows,
        but the mapping between the two columns must never be violated.
"""
valid_df = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISITNUM": [1, 2, 1, 3],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
}
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISITNUM", "comparator": "VISIT"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITNUM"}).equals(pandas.Series([False, False, False, False]))
)
valid_df_1 = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
"VISITDESC": [
"Doctor Consultation", "Heart Surgery", "Doctor Consultation", "Long Lasting Treatment",
],
}
)
self.assertTrue(DataframeType({"value": valid_df_1}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITDESC"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df_1}).is_not_unique_relationship(
{"target": "VISITDESC", "comparator": "VISIT"}).equals( | pandas.Series([False, False, False, False]) | pandas.Series |
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process
from xgboost import XGBClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score, GridSearchCV, train_test_split
from sklearn.linear_model import LogisticRegressionCV
from sklearn.feature_selection import RFECV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler
from sklearn import feature_selection
from sklearn import metrics
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
def predict_round(pred_round):
def load_afl_data(pred_round):
df_2017 = pd.read_csv("../data/afl_results_2017.csv")
#print(df_2017.shape)
df_2018 = pd.read_csv("../data/afl_results_2018.csv")
#print(df_2018.shape)
df_2019 = pd.read_csv("../data/afl_results_2019.csv")
#print(df_2019.shape)
df_2020 = pd.read_csv("../data/afl_results_2020.csv")
#print(df_2020.shape)
df_2021 = pd.read_csv("../data/afl_results_2021.csv")
#print(df_2021.shape)
df_2022 = pd.read_csv("../data/afl_results_2022.csv")
pred_round_results = df_2022[df_2022['round.roundNumber'] == pred_round]
df_2022 = df_2022[df_2022['round.roundNumber'] < pred_round]
#print(df_2022.shape)
df_all = pd.concat([df_2017, df_2018, df_2019, df_2020, df_2021,df_2022], axis=0)
df_all['Date'] = pd.to_datetime(df_all['match.date']).dt.strftime("%Y-%m-%d")
df_players_2017 = pd.read_csv("../data/afl_players_stats_2017.csv")
#print(df_players_2017.shape)
df_players_2018 = pd.read_csv("../data/afl_players_stats_2018.csv")
#print(df_players_2018.shape)
df_players_2019 = pd.read_csv("../data/afl_players_stats_2019.csv")
#print(df_players_2019.shape)
df_players_2020 = pd.read_csv("../data/afl_players_stats_2020.csv")
#print(df_players_2020.shape)
df_players_2021 = pd.read_csv("../data/afl_players_stats_2021.csv")
#print(df_players_2021.shape)
df_players_2022 = pd.read_csv("../data/afl_players_stats_2022.csv")
df_players_2022 = df_players_2022[df_players_2022['Round'] < pred_round]
#print(df_players_2022.shape)
df_players = pd.concat([df_players_2017, df_players_2018, df_players_2019,df_players_2020,df_players_2021,df_players_2022], axis=0)
#print(df_players.shape)
#df_players.columns
df_fixture = pd.read_csv("../data/fixture_2022.csv")
df_next_games_teams = df_fixture[(df_fixture['round.roundNumber'] == pred_round)]
df_next_games_teams = df_next_games_teams[['home.team.name','away.team.name','venue.name','compSeason.year','round.roundNumber']]
df_next_games_teams = df_next_games_teams.rename(columns={'home.team.name': 'match.homeTeam.name', 'away.team.name': 'match.awayTeam.name','compSeason.year':'round.year'})
df_next_games_teams['match.matchId'] = np.arange(len(df_next_games_teams))
return df_all, df_players, df_fixture, df_next_games_teams, pred_round_results
def get_aggregate_player_stats(df=None):
        agg_stats = (df.rename(columns={ # Rename player-stats columns to match the results data
'Home.team': 'match.homeTeam.name',
'Away.team': 'match.awayTeam.name',
})
.groupby(by=['Date', 'Season', 'match.homeTeam.name', 'match.awayTeam.name'], as_index=False) # Groupby to aggregate the stats for each game
.sum()
#.drop(columns=['DE', 'TOG', 'Match_id']) # Drop columns
.assign(date=lambda df: pd.to_datetime(df.Date, format="%Y-%m-%d")) # Create a datetime object
.sort_values(by='Date')
.reset_index(drop=True))
return agg_stats
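    # Illustrative note (not from the original source): the groupby().sum() above collapses the
    # per-player rows into a single row of summed stats per match, keyed by Date, Season and the
    # two team names, which is what allows the merge onto the match-level results frame below.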
df_all, df_players, df_fixture, df_next_games_teams, pred_round_results = load_afl_data(pred_round)
agg_player = get_aggregate_player_stats(df_players)
afl_df = df_all.merge(agg_player, on=['Date', 'match.homeTeam.name', 'match.awayTeam.name'], how='left')
    # Per-game goal difference for home and away teams (4-game rolling averages are built below)
afl_df['HTGDIFF'] = afl_df['homeTeamScore.matchScore.goals'] - afl_df['awayTeamScore.matchScore.goals']
afl_df['ATGDIFF'] = afl_df['awayTeamScore.matchScore.goals'] - afl_df['homeTeamScore.matchScore.goals']
def from_dict_value_to_df(d):
"""
input = dictionary
        output = dataframe concatenated from all the values of the dictionary
"""
df = pd.DataFrame()
for v in d.values():
df = pd.concat([df,v])
return df
def avg_goal_diff(df, avg_h_a_diff, a_h_team, a_h_goal_letter):
"""
input:
df = dataframe with all results
avg_h_a_diff = name of the new column
a_h_team = HomeTeam or AwayTeam
a_h_goal_letter = 'H' for home or 'A' for away
output:
            avg_per_team = dictionary with team as key and its dataframe as value, including the new AVGHTGDIFF/AVGATGDIFF column
"""
df[avg_h_a_diff] = 0
avg_per_team = {}
all_teams = df[a_h_team].unique()
for t in all_teams:
df_team = df[df[a_h_team]==t].fillna(0)
result = df_team['{}TGDIFF'.format(a_h_goal_letter)].rolling(4).mean()
df_team[avg_h_a_diff] = result
avg_per_team[t] = df_team
return avg_per_team
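    # Illustrative note (not from the original source): rolling(4).mean() leaves NaN for a team's
    # first three appearances and then averages its last four goal differences, e.g.
    # pd.Series([1, 2, 3, 4, 5]).rolling(4).mean() -> [NaN, NaN, NaN, 2.5, 3.5].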
d_AVGFTHG = avg_goal_diff(afl_df, 'AVGHTGDIFF', 'match.homeTeam.name', 'H')
df_AVGFTHG = from_dict_value_to_df(d_AVGFTHG)
df_AVGFTHG.sort_index(inplace=True)
d_AVGFTAG = avg_goal_diff(df_AVGFTHG, 'AVGATGDIFF', 'match.awayTeam.name', 'A')
afl_df = from_dict_value_to_df(d_AVGFTAG)
afl_df.sort_index(inplace=True)
afl_df['AVGATGDIFF'].fillna(0, inplace=True)
afl_df['goal_diff'] = afl_df['homeTeamScore.matchScore.goals'] - afl_df['awayTeamScore.matchScore.goals']
for index, row in df_all[df_all['match.status']=='CONCLUDED'].iterrows():
if afl_df['goal_diff'][index] > 0:
afl_df.at[index,'result'] = 1 # 1 is a win
else:
afl_df.at[index,'result'] = 0 # 0 is a loss
def previous_data(df, h_or_a_team, column, letter, past_n):
"""
input:
df = dataframe with all results
a_h_team = HomeTeam or AwayTeam
column = column selected to get previous data from
output:
team_with_past_dict = dictionary with team as a key and columns as values with new
columns with past value
"""
d = dict()
team_with_past_dict = dict()
all_teams = df[h_or_a_team].unique()
for team in all_teams:
n_games = len(df[df[h_or_a_team]==team])
team_with_past_dict[team] = df[df[h_or_a_team]==team]
for i in range(1, past_n):
d[i] = team_with_past_dict[team].assign(
result=team_with_past_dict[team].groupby(h_or_a_team)[column].shift(i)
).fillna({'{}_X'.format(column): 0})
team_with_past_dict[team]['{}_{}_{}'.format(letter, column, i)] = d[i].result
return team_with_past_dict
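    # Illustrative note (not from the original source): groupby(team)[column].shift(i) carries each
    # team's value from i games earlier onto the current row, so e.g. H_result_1 and H_result_2 hold
    # the home team's results from its previous one and two home fixtures.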
def previous_data_call(df, side, column, letter, iterations):
d = previous_data(df, side, column, letter, iterations)
df_result= from_dict_value_to_df(d)
df_result.sort_index(inplace=True)
return df_result
df_last_home_results = previous_data_call(afl_df, 'match.homeTeam.name', 'result', 'H', 3)
df_last_away_results = previous_data_call(df_last_home_results, 'match.awayTeam.name', 'result', 'A', 3)
df_last_last_HTGDIFF_results = previous_data_call(df_last_away_results, 'match.homeTeam.name', 'HTGDIFF', 'H', 3)
df_last_last_ATGDIFF_results = previous_data_call(df_last_last_HTGDIFF_results, 'match.awayTeam.name', 'ATGDIFF', 'A', 3)
df_last_AVGFTHG_results = previous_data_call(df_last_last_ATGDIFF_results, 'match.homeTeam.name', 'AVGHTGDIFF', 'H', 2)
df_last_AVGFTAG_results = previous_data_call(df_last_AVGFTHG_results, 'match.awayTeam.name', 'AVGATGDIFF', 'A', 2)
afl_df = df_last_AVGFTAG_results.copy()
all_cols = ['match.matchId','match.date', 'match.status', 'match.venue', 'match.homeTeam.name', 'match.awayTeam.name','venue.name', 'venue.state', 'round.name', 'round.year', 'round.roundNumber', 'status',
'homeTeamScore.rushedBehinds', 'homeTeamScore.minutesInFront',
'homeTeamScore.matchScore.totalScore', 'homeTeamScore.matchScore.goals',
'homeTeamScore.matchScore.behinds',
'homeTeamScore.matchScore.superGoals', 'awayTeamScore.rushedBehinds',
'awayTeamScore.minutesInFront', 'awayTeamScore.matchScore.totalScore',
'awayTeamScore.matchScore.goals', 'awayTeamScore.matchScore.behinds',
'awayTeamScore.matchScore.superGoals', 'weather.tempInCelsius',
'homeTeamScoreChart.goals', 'homeTeamScoreChart.leftBehinds',
'homeTeamScoreChart.rightBehinds', 'homeTeamScoreChart.leftPosters',
'homeTeamScoreChart.rightPosters', 'homeTeamScoreChart.rushedBehinds',
'homeTeamScoreChart.touchedBehinds', 'awayTeamScoreChart.goals',
'awayTeamScoreChart.leftBehinds', 'awayTeamScoreChart.rightBehinds',
'awayTeamScoreChart.leftPosters', 'awayTeamScoreChart.rightPosters',
'awayTeamScoreChart.rushedBehinds', 'awayTeamScoreChart.touchedBehinds',
'HQ1G', 'HQ1B', 'HQ2G',
'HQ2B', 'HQ3G', 'HQ3B', 'HQ4G', 'HQ4B', 'Home.score', 'AQ1G', 'AQ1B',
'AQ2G', 'AQ2B', 'AQ3G', 'AQ3B', 'AQ4G', 'AQ4B', 'Away.score',
'Kicks', 'Marks', 'Handballs', 'Goals', 'Behinds', 'Hit.Outs',
'Tackles', 'Rebounds', 'Inside.50s', 'Clearances', 'Clangers',
'Frees.For', 'Frees.Against', 'Brownlow.Votes', 'Contested.Possessions',
'Uncontested.Possessions', 'Contested.Marks', 'Marks.Inside.50',
'One.Percenters', 'Bounces', 'Goal.Assists', 'Time.on.Ground..',
'Substitute', 'group_id', 'HTGDIFF', 'ATGDIFF', 'AVGHTGDIFF',
'AVGATGDIFF', 'goal_diff', 'result', 'H_result_1', 'H_result_2',
'A_result_1', 'A_result_2', 'H_HTGDIFF_1', 'H_HTGDIFF_2', 'A_ATGDIFF_1',
'A_ATGDIFF_2', 'H_AVGHTGDIFF_1', 'A_AVGATGDIFF_1']
non_feature_cols = ['match.matchId','match.date', 'match.status', 'match.venue', 'match.homeTeam.name', 'match.awayTeam.name','venue.name', 'venue.state', 'round.name', 'round.year', 'round.roundNumber', 'status','Season']
feature_cols = [
'homeTeamScore.rushedBehinds', 'homeTeamScore.minutesInFront',
'homeTeamScore.matchScore.totalScore', 'homeTeamScore.matchScore.goals',
'homeTeamScore.matchScore.behinds',
'homeTeamScore.matchScore.superGoals', 'awayTeamScore.rushedBehinds',
'awayTeamScore.minutesInFront', 'awayTeamScore.matchScore.totalScore',
'awayTeamScore.matchScore.goals', 'awayTeamScore.matchScore.behinds',
'awayTeamScore.matchScore.superGoals', 'weather.tempInCelsius',
'homeTeamScoreChart.goals', 'homeTeamScoreChart.leftBehinds',
'homeTeamScoreChart.rightBehinds', 'homeTeamScoreChart.leftPosters',
'homeTeamScoreChart.rightPosters', 'homeTeamScoreChart.rushedBehinds',
'homeTeamScoreChart.touchedBehinds', 'awayTeamScoreChart.goals',
'awayTeamScoreChart.leftBehinds', 'awayTeamScoreChart.rightBehinds',
'awayTeamScoreChart.leftPosters', 'awayTeamScoreChart.rightPosters',
'awayTeamScoreChart.rushedBehinds', 'awayTeamScoreChart.touchedBehinds',
'HQ1G', 'HQ1B', 'HQ2G',
'HQ2B', 'HQ3G', 'HQ3B', 'HQ4G', 'HQ4B', 'Home.score', 'AQ1G', 'AQ1B',
'AQ2G', 'AQ2B', 'AQ3G', 'AQ3B', 'AQ4G', 'AQ4B', 'Away.score',
'Kicks', 'Marks', 'Handballs', 'Goals', 'Behinds', 'Hit.Outs',
'Tackles', 'Rebounds', 'Inside.50s', 'Clearances', 'Clangers',
'Frees.For', 'Frees.Against', 'Brownlow.Votes', 'Contested.Possessions',
'Uncontested.Possessions', 'Contested.Marks', 'Marks.Inside.50',
'One.Percenters', 'Bounces', 'Goal.Assists', 'Time.on.Ground..',
'Substitute', 'group_id', 'HTGDIFF', 'ATGDIFF', 'AVGHTGDIFF',
'AVGATGDIFF', 'goal_diff', 'result', 'H_result_1', 'H_result_2',
'A_result_1', 'A_result_2', 'H_HTGDIFF_1', 'H_HTGDIFF_2', 'A_ATGDIFF_1',
'A_ATGDIFF_2', 'H_AVGHTGDIFF_1', 'A_AVGATGDIFF_1']
afl_df = afl_df[all_cols]
afl_df = afl_df.rename(columns={col: 'f_' + col for col in afl_df if col not in non_feature_cols})
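    # Illustrative note (not from the original source): prefixing every modelling column with 'f_'
    # lets create_exp_weighted_avgs below select feature columns generically via
    # col.startswith('f_') while leaving identifier columns such as the team names untouched.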
def create_training_and_test_data(afl_df,df_next_games_teams):
        # Define a function which returns a DataFrame with the exponential moving average for each numeric stat
def create_exp_weighted_avgs(df, span):
# Create a copy of the df with only the game id and the team - we will add cols to this df
ema_features = df[['match.matchId', 'match.homeTeam.name']].copy()
feature_names = [col for col in df.columns if col.startswith('f_')] # Get a list of columns we will iterate over
for feature_name in feature_names:
feature_ema = (df.groupby('match.homeTeam.name')[feature_name]
.transform(lambda row: (row.ewm(span=span)
.mean()
.shift(1))))
ema_features[feature_name] = feature_ema
return ema_features
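        # Illustrative note (not from the original source): the trailing .shift(1) removes the
        # current game's own stats from its feature row, so each row only sees the exponentially
        # weighted average of earlier games and the model is never shown the outcome it must predict.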
# Define a function which finds the elo for each team in each game and returns a dictionary with the game ID as a key and the
# elos as the key's value, in a list. It also outputs the probabilities and a dictionary of the final elos for each team
def elo_applier(df, k_factor):
# Initialise a dictionary with default elos for each team
elo_dict = {team: 1500 for team in df['match.homeTeam.name'].unique()}
elos, elo_probs = {}, {}
# Get a home and away dataframe so that we can get the teams on the same row
#home_df = df.loc[df.home_game == 1, ['match.homeTeam.name', 'match.matchId', 'f_margin', 'home_game']].rename(columns={'team': 'home_team'})
#away_df = df.loc[df.home_game == 0, ['match.homeTeam.name', 'match.matchId']].rename(columns={'team': 'away_team'})
#df = (pd.merge(home_df, away_df, on='game')
# .sort_values(by='game')
# .drop_duplicates(subset='game', keep='first')
# .reset_index(drop=True))
# Loop over the rows in the DataFrame
for index, row in df.iterrows():
# Get the Game ID
game_id = row['match.matchId']
# Get the margin
margin = row['f_goal_diff']
# If the game already has the elos for the home and away team in the elos dictionary, go to the next game
if game_id in elos.keys():
continue
# Get the team and opposition
home_team = row['match.homeTeam.name']
away_team = row['match.awayTeam.name']
# Get the team and opposition elo score
home_team_elo = elo_dict[home_team]
away_team_elo = elo_dict[away_team]
            # Calculate the probability of winning for the home and away teams
prob_win_home = 1 / (1 + 10**((away_team_elo - home_team_elo) / 400))
prob_win_away = 1 - prob_win_home
            # Add the elos and probabilities to the elos and elo_probs dictionaries, keyed by the Game ID
elos[game_id] = [home_team_elo, away_team_elo]
elo_probs[game_id] = [prob_win_home, prob_win_away]
# Calculate the new elos of each team
if margin > 0: # Home team wins; update both teams' elo
new_home_team_elo = home_team_elo + k_factor*(1 - prob_win_home)
new_away_team_elo = away_team_elo + k_factor*(0 - prob_win_away)
elif margin < 0: # Away team wins; update both teams' elo
new_home_team_elo = home_team_elo + k_factor*(0 - prob_win_home)
new_away_team_elo = away_team_elo + k_factor*(1 - prob_win_away)
            elif margin == 0: # Drawn game; update both teams' elo
new_home_team_elo = home_team_elo + k_factor*(0.5 - prob_win_home)
new_away_team_elo = away_team_elo + k_factor*(0.5 - prob_win_away)
# Update elos in elo dictionary
elo_dict[home_team] = new_home_team_elo
elo_dict[away_team] = new_away_team_elo
return elos, elo_probs, elo_dict
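        # Worked example (illustrative only, not from the original source): with home elo 1600 and
        # away elo 1400, prob_win_home = 1 / (1 + 10 ** ((1400 - 1600) / 400)) ~= 0.76. A home win
        # with k_factor=30 then lifts the home elo by 30 * (1 - 0.76) ~= 7.2 to ~1607.2 and lowers
        # the away elo by the same amount to ~1392.8, keeping the ratings zero-sum.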
afl_df['train_data'] = 1
df_next_games_teams['train_data'] = 0
        afl_data = pd.concat([afl_df, df_next_games_teams]).reset_index(drop=True)  # DataFrame.append is deprecated; pd.concat is equivalent here
features_rolling_averages = create_exp_weighted_avgs(afl_data, span=10)
features = afl_data[['match.date', 'match.matchId', 'match.homeTeam.name', 'match.awayTeam.name', 'venue.name','round.year','train_data']].copy()
features = pd.merge(features, features_rolling_averages, on=['match.matchId', 'match.homeTeam.name'])
form_btwn_teams = afl_df[['match.matchId', 'match.homeTeam.name', 'match.awayTeam.name', 'f_goal_diff']].copy()
elos, elo_probs, elo_dict = elo_applier(afl_data, 30)
# Add our created features - elo, efficiency etc.
#features = (features.assign(f_elo_home=lambda df: df['match.matchId'].map(elos).apply(lambda x: x[0]),
# f_elo_away=lambda df: df['match.matchId'].map(elos).apply(lambda x: x[1]))
# .reset_index(drop=True))
# form_btwn_teams_inv = pd.DataFrame()
# for index, row in form_btwn_teams.iterrows():
# home = row['match.homeTeam.name']
# away = row['match.awayTeam.name']
# matchid = row['match.matchId']
# margin = row['f_goal_diff']
# form_btwn_teams_inv = form_btwn_teams_inv.append({'match.matchId': matchid, 'match.homeTeam.name': away, 'match.awayTeam.name': home, 'f_goal_diff': -1*margin}, ignore_index=True)
# form_btwn_teams['f_form_margin_btwn_teams'] = (form_btwn_teams.groupby(['match.homeTeam.name', 'match.awayTeam.name'])['f_goal_diff']
# .transform(lambda row: row.rolling(5).mean().shift())
# .fillna(0))
# form_btwn_teams['f_form_past_5_btwn_teams'] = \
# (form_btwn_teams.assign(win=lambda df: df.apply(lambda row: 1 if row.f_goal_diff > 0 else 0, axis='columns'))
# .groupby(['match.homeTeam.name', 'match.awayTeam.name'])['win']
# .transform(lambda row: row.rolling(5).mean().shift() * 5)
# .fillna(0))
#print(features.shape)
# Merge to our features df
#features = pd.merge(features, form_btwn_teams_1.drop(columns=['f_goal_diff']), on=['match.matchId', 'match.homeTeam.name', 'match.awayTeam.name'])
#print(features.shape)
# Get the result and merge to the feature_df
match_results = (afl_df.assign(result=lambda df: df.apply(lambda row: 1 if row['f_goal_diff'] > 0 else 0, axis=1)))
# Merge result column to feature_df
        feature_df = pd.merge(features, match_results[['match.matchId', 'result']], on='match.matchId')
"""Load the processed QCMR data."""
from pathlib import Path
from typing import Iterator, Tuple, Type
import numpy as np
import pandas as pd
from pydantic import validate_arguments
from ..utils.misc import fiscal_year_quarter_from_path
from . import cash, obligations, personal_services, positions
from .base import ETLPipelineQCMR
from .cash.core import CASH_DATA_TYPE
__all__ = [
"load_cash_reports",
"load_department_obligations",
"load_fulltime_positions",
"load_personal_services_summary",
]
def _load_processed_results(
cls: Type[ETLPipelineQCMR],
) -> Iterator[Tuple[Path, int, int]]:
"""Internal helper function for loading processed results."""
# Get the files
dirname = cls.get_data_directory("processed")
files = sorted(dirname.glob("*.csv"), reverse=True)
# Loop over each file
for f in files:
# Get fiscal year and quarter
fiscal_year, quarter = fiscal_year_quarter_from_path(f)
# Yield
yield f, fiscal_year, quarter
def _load_department_reports(cls: Type[ETLPipelineQCMR]) -> pd.DataFrame:
"""Internal function to load department-based QCMR reports."""
all_df = []
fiscal_years = set()
report_fiscal_years = set()
for f, fiscal_year, quarter in _load_processed_results(cls):
# Get fiscal year and quarter
fiscal_year, quarter = fiscal_year_quarter_from_path(f)
# Load
        df = pd.read_csv(f, dtype={"dept_code": str})
#!/usr/bin/env python
import rospy
from std_msgs.msg import Empty
import os
import csv
import time
import pandas as pd
import matplotlib
matplotlib.use('Agg')
#import matplotlib.pyplot as plt
#import sys
class Plots:
def __init__(self, path, param):
self.path = path
self.param = param
rospy.Subscriber("/csv/end", Empty, self.Plot)
self.restart_pub = rospy.Publisher('/restart', Empty, queue_size=1)
rospy.spin()
def Plot(self, data):
odometry = pd.read_csv(os.path.join(self.path,'odometry.csv'))
odometry.rename(columns={'X': r'$x$', 'Y': r'$y$', 'Z': r'$z$', 'Roll': r'$\phi$', 'Pitch': r'$\theta$', 'Yaw': r'$\psi$'}, inplace = True)
odometry_gt = pd.read_csv(os.path.join(self.path,'odometry_gt.csv'))
odometry_gt.rename(columns={'X': r'$x$', 'Y': r'$y$', 'Z': r'$z$', 'Roll': r'$\phi$', 'Pitch': r'$\theta$', 'Yaw': r'$\psi$'}, inplace = True)
reference = pd.read_csv(os.path.join(self.path,'reference.csv'))
reference.rename(columns={'X': r'$x$', 'Y': r'$y$', 'Z': r'$z$', 'Roll': r'$\phi$', 'Pitch': r'$\theta$', 'Yaw': r'$\psi$'}, inplace = True)
referenceGamma = pd.read_csv(os.path.join(self.path,'referenceGamma.csv'))
referenceGamma.rename(columns={'X': r'$x$', 'Y': r'$y$', 'Z': r'$z$', 'Roll': r'$\phi$', 'Pitch': r'$\theta$', 'Yaw': r'$\psi$'}, inplace = True)
odometry_df = odometry.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\psi$']]
odometry_gt_df = odometry_gt.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\psi$']]
reference_df = reference.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\psi$']]
referenceGamma_df = referenceGamma.loc[:, ['Tiempo',r'$x$',r'$y$',r'$z$',r'$\psi$']]
odometry_df.plot(x=0,grid=True,title='Odometry').get_figure().savefig(os.path.join(self.path,'odometry.png'))
odometry_gt_df.plot(x=0,grid=True,title='Odometry_GT').get_figure().savefig(os.path.join(self.path,'odometry_gt.png'))
reference_df.plot(x=0,grid=True,title='Reference').get_figure().savefig(os.path.join(self.path,'reference.png'))
referenceGamma_df.plot(x=0,grid=True,title='Reference_Gamma').get_figure().savefig(os.path.join(self.path,'referenceGamma.png'))
errors = {}
#cambios= {}
#for ax in [r'x',r'y',r'z',r'\phi',r'\theta',r'\psi']:
for ax in [r'x',r'y',r'z',r'\psi']:
ax_orig = r'$' + ax + r'$'
ax_odom = r'$' + ax + r'_{odom}$'
ax_gt = r'$' + ax + r'_{gt}$'
ax_ref = r'$' + ax + r'_{ref}$'
ax_gamma = r'$' + ax + r'_{gamma}$'
ax_err = r'$e_{' + ax + r'}$'
odometry_ = odometry_df.loc[:,['Tiempo',ax_orig]]
odometry_.rename(columns={ax_orig: ax_odom},inplace = True)
odometry_gt_ = odometry_gt_df.loc[:,['Tiempo',ax_orig]]
odometry_gt_.rename(columns={ax_orig: ax_gt},inplace = True)
reference_ = reference_df.loc[:,['Tiempo',ax_orig]]
reference_.rename(columns={ax_orig: ax_ref},inplace = True)
referenceGamma_ = referenceGamma_df.loc[:,['Tiempo',ax_orig]]
referenceGamma_.rename(columns={ax_orig: ax_gamma},inplace = True)
df = pd.merge(odometry_, odometry_gt_, on='Tiempo', how='inner')
            df = pd.merge(df, reference_, on='Tiempo', how='inner')
# Adapted from
# https://github.com/CODAIT/text-extensions-for-pandas/blob/dc03278689fe1c5f131573658ae19815ba25f33e/text_extensions_for_pandas/array/tensor.py
# and
# https://github.com/CODAIT/text-extensions-for-pandas/blob/dc03278689fe1c5f131573658ae19815ba25f33e/text_extensions_for_pandas/array/arrow_conversion.py
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications:
# - Added ArrowTensorType.to_pandas_type()
# - Added ArrowTensorArray.__getitem__()
# - Added ArrowTensorArray.__iter__()
# - Added support for column casts to extension types.
# - Fleshed out docstrings and examples.
# - Fixed TensorArray.isna() so it returns an appropriate ExtensionArray.
# - Added different (more vectorized) TensorArray.take() operation.
# - Added support for more reducers (agg funcs) to TensorArray.
# - Added support for logical operators to TensorArray(Element).
# - Miscellaneous small bug fixes and optimizations.
from collections.abc import Iterable
import numbers
from typing import Sequence, Any, Union, Tuple, Optional, Callable
import numpy as np
import pandas as pd
from pandas._typing import Dtype
from pandas.compat import set_function_name
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
try:
from pandas.core.dtypes.generic import ABCIndex
except ImportError:
# ABCIndexClass changed to ABCIndex in Pandas 1.3
from pandas.core.dtypes.generic import ABCIndexClass as ABCIndex
from pandas.core.indexers import check_array_indexer, validate_indices
import pyarrow as pa
from ray.util.annotations import PublicAPI
# -----------------------------------------------------------------------------
# Pandas extension type and array
# -----------------------------------------------------------------------------
@PublicAPI(stability="beta")
@pd.api.extensions.register_extension_dtype
class TensorDtype(pd.api.extensions.ExtensionDtype):
"""
Pandas extension type for a column of fixed-shape, homogeneous-typed
tensors.
See:
https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/base.py
for up-to-date interface documentation and the subclassing contract. The
docstrings of the below properties and methods were copied from the base
ExtensionDtype.
Examples:
>>> # Create a DataFrame with a list of ndarrays as a column.
>>> df = pd.DataFrame({
"one": [1, 2, 3],
"two": list(np.arange(24).reshape((3, 2, 2, 2)))})
>>> # Note the opaque np.object dtype for this column.
>>> df.dtypes
one int64
two object
dtype: object
>>> # Cast column to our TensorDtype extension type.
>>> df["two"] = df["two"].astype(TensorDtype())
>>> # Note that the column dtype is now TensorDtype instead of
>>> # np.object.
>>> df.dtypes
one int64
two TensorDtype
dtype: object
>>> # Pandas is now aware of this tensor column, and we can do the
>>> # typical DataFrame operations on this column.
>>> col = 2 * (df["two"] + 10)
>>> # The ndarrays underlying the tensor column will be manipulated,
>>> # but the column itself will continue to be a Pandas type.
>>> type(col)
pandas.core.series.Series
>>> col
0 [[[ 2 4]
[ 6 8]]
[[10 12]
[14 16]]]
1 [[[18 20]
[22 24]]
[[26 28]
[30 32]]]
2 [[[34 36]
[38 40]]
[[42 44]
[46 48]]]
Name: two, dtype: TensorDtype
>>> # Once you do an aggregation on that column that returns a single
>>> # row's value, you get back our TensorArrayElement type.
>>> tensor = col.mean()
>>> type(tensor)
ray.data.extensions.tensor_extension.TensorArrayElement
>>> tensor
array([[[18., 20.],
[22., 24.]],
[[26., 28.],
[30., 32.]]])
>>> # This is a light wrapper around a NumPy ndarray, and can easily
>>> # be converted to an ndarray.
>>> type(tensor.to_numpy())
numpy.ndarray
>>> # In addition to doing Pandas operations on the tensor column,
>>> # you can now put the DataFrame into a Dataset.
>>> ds = ray.data.from_pandas(df)
>>> # Internally, this column is represented the corresponding
>>> # Arrow tensor extension type.
>>> ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> # You can write the dataset to Parquet.
>>> ds.write_parquet("/some/path")
>>> # And you can read it back.
>>> read_ds = ray.data.read_parquet("/some/path")
>>> read_ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> read_df = ray.get(read_ds.to_pandas_refs())[0]
>>> read_df.dtypes
one int64
two TensorDtype
dtype: object
>>> # The tensor extension type is preserved along the
>>> # Pandas --> Arrow --> Parquet --> Arrow --> Pandas
>>> # conversion chain.
>>> read_df.equals(df)
True
"""
# NOTE(Clark): This is apparently required to prevent integer indexing
# errors, but is an undocumented ExtensionDtype attribute. See issue:
# https://github.com/CODAIT/text-extensions-for-pandas/issues/166
base = None
@property
def type(self):
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
return TensorArrayElement
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
return "TensorDtype"
@classmethod
def construct_from_string(cls, string: str):
"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[H]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# Upstream code uses exceptions as part of its normal control flow and
# will pass this method bogus class names.
if string == cls.__name__:
return cls()
else:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
@classmethod
def construct_array_type(cls):
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return TensorArray
def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
"""
Convert a pyarrow (chunked) array to a TensorArray.
This and TensorArray.__arrow_array__ make up the
Pandas extension type + array <--> Arrow extension type + array
interoperability protocol. See
https://pandas.pydata.org/pandas-docs/stable/development/extending.html#compatibility-with-apache-arrow
for more information.
"""
if isinstance(array, pa.ChunkedArray):
if array.num_chunks > 1:
# TODO(Clark): Remove concat and construct from list with
# shape.
values = np.concatenate(
[chunk.to_numpy() for chunk in array.iterchunks()]
)
else:
values = array.chunk(0).to_numpy()
else:
values = array.to_numpy()
return TensorArray(values)
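    # Illustrative sketch (not part of the original module, and it assumes the Arrow tensor
    # extension type defined elsewhere in this module is registered): __from_arrow__ together with
    # TensorArray.__arrow_array__ is what lets a round trip preserve the extension dtype, e.g.
    #
    #   df = pd.DataFrame({"t": TensorArray(np.zeros((3, 2, 2)))})
    #   table = pa.Table.from_pandas(df)   # tensor column becomes an Arrow extension array
    #   df2 = table.to_pandas()            # and comes back as a TensorDtype column
    #   assert isinstance(df2["t"].dtype, TensorDtype)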
class TensorOpsMixin(pd.api.extensions.ExtensionScalarOpsMixin):
"""
Mixin for TensorArray operator support, applying operations on the
underlying ndarrays.
"""
@classmethod
def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None):
"""
Add support for binary operators by unwrapping, applying, and
rewrapping.
"""
# NOTE(Clark): This overrides, but coerce_to_dtype, result_dtype might
# not be needed
def _binop(self, other):
lvalues = self._tensor
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndex)):
# Rely on Pandas to unbox and dispatch to us.
return NotImplemented
# divmod returns a tuple
if op_name in ["__divmod__", "__rdivmod__"]:
# TODO(Clark): Add support for divmod and rdivmod.
# div, mod = result
raise NotImplementedError
if isinstance(other, (TensorArray, TensorArrayElement)):
rvalues = other._tensor
else:
rvalues = other
result = op(lvalues, rvalues)
# Force a TensorArray if rvalue is not a scalar.
if isinstance(self, TensorArrayElement) and (
not isinstance(other, TensorArrayElement) or not np.isscalar(other)
):
result_wrapped = TensorArray(result)
else:
result_wrapped = cls(result)
return result_wrapped
op_name = f"__{op.__name__}__"
return set_function_name(_binop, op_name, cls)
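    # Illustrative sketch (not part of the original module): once the arithmetic/comparison ops are
    # registered through this ExtensionScalarOpsMixin machinery, operators dispatch to NumPy on the
    # underlying ndarrays, e.g.
    #
    #   arr = TensorArray(np.arange(8).reshape((2, 2, 2)))
    #   (arr + 1)                  # elementwise add over every tensor element, still a TensorArray
    #   (arr * arr)[0].to_numpy()  # binary op between two TensorArrays, first element as an ndarray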
@classmethod
def _create_logical_method(cls, op):
return cls._create_method(op)
class TensorArrayElement(TensorOpsMixin):
"""
Single element of a TensorArray, wrapping an underlying ndarray.
"""
def __init__(self, values: np.ndarray):
"""
Construct a TensorArrayElement from a NumPy ndarray.
Args:
values: ndarray that underlies this TensorArray element.
"""
self._tensor = values
def __repr__(self):
return self._tensor.__repr__()
def __str__(self):
return self._tensor.__str__()
def to_numpy(self):
"""
Return the values of this element as a NumPy ndarray.
"""
return np.asarray(self._tensor)
def __array__(self):
return np.asarray(self._tensor)
@PublicAPI(stability="beta")
class TensorArray(pd.api.extensions.ExtensionArray, TensorOpsMixin):
"""
Pandas `ExtensionArray` representing a tensor column, i.e. a column
consisting of ndarrays as elements. All tensors in a column must have the
same shape.
Examples:
>>> # Create a DataFrame with a list of ndarrays as a column.
>>> df = pd.DataFrame({
"one": [1, 2, 3],
"two": TensorArray(np.arange(24).reshape((3, 2, 2, 2)))})
>>> # Note that the column dtype is TensorDtype.
>>> df.dtypes
one int64
two TensorDtype
dtype: object
>>> # Pandas is aware of this tensor column, and we can do the
>>> # typical DataFrame operations on this column.
>>> col = 2 * (df["two"] + 10)
>>> # The ndarrays underlying the tensor column will be manipulated,
>>> # but the column itself will continue to be a Pandas type.
>>> type(col)
pandas.core.series.Series
>>> col
0 [[[ 2 4]
[ 6 8]]
[[10 12]
[14 16]]]
1 [[[18 20]
[22 24]]
[[26 28]
[30 32]]]
2 [[[34 36]
[38 40]]
[[42 44]
[46 48]]]
Name: two, dtype: TensorDtype
>>> # Once you do an aggregation on that column that returns a single
>>> # row's value, you get back our TensorArrayElement type.
>>> tensor = col.mean()
>>> type(tensor)
ray.data.extensions.tensor_extension.TensorArrayElement
>>> tensor
array([[[18., 20.],
[22., 24.]],
[[26., 28.],
[30., 32.]]])
>>> # This is a light wrapper around a NumPy ndarray, and can easily
>>> # be converted to an ndarray.
>>> type(tensor.to_numpy())
numpy.ndarray
>>> # In addition to doing Pandas operations on the tensor column,
>>> # you can now put the DataFrame into a Dataset.
>>> ds = ray.data.from_pandas(df)
>>> # Internally, this column is represented the corresponding
>>> # Arrow tensor extension type.
>>> ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> # You can write the dataset to Parquet.
>>> ds.write_parquet("/some/path")
>>> # And you can read it back.
>>> read_ds = ray.data.read_parquet("/some/path")
>>> read_ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> read_df = ray.get(read_ds.to_pandas_refs())[0]
>>> read_df.dtypes
one int64
two TensorDtype
dtype: object
>>> # The tensor extension type is preserved along the
>>> # Pandas --> Arrow --> Parquet --> Arrow --> Pandas
>>> # conversion chain.
>>> read_df.equals(df)
True
"""
SUPPORTED_REDUCERS = {
"sum": np.sum,
"all": np.all,
"any": np.any,
"min": np.min,
"max": np.max,
"mean": np.mean,
"median": np.median,
"prod": np.prod,
"std": np.std,
"var": np.var,
}
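    # Note (illustrative, not from the original source): these names back aggregations such as the
    # col.mean() example in the class docstring; each supported reducer resolves to the matching
    # NumPy function applied to the underlying ndarray.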
# See https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/base.py # noqa
# for interface documentation and the subclassing contract.
def __init__(
self,
values: Union[
np.ndarray,
ABCSeries,
Sequence[Union[np.ndarray, TensorArrayElement]],
TensorArrayElement,
Any,
],
):
"""
Args:
values: A NumPy ndarray or sequence of NumPy ndarrays of equal
shape.
"""
if isinstance(values, ABCSeries):
# Convert series to ndarray and passthrough to ndarray handling
# logic.
values = values.to_numpy()
if isinstance(values, np.ndarray):
if (
values.dtype.type is np.object_
and len(values) > 0
and isinstance(values[0], (np.ndarray, TensorArrayElement))
):
# Convert ndarrays of ndarrays/TensorArrayElements
# with an opaque object type to a properly typed ndarray of
# ndarrays.
self._tensor = np.array([np.asarray(v) for v in values])
else:
self._tensor = values
elif isinstance(values, Sequence):
if len(values) == 0:
self._tensor = np.array([])
else:
self._tensor = np.stack([np.asarray(v) for v in values], axis=0)
elif isinstance(values, TensorArrayElement):
self._tensor = np.array([np.asarray(values)])
elif np.isscalar(values):
# `values` is a single element:
self._tensor = np.array([values])
elif isinstance(values, TensorArray):
raise TypeError("Use the copy() method to create a copy of a TensorArray")
else:
raise TypeError(
"Expected a numpy.ndarray or sequence of numpy.ndarray, "
f"but received {values} "
f"of type '{type(values)}' instead."
)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
):
"""
Construct a new ExtensionArray from a sequence of scalars.
Parameters
----------
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type`` or be converted into this type in this
method.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : bool, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
"""
if copy and isinstance(scalars, np.ndarray):
scalars = scalars.copy()
elif isinstance(scalars, TensorArray):
scalars = scalars._tensor.copy() if copy else scalars._tensor
return TensorArray(scalars)
@classmethod
def _from_factorized(
cls, values: np.ndarray, original: pd.api.extensions.ExtensionArray
):
"""
Reconstruct an ExtensionArray after factorization.
Parameters
----------
values : ndarray
An integer ndarray with the factorized values.
original : ExtensionArray
The original ExtensionArray that factorize was called on.
See Also
--------
factorize : Top-level factorize method that dispatches here.
ExtensionArray.factorize : Encode the extension array as an enumerated
type.
"""
raise NotImplementedError
def __getitem__(
self, item: Union[int, slice, np.ndarray]
) -> Union["TensorArray", "TensorArrayElement"]:
"""
Select a subset of self.
Parameters
----------
item : int, slice, or ndarray
* int: The position in 'self' to get.
* slice: A slice object, where 'start', 'stop', and 'step' are
integers or None
* ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
Returns
-------
item : scalar or ExtensionArray
Notes
-----
For scalar ``item``, return a scalar value suitable for the array's
type. This should be an instance of ``self.dtype.type``.
For slice ``key``, return an instance of ``ExtensionArray``, even
if the slice is length 0 or 1.
For a boolean mask, return an instance of ``ExtensionArray``, filtered
to the values where ``item`` is True.
"""
# Return scalar if single value is selected, a TensorArrayElement for
# single array element, or TensorArray for slice.
if isinstance(item, int):
value = self._tensor[item]
if np.isscalar(value):
return value
else:
return TensorArrayElement(value)
else:
# BEGIN workaround for Pandas issue #42430
if isinstance(item, tuple) and len(item) > 1 and item[0] == Ellipsis:
if len(item) > 2:
# Hopefully this case is not possible, but can't be sure
raise ValueError(
"Workaround Pandas issue #42430 not "
"implemented for tuple length > 2"
)
item = item[1]
# END workaround for issue #42430
if isinstance(item, TensorArray):
item = np.asarray(item)
item = check_array_indexer(self, item)
return TensorArray(self._tensor[item])
def __len__(self) -> int:
"""
Length of this array.
Returns
-------
length : int
"""
return len(self._tensor)
@property
def dtype(self) -> pd.api.extensions.ExtensionDtype:
"""
An instance of 'ExtensionDtype'.
"""
return TensorDtype()
@property
def nbytes(self) -> int:
"""
The number of bytes needed to store this object in memory.
"""
return self._tensor.nbytes
def isna(self) -> "TensorArray":
"""
A 1-D array indicating if each value is missing.
Returns
-------
na_values : Union[np.ndarray, ExtensionArray]
In most cases, this should return a NumPy ndarray. For
exceptional cases like ``SparseArray``, where returning
an ndarray would be expensive, an ExtensionArray may be
returned.
Notes
-----
If returning an ExtensionArray, then
* ``na_values._is_boolean`` should be True
* `na_values` should implement :func:`ExtensionArray._reduce`
* ``na_values.any`` and ``na_values.all`` should be implemented
"""
if self._tensor.dtype.type is np.object_:
# Avoid comparing with __eq__ because the elements of the tensor
# may do something funny with that operation.
result_list = [self._tensor[i] is None for i in range(len(self))]
result = np.broadcast_to(
np.array(result_list, dtype=np.bool_), self.numpy_shape
)
elif self._tensor.dtype.type is np.str_:
result = self._tensor == ""
else:
result = np.isnan(self._tensor)
return TensorArray(result)
def take(
self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None
) -> "TensorArray":
"""
Take elements from an array.
Parameters
----------
indices : sequence of int
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
Returns
-------
ExtensionArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
See Also
--------
numpy.take : Take elements from an array along an axis.
api.extensions.take : Take elements from an array.
Notes
-----
ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
``iloc``, when `indices` is a sequence of values. Additionally,
it's called by :meth:`Series.reindex`, or any other method
that causes realignment, with a `fill_value`.
Examples
--------
Here's an example implementation, which relies on casting the
extension array to object dtype. This uses the helper method
:func:`pandas.api.extensions.take`.
.. code-block:: python
def take(self, indices, allow_fill=False, fill_value=None):
from pandas.core.algorithms import take
# If the ExtensionArray is backed by an ndarray, then
# just pass that here instead of coercing to object.
data = self.astype(object)
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
# fill value should always be translated from the scalar
# type for the array, to the physical storage type for
# the data, before passing to take.
result = take(data, indices, fill_value=fill_value,
allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
"""
if allow_fill:
# With allow_fill being True, negative values in `indices` indicate
# missing values and should be set to `fill_value`.
indices = np.asarray(indices, dtype=np.intp)
validate_indices(indices, len(self._tensor))
# Check if there are missing indices to fill, otherwise we can
# delegate to NumPy ndarray .take().
has_missing = np.any(indices < 0)
if has_missing:
if fill_value is None:
fill_value = np.nan
# Create an array populated with fill value.
values = np.full((len(indices),) + self._tensor.shape[1:], fill_value)
# Put tensors at the given positive indices into array.
is_nonneg = indices >= 0
np.put(values, np.where(is_nonneg)[0], self._tensor[indices[is_nonneg]])
return TensorArray(values)
# Delegate take to NumPy array.
values = self._tensor.take(indices, axis=0)
return TensorArray(values)
def copy(self) -> "TensorArray":
"""
Return a copy of the array.
Returns
-------
ExtensionArray
"""
# TODO(Clark): Copy cached properties.
return TensorArray(self._tensor.copy())
@classmethod
def _concat_same_type(cls, to_concat: Sequence["TensorArray"]) -> "TensorArray":
"""
Concatenate multiple array of this dtype.
Parameters
----------
to_concat : sequence of this type
Returns
-------
ExtensionArray
"""
return TensorArray(np.concatenate([a._tensor for a in to_concat]))
def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
"""
Set one or more values inplace.
This method is not required to satisfy the pandas extension array
interface.
Parameters
----------
key : int, ndarray, or slice
When called from, e.g. ``Series.__setitem__``, ``key`` will be
one of
* scalar int
* ndarray of integers.
* boolean ndarray
* slice object
value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
value or values to be set for ``key``.
Returns
-------
None
"""
key = check_array_indexer(self, key)
if isinstance(value, TensorArrayElement) or np.isscalar(value):
value = np.asarray(value)
if isinstance(value, list):
value = [
np.asarray(v) if isinstance(v, TensorArrayElement) else v for v in value
]
if isinstance(value, ABCSeries) and isinstance(value.dtype, TensorDtype):
value = value.values
if value is None or isinstance(value, Sequence) and len(value) == 0:
self._tensor[key] = np.full_like(self._tensor[key], np.nan)
elif isinstance(key, (int, slice, np.ndarray)):
self._tensor[key] = value
else:
raise NotImplementedError(
f"__setitem__ with key type '{type(key)}' not implemented"
)
def __contains__(self, item) -> bool:
"""
Return for `item in self`.
"""
if isinstance(item, TensorArrayElement):
np_item = np.asarray(item)
if np_item.size == 1 and np.isnan(np_item).all():
return self.isna().any()
return super().__contains__(item)
def __repr__(self):
return self._tensor.__repr__()
def __str__(self):
return self._tensor.__str__()
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
# TODO(Clark): return self._tensor, np.nan
raise NotImplementedError
def _reduce(self, name: str, skipna: bool = True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
supported_kwargs = ["ddof"]
reducer_kwargs = {}
for kw in supported_kwargs:
try:
reducer_kwargs[kw] = kwargs[kw]
except KeyError:
pass
try:
return TensorArrayElement(
self.SUPPORTED_REDUCERS[name](self._tensor, axis=0, **reducer_kwargs)
)
except KeyError:
raise NotImplementedError(f"'{name}' aggregate not implemented.") from None
def __array__(self, dtype: np.dtype = None):
return np.asarray(self._tensor, dtype=dtype)
def __array_ufunc__(self, ufunc: Callable, method: str, *inputs, **kwargs):
"""
Supports NumPy ufuncs without requiring sloppy coercion to an
ndarray.
"""
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, (TensorArray, np.ndarray, numbers.Number)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._tensor if isinstance(x, TensorArray) else x for x in inputs)
if out:
kwargs["out"] = tuple(
x._tensor if isinstance(x, TensorArray) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
# Multiple return values.
return tuple(type(self)(x) for x in result)
elif method == "at":
# No return value.
return None
else:
# One return value.
return type(self)(result)
def to_numpy(
self,
dtype: np.dtype = None,
copy: bool = False,
na_value: Any = pd.api.extensions.no_default,
):
"""
Convert to a NumPy ndarray.
.. versionadded:: 1.0.0
This is similar to :meth:`numpy.asarray`, but may provide additional
control over how the conversion is done.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
Returns
-------
numpy.ndarray
"""
if dtype is not None:
dtype = pd.api.types.pandas_dtype(dtype)
if copy:
values = np.array(self._tensor, dtype=dtype, copy=True)
else:
values = self._tensor.astype(dtype)
elif copy:
values = self._tensor.copy()
else:
values = self._tensor
return values
@property
def numpy_dtype(self):
"""
Get the dtype of the tensor.
:return: The numpy dtype of the backing ndarray
"""
return self._tensor.dtype
@property
def numpy_ndim(self):
"""
Get the number of tensor dimensions.
:return: integer for the number of dimensions
"""
return self._tensor.ndim
@property
def numpy_shape(self):
"""
Get the shape of the tensor.
:return: A tuple of integers for the numpy shape of the backing ndarray
"""
return self._tensor.shape
@property
def _is_boolean(self):
"""
Whether this extension array should be considered boolean.
By default, ExtensionArrays are assumed to be non-numeric.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
# This is needed to support returning a TensorArray from .isnan().
# TODO(Clark): Propagate tensor dtype to extension TensorDtype and
# move this property there.
return np.issubdtype(self._tensor.dtype, np.bool_)
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
dtype = pd.api.types.pandas_dtype(dtype)
if isinstance(dtype, TensorDtype):
values = TensorArray(self._tensor.copy()) if copy else self
elif not pd.api.types.is_object_dtype(dtype) and pd.api.types.is_string_dtype(dtype):
values = np.array([str(t) for t in self._tensor])
if isinstance(dtype, pd.StringDtype):
return dtype.construct_array_type()._from_sequence(values, copy=False)
else:
return values
elif | pd.api.types.is_object_dtype(dtype) | pandas.api.types.is_object_dtype |
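# Hedged illustrative sketch, not part of the original file: the `take` docstring
# above delegates missing-value handling to the `pandas.api.extensions.take` helper.
# A minimal example of that helper on a plain NumPy array; the values are made up.
import numpy as np
import pandas as pd

arr = np.array([10.0, 20.0, 30.0])
# With allow_fill=True, -1 marks a missing position that is filled with fill_value.
out = pd.api.extensions.take(arr, [0, -1, 2], allow_fill=True, fill_value=np.nan)
print(out)  # [10. nan 30.]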
from torch.utils.data.sampler import WeightedRandomSampler
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader
from multiprocessing import cpu_count
import pytorch_lightning as pl
import torch
import functools
import traceback
import psutil
import pandas as pd
class GloveFinetuner(pl.LightningModule):
def __init__(self,
hyperparams,
model_parameters,
dataset_infos,
extra_infos):
super(GloveFinetuner, self).__init__()
#---------- hyperparams
self.learning_rate = hyperparams['learning_rate']
self.batch_size = hyperparams['batch_size']
self.hidden_dim = hyperparams['hidden_dim']
#---------- model_parameters
self.loss_funtion = model_parameters['criterion']
self.label_encoder = model_parameters['label_encoder']
self.glove_dim = model_parameters['glove_dim']
self.glove_vectors = model_parameters['glove_vectors']
#---------- dataset_infos
self.all_data = dataset_infos['all_data']
self.CustomDataset = dataset_infos['CustomDataset']
#---------- extra_infos
self.overfit = extra_infos['overfit']
self.sampler = extra_infos['sampler']
#---------- other_infos
self.predict_proba = torch.nn.Softmax(dim=1)
self.step = 'Experiment'
#---------- Dados para gráfico de Acurácia e Loss
self.df_performance_train_batch = pd.DataFrame(columns=['train_batch_loss','train_batch_acc'])
self.df_performance_train_epoch = pd.DataFrame(columns=['train_epoch_loss','train_epoch_acc'])
self.df_performance_valid_batch = | pd.DataFrame(columns=['valid_batch_loss','valid_batch_acc']) | pandas.DataFrame |
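# Hedged illustrative sketch, not from the original class: one plausible way the
# per-batch performance frames declared above could be filled during training,
# assuming batch loss/accuracy arrive as plain Python floats.
import pandas as pd

df_batch = pd.DataFrame(columns=['train_batch_loss', 'train_batch_acc'])
for batch_loss, batch_acc in [(0.91, 0.55), (0.74, 0.63)]:
    row = pd.DataFrame([{'train_batch_loss': batch_loss, 'train_batch_acc': batch_acc}])
    df_batch = pd.concat([df_batch, row], ignore_index=True)
print(df_batch)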
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 12:07:20 2019
This piece of software is bound by The MIT License (MIT)
Copyright (c) 2019 <NAME>
Code written by : <NAME>
User name - ADM-PKA187
Email ID : <EMAIL>
Created on - Mon Jul 29 09:17:59 2019
version : 1.1
"""
# Importing the required libraries
import pandas as pd
import os
from pathlib import PureWindowsPath
def SisterVesselExtract(df, model_name):
flag = ''
df_final = pd.DataFrame(columns = ['Model name', 'Vessel name', 'IMO number', 'Sister vessel', 'Sister vessel IMO'])
df_inter_1 = pd.DataFrame(columns = ['Model name', 'Vessel name', 'IMO number', 'Sister vessel'])
df_inter_2 = pd.DataFrame(columns = ['Sister vessel IMO'])
df_sister = pd.DataFrame(columns = ['Model name', 'Vessel name', 'IMO number', 'Sister vessel'])
df_sister_imo = pd.DataFrame(columns = ['Sister vessel IMO'])
for index, row in df.iterrows():
if 'IMO number' in str(row['Unnamed: 0']):
imo_number = row['Value']
for x in row[2:]:
df_sister_imo.loc[0] = ({'Sister vessel IMO':str(x)})
df_inter_2 = pd.concat([df_inter_2, df_sister_imo])
continue
if 'Vessel name' in str(row['Unnamed: 0']):
vessel_name = row['Value']
for x in row[2:]:
df_sister.loc[0] = ({'Model name':model_name, 'Vessel name':vessel_name, 'IMO number':imo_number, 'Sister vessel':x})
df_inter_1 = pd.concat([df_inter_1, df_sister])
flag = 'X'
if flag == 'X':
break
df_final = | pd.concat([df_inter_1, df_inter_2], axis=1, sort=False) | pandas.concat |
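# Hedged illustrative sketch, not from the original script: df_final above is built
# by concatenating the vessel frame and the sister-IMO frame side by side; the same
# pattern on tiny made-up frames.
import pandas as pd

left = pd.DataFrame({'Vessel name': ['A'], 'IMO number': ['1234567']})
right = pd.DataFrame({'Sister vessel IMO': ['7654321']})
print(pd.concat([left, right], axis=1, sort=False))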
from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
import pandas.util.testing as tm
class TestSeriesAlterAxes:
def test_setindex(self, string_series):
# wrong type
msg = (
r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed"
)
with pytest.raises(TypeError, match=msg):
string_series.index = None
# wrong length
msg = (
"Length mismatch: Expected axis has 30 elements, new"
" values have 29 elements"
)
with pytest.raises(ValueError, match=msg):
string_series.index = np.arange(len(string_series) - 1)
# works
string_series.index = np.arange(len(string_series))
assert isinstance(string_series.index, Index)
# Renaming
def test_rename(self, datetime_series):
ts = datetime_series
renamer = lambda x: x.strftime("%Y%m%d")
renamed = ts.rename(renamer)
assert renamed.index[0] == renamer(ts.index[0])
# dict
rename_dict = dict(zip(ts.index, renamed.index))
renamed2 = ts.rename(rename_dict)
tm.assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64")
renamed = s.rename({"b": "foo", "d": "bar"})
tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"]))
# index with name
renamer = Series(
np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64"
)
renamed = renamer.rename({})
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
s = Series(range(5), name="foo")
renamer = | Series({1: 10, 2: 20}) | pandas.Series |
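# Hedged illustrative sketch, not from the test file: Series.rename accepts a dict
# (or another Series) as a mapper; index labels missing from the mapper are kept.
import pandas as pd

s = pd.Series(range(5), name="foo")
print(list(s.rename({1: 10, 2: 20}).index))  # [0, 10, 20, 3, 4]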
import streamlit as st
import pandas as pd
import plotly.express as px
import datetime
# Setting Dashboard Interface
st.set_page_config(layout="wide")
st.title('🦠COVID-19 Dashboard')
st.markdown(
'''A collaborative work of building an interactive Covid-19 dashboard to provide insights about COVID globally. [GitHub Project](https://github.com/soledadli/interactive-Covid-19-dashboard)\n
The data is from 01.22.2020 to 05.20.2021 and is collected from the Center for Systems Science and Engineering (CSSE) at Johns Hopkins University. Data source: [COVID-19 Data Repository](https://github.com/CSSEGISandData/COVID-19) ''')
st.subheader("Information about the features.")
with st.beta_expander("Explanation & Tips"):
st.markdown(""" The COVID-19 Dashboard includes the following 6 features for data visualization. \n
1. Choose Template of the plots.
2. Choose a time frame.
3. Choose countries
4. Choose categories from three types of cases:
confirmed deaths, confirmed, and recovered.
5. Choose case view: daily cases or cumulative cases.
6. Choose view between normalized over 100k and non-normalized data.
Tip 1: The graph includes the rolling average of seven last days.
Tip 2: To compare two or more countries in absolute numbers,
it is better to use the option 'Non-normalized data.'
To compare two or more countries with significant differences in population,
the best option is 'Normalized over 100k'.""")
# Experimenting with Data
@st.cache
def load_data( ):
df_cases = pd.read_csv('Data_processed/Dataset_COVID_confiremed_complete.csv', index_col = [0])
df_death= pd.read_csv('Data_processed/Dataset_COVID_Death_complete.csv', index_col=[0])
df_recovered= pd.read_csv('Data_processed/Dataset_COVID_recovered_complete.csv', index_col=[0])
df_population = pd.read_csv('Data_processed/population.csv', index_col=[0])
df_location = pd.read_csv('time_series_covid19_confirmed_global.csv', index_col=[0])
return df_cases,df_death,df_recovered,df_population,df_location
df_cases,df_death,df_recovered,df_population,df_location = load_data()
country_choice = []
def df_selection(list_countries_temp, type,df_cases,df_death,df_recovered):
list_countries_temp[0].append('Date')
columns_to_filter = list_countries_temp[0]
if (type =='Death'):
return df_death[columns_to_filter]
elif (type == 'Recovered'):
return df_recovered[columns_to_filter]
elif (type == 'Confirmed'):
return df_cases[columns_to_filter]
def vis(filter_data,list_countries,dt_choice_normal, dt_choice_cases, start_date, end_date,df_pop):
filter_data['Date'] = pd.to_datetime(filter_data['Date'], format='%m/%d/%y')
filter_data = filter_data.loc[(filter_data['Date'] >= start_date) & (filter_data['Date'] <= end_date),:]
if (dt_choice_normal == 'Non-normalized data'):
if (dt_choice_cases == "Daily Cases"): # view choices
if (len(list_countries[0]) < 3):
filter_data['Average'] = filter_data.iloc[:, 0].rolling(7).mean() # 7 day average calculation
fig = px.line(filter_data, x='Date', y=list_countries[0][0],
width=1000, height=500, template= '%s' %(dt_choice_template)) # 'Daily_France_death')
return fig
elif (len(list_countries[0]) > 2):
# for i in filter_data.columns[0:-1]:
# new_col = '7-day-rolling-' + i
# filter_data[new_col] = filter_data.loc[:, i].rolling(7).mean() # 7 day average calculation
# print(filter_data.columns[len(list_countries[0]):])
fig = px.line(filter_data, x='Date', y=filter_data.columns[0:len(list_countries[0])],
width=1000, height=500, template= '%s' %(dt_choice_template))
#for i in filter_data.columns[len(list_countries[0]):]:
# fig.add_bar(x=filter_data['Date'], y=filter_data[i] , name = i)
return fig
if (dt_choice_cases == "7-day rolling"): # 7-day rolling
if (len(list_countries[0]) < 3):
filter_data['Average'] = filter_data.iloc[:, 0].rolling(7).mean() # 7 day average calculation
fig = px.line(filter_data, x=filter_data['Date'], y=filter_data['Average'],
width=1000, height=500, template='%s' % (dt_choice_template)) # 'Daily_France_death')
return fig
elif (len(list_countries[0]) > 2):
for i in filter_data.columns[0:-1]:
new_col = '7-day-rolling-' + i
filter_data[new_col] = filter_data.loc[:, i].rolling(7).mean() # 7 day average calculation
fig = px.line(filter_data, x='Date', y=filter_data.columns[len(list_countries[0]) :],
width=1000, height=500, template='%s' % (dt_choice_template))
return fig
if (dt_choice_cases == "Cumulative Cases"):
filter_data['Cumulative'] = filter_data.iloc[:, 0].cumsum() # Cumulative Cases Calculation
if (len(list_countries[0]) < 3):
fig = px.line(filter_data, x='Date', y='Cumulative', width=1000,
height=500) # Cumulative Cases Plot
return fig
elif (len(list_countries[0]) > 2):
for i in filter_data.columns[0:-2]:
new_col = 'Cumulative' + i
filter_data[new_col] = filter_data.loc[:, i].cumsum()
fig = px.line(filter_data, x='Date', y=filter_data.columns[(len(list_countries[0])+1):],
width=1000, height=500, template= '%s' %(dt_choice_template))
return fig
elif(dt_choice_normal=='Normalized over 100k'):
if (dt_choice_cases == "Daily Cases"):
if (len(list_countries[0]) < 3):
df_pop= df_pop.reset_index()
population = df_pop[df_pop['Country (or dependency)']==list_countries[0][0]]['Population (2020)']
count_1000 = int(population)/100000
filter_data[list_countries[0][0]] = filter_data[list_countries[0][0]]/count_1000
filter_data['Average'] = filter_data.iloc[:, 0].rolling(7).mean() # 7_day_average Calculation
fig = px.line(filter_data, x='Date', y=list_countries[0][0],
width=1000, height=500, template= '%s' %(dt_choice_template))
# fig.add_bar(x=filter_data['Date'], y=filter_data['Average'], name='7 days Average') # 7_day_average plot
return fig
elif (len(list_countries[0]) > 2):
df_pop = df_pop.reset_index()
for i in range(0,len(list_countries[0])-1):
population = df_pop[df_pop['Country (or dependency)'] == list_countries[0][i]]['Population (2020)']
count_1000 = int(population) / 100000
filter_data[list_countries[0][i]] = filter_data[list_countries[0][i]]/count_1000
for i in filter_data.columns[0:-1]:
new_col = '7-day-rolling-' + i
filter_data[new_col] = filter_data.loc[:, i].rolling(7).mean() # 7 day average calculation
# print(filter_data.columns[len(list_countries[0]):])
fig = px.line(filter_data, x='Date', y=filter_data.columns[0:len(list_countries[0])],
width=1000, height=500, template= '%s' %(dt_choice_template))
return fig
if (dt_choice_cases == "7-day rolling"):
if (len(list_countries[0]) < 3):
df_pop= df_pop.reset_index()
population = df_pop[df_pop['Country (or dependency)']==list_countries[0][0]]['Population (2020)']
count_1000 = int(population)/100000
filter_data[list_countries[0][0]] = filter_data[list_countries[0][0]]/count_1000
filter_data['Average'] = filter_data.iloc[:, 0].rolling(7).mean() # 7_day_average Calculation
fig = px.line(x=filter_data['Date'], y=filter_data['Average'],width=1000, height=500, template= '%s' %(dt_choice_template)) # 7_day_average plot
return fig
elif (len(list_countries[0]) > 2):
df_pop = df_pop.reset_index()
for i in range(0, len(list_countries[0]) - 1):
population = df_pop[df_pop['Country (or dependency)'] == list_countries[0][i]]['Population (2020)']
count_1000 = int(population) / 100000
filter_data[list_countries[0][i]] = filter_data[list_countries[0][i]] / count_1000
for i in filter_data.columns[0:-1]:
new_col = '7-day-rolling-' + i
filter_data[new_col] = filter_data.loc[:, i].rolling(7).mean() # 7 day average calculation
# print(filter_data.columns[len(list_countries[0]):])
fig = px.line(filter_data, x='Date', y=filter_data.columns[len(list_countries[0]) :], width=1000, height=500, template='%s' % (dt_choice_template))
return fig
if (dt_choice_cases == "Cumulative Cases"):
if (len(list_countries[0]) < 3):
df_pop = df_pop.reset_index()
population = df_pop[df_pop['Country (or dependency)'] == list_countries[0][0]]['Population (2020)']
count_1000 = int(population) / 100000
filter_data[list_countries[0][0]] = filter_data[list_countries[0][0]] / count_1000
filter_data['Cumulative'] = filter_data.iloc[:, 0].cumsum() # Cumulative Cases Calculation
fig = px.line(filter_data, x='Date', y='Cumulative',width=1000, height=500, template= '%s' %(dt_choice_template)) # Cumulative Cases Plot
return fig
elif (len(list_countries[0]) > 2):
df_pop = df_pop.reset_index()
for i in range(0, len(list_countries[0]) - 1):
population = df_pop[df_pop['Country (or dependency)'] == list_countries[0][i]]['Population (2020)']
count_1000 = int(population) / 100000
filter_data[list_countries[0][i]] = filter_data[list_countries[0][i]] / count_1000
print(filter_data.head())
for i in filter_data.columns[0:-1]:
new_col = 'Cumulative' + i
filter_data[new_col] = filter_data.loc[:, i].cumsum()
fig = px.line(filter_data, x='Date', y=filter_data.columns[len(list_countries[0]) :],
width=1000, height=500, template= '%s' %(dt_choice_template),
)
return fig
def map_view(df_loca,country_choice,type,df_cases,df_death,df_recovered,dt_choice_normal, dt_choice_cases, start_date, end_date,df_pop):
df_loca_sub = df_loca[['Country/Region','Lat','Long']]
if (type =='Death'):
data_to_work = df_death.copy()
elif (type == 'Recovered'):
data_to_work = df_recovered.copy()
elif (type == 'Confirmed'):
data_to_work= df_cases.copy()
data_to_work['Date'] = | pd.to_datetime(data_to_work['Date'], format='%m/%d/%y') | pandas.to_datetime |
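# Hedged illustrative sketch, not from the dashboard: the JHU-style date strings
# used above parse cleanly with an explicit format, which avoids ambiguity and is
# faster than letting pandas infer the format.
import pandas as pd

dates = pd.Series(["1/22/20", "5/20/21"])
print(pd.to_datetime(dates, format="%m/%d/%y").dt.year.tolist())  # [2020, 2021]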
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>, <NAME>, <NAME>, <NAME>
"""Functions to generate derived time features useful
in forecasting, such as growth, seasonality, holidays.
"""
import inspect
import math
import warnings
from datetime import datetime
import fbprophet.hdays as fbholidays
import holidays
import numpy as np
import pandas as pd
from scipy.special import expit
from greykite.common import constants as cst
def convert_date_to_continuous_time(dt):
"""Converts date to continuous time. Each year is one unit.
Parameters
----------
dt : datetime object
the date to convert
Returns
-------
conti_date : `float`
the date represented in years
"""
year_length = datetime(dt.year, 12, 31).timetuple().tm_yday
tt = dt.timetuple()
return (dt.year +
(tt.tm_yday - 1
+ dt.hour / 24
+ dt.minute / (24 * 60)
+ dt.second / (24 * 3600)) / float(year_length))
def get_default_origin_for_time_vars(df, time_col):
"""Sets default value for origin_for_time_vars
Parameters
----------
df : `pandas.DataFrame`
Training data. A data frame which includes the timestamp and value columns
time_col : `str`
The column name in `df` representing time for the time series data.
Returns
-------
dt_continuous_time : `float`
The time origin used to create continuous variables for time
"""
date = pd.to_datetime(df[time_col][0])
return convert_date_to_continuous_time(date)
def build_time_features_df(dt, conti_year_origin):
"""This function gets a datetime-like vector and creates new columns containing temporal
features useful for time series analysis and forecasting e.g. year, week of year, etc.
Parameters
----------
dt : array-like (1-dimensional)
A vector of datetime-like values
conti_year_origin : float
The origin used for creating continuous time.
Returns
-------
time_features_df : `pandas.DataFrame`
Dataframe with the following time features.
* "datetime": `datetime.datetime` object, a combination of date and a time
* "date": `datetime.date` object, date with the format (year, month, day)
* "year": integer, year of the date e.g. 2018
* "year_length": integer, number of days in the year e.g. 365 or 366
* "quarter": integer, quarter of the date, 1, 2, 3, 4
* "quarter_start": `pandas.DatetimeIndex`, date of beginning of the current quarter
* "quarter_length": integer, number of days in the quarter, 90/91 for Q1, 91 for Q2, 92 for Q3 and Q4
* "month": integer, month of the year, January=1, February=2, ..., December=12
* "month_length": integer, number of days in the month, 28/ 29/ 30/ 31
* "woy": integer, ISO 8601 week of the year where a week starts from Monday, 1, 2, ..., 53
* "doy": integer, ordinal day of the year, 1, 2, ..., year_length
* "doq": integer, ordinal day of the quarter, 1, 2, ..., quarter_length
* "dom": integer, ordinal day of the month, 1, 2, ..., month_length
* "dow": integer, day of the week, Monday=1, Tuesday=2, ..., Sunday=7
* "str_dow": string, day of the week as a string e.g. "1-Mon", "2-Tue", ..., "7-Sun"
* "str_doy": string, day of the year e.g. "2020-03-20" for March 20, 2020
* "hour": integer, discrete hours of the datetime, 0, 1, ..., 23
* "minute": integer, minutes of the datetime, 0, 1, ..., 59
* "second": integer, seconds of the datetime, 0, 1, ..., 3599
* "year_month": string, (year, month) e.g. "2020-03" for March 2020
* "year_woy": string, (year, week of year) e.g. "2020_42" for 42nd week of 2020
* "month_dom": string, (month, day of month) e.g. "02/20" for February 20th
* "year_woy_dow": string, (year, week of year, day of week) e.g. "2020_03_6" for Saturday of 3rd week in 2020
* "woy_dow": string, (week of year, day of week) e.g. "03_6" for Saturday of 3rd week
* "dow_hr": string, (day of week, hour) e.g. "4_09" for 9am on Thursday
* "dow_hr_min": string, (day of week, hour, minute) e.g. "4_09_10" for 9:10am on Thursday
* "tod": float, time of day, continuous, 0.0 to 24.0
* "tow": float, time of week, continuous, 0.0 to 7.0
* "tom": float, standardized time of month, continuous, 0.0 to 1.0
* "toq": float, time of quarter, continuous, 0.0 to 1.0
* "toy": float, standardized time of year, continuous, 0.0 to 1.0
* "conti_year": float, year in continuous time, eg 2018.5 means middle of the year 2018
* "is_weekend": boolean, weekend indicator, True for weekend, else False
* "dow_grouped": string, Monday-Thursday=1234-MTuWTh, Friday=5-Fri, Saturday=6-Sat, Sunday=7-Sun
* "ct1": float, linear growth based on conti_year_origin, -infinity to infinity
* "ct2": float, signed quadratic growth, -infinity to infinity
* "ct3": float, signed cubic growth, -infinity to infinity
* "ct_sqrt": float, signed square root growth, -infinity to infinity
* "ct_root3": float, signed cubic root growth, -infinity to infinity
"""
dt = pd.DatetimeIndex(dt)
if len(dt) == 0:
raise ValueError("Length of dt cannot be zero.")
# basic time features
date = dt.date
year = dt.year
year_length = (365.0 + dt.is_leap_year)
quarter = dt.quarter
month = dt.month
month_length = dt.days_in_month
# finds first day of quarter
quarter_start = pd.DatetimeIndex(
dt.year.map(str) + "-" + (3 * quarter - 2).map(int).map(str) + "-01")
next_quarter_start = dt + pd.tseries.offsets.QuarterBegin(startingMonth=1)
quarter_length = (next_quarter_start - quarter_start).days
# finds offset from first day of quarter (rounds down to nearest day)
doq = ((dt - quarter_start) / pd.to_timedelta("1D") + 1).astype(int)
# week of year, "woy", follows ISO 8601:
# - Week 01 is the week with the year's first Thursday in it.
# - A week begins with Monday and ends with Sunday.
# So the week number of the week that overlaps both years, is 1, 52, or 53,
# depending on whether it has more days in the previous year or new year.
# - e.g. Jan 1st, 2018 is Monday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 1, 2]
# - e.g. Jan 1st, 2019 is Tuesday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 2, 2]
# - e.g. Jan 1st, 2020 is Wednesday. woy of first 8 days = [1, 1, 1, 1, 1, 2, 2, 2]
# - e.g. Jan 1st, 2015 is Thursday. woy of first 8 days = [1, 1, 1, 1, 2, 2, 2, 2]
# - e.g. Jan 1st, 2021 is Friday. woy of first 8 days = [53, 53, 53, 1, 1, 1, 1, 1]
# - e.g. Jan 1st, 2022 is Saturday. woy of first 8 days = [52, 52, 1, 1, 1, 1, 1, 1]
# - e.g. Jan 1st, 2023 is Sunday. woy of first 8 days = [52, 1, 1, 1, 1, 1, 1, 1]
woy = dt.strftime("%V").astype(int)
doy = dt.dayofyear
dom = dt.day
dow = dt.strftime("%u").astype(int)
str_dow = dt.strftime("%u-%a") # e.g. 1-Mon, 2-Tue, ..., 7-Sun
hour = dt.hour
minute = dt.minute
second = dt.second
# grouped time feature
str_doy = dt.strftime("%Y-%m-%d") # e.g. 2020-03-20 for March 20, 2020
year_month = dt.strftime("%Y-%m") # e.g. 2020-03 for March 2020
month_dom = dt.strftime("%m/%d") # e.g. 02/20 for February 20th
year_woy = dt.strftime("%Y_%V") # e.g. 2020_42 for 42nd week of 2020
year_woy_dow = dt.strftime("%Y_%V_%u") # e.g. 2020_03_6 for Saturday of 3rd week in 2020
woy_dow = dt.strftime("%W_%u") # e.g. 03_6 for Saturday of 3rd week
dow_hr = dt.strftime("%u_%H") # e.g. 4_09 for 9am on Thursday
dow_hr_min = dt.strftime("%u_%H_%M") # e.g. 4_09_10 for 9:10am on Thursday
# derived time features
tod = hour + (minute / 60.0) + (second / 3600.0)
tow = dow - 1 + (tod / 24.0)
tom = (dom - 1 + (tod / 24.0)) / month_length
toq = (doq - 1 + (tod / 24.0)) / quarter_length
# time of year, continuous, 0.0 to 1.0. e.g. Jan 1, 12 am = 0/365, Jan 2, 12 am = 1/365, ...
# To handle leap years, Feb 28 = 58/365 - 59/365, Feb 29 = 59/365, Mar 1 = 59/365 - 60/365
# offset term is nonzero only in leap years
# doy_offset reduces doy by 1 from from Mar 1st (doy > 60)
doy_offset = (year_length == 366) * 1.0 * (doy > 60)
# tod_offset sets tod to 0 on Feb 29th (doy == 60)
tod_offset = 1 - (year_length == 366) * 1.0 * (doy == 60)
toy = (doy - 1 - doy_offset + (tod / 24.0) * tod_offset) / 365.0
# year of date in continuous time, eg 2018.5 means middle of year 2018
# this is useful for modeling features that do not care about leap year e.g. environmental variables
conti_year = year + (doy - 1 + (tod / 24.0)) / year_length
is_weekend = pd.Series(dow).apply(lambda x: x in [6, 7]).values # weekend indicator
# categorical var with levels (Mon-Thu, Fri, Sat, Sun), could help when training data are sparse.
dow_grouped = pd.Series(str_dow).apply(lambda x: "1234-MTuWTh" if (x in ["1-Mon", "2-Tue", "3-Wed", "4-Thu"]) else x).values
# growth terms
ct1 = conti_year - conti_year_origin
ct2 = signed_pow(ct1, 2)
ct3 = signed_pow(ct1, 3)
ct_sqrt = signed_pow(ct1, 1/2)
ct_root3 = signed_pow(ct1, 1/3)
# All keys must be added to constants.
features_dict = {
"datetime": dt,
"date": date,
"year": year,
"year_length": year_length,
"quarter": quarter,
"quarter_start": quarter_start,
"quarter_length": quarter_length,
"month": month,
"month_length": month_length,
"woy": woy,
"doy": doy,
"doq": doq,
"dom": dom,
"dow": dow,
"str_dow": str_dow,
"str_doy": str_doy,
"hour": hour,
"minute": minute,
"second": second,
"year_month": year_month,
"year_woy": year_woy,
"month_dom": month_dom,
"year_woy_dow": year_woy_dow,
"woy_dow": woy_dow,
"dow_hr": dow_hr,
"dow_hr_min": dow_hr_min,
"tod": tod,
"tow": tow,
"tom": tom,
"toq": toq,
"toy": toy,
"conti_year": conti_year,
"is_weekend": is_weekend,
"dow_grouped": dow_grouped,
"ct1": ct1,
"ct2": ct2,
"ct3": ct3,
"ct_sqrt": ct_sqrt,
"ct_root3": ct_root3,
}
df = pd.DataFrame(features_dict)
return df
def add_time_features_df(df, time_col, conti_year_origin):
"""Adds a time feature data frame to a data frame
:param df: the input data frame
:param time_col: the name of the time column of interest
:param conti_year_origin: the origin of time for the continuous time variable
:return: the same data frame (df) augmented with new columns
"""
df = df.reset_index(drop=True)
time_df = build_time_features_df(
dt=df[time_col],
conti_year_origin=conti_year_origin)
time_df = time_df.reset_index(drop=True)
return | pd.concat([df, time_df], axis=1) | pandas.concat |
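# Hedged illustrative sketch, not from the library: what "continuous time" means in
# convert_date_to_continuous_time above -- July 2, 2018 is day 183 of a 365-day
# year, so it maps to roughly 2018 + 182/365.
from datetime import datetime

dt = datetime(2018, 7, 2)
year_length = datetime(dt.year, 12, 31).timetuple().tm_yday  # 365
print(round(dt.year + (dt.timetuple().tm_yday - 1) / float(year_length), 4))  # 2018.4986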
from copy import deepcopy
from distutils.version import LooseVersion
from operator import methodcaller
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .test_generic import Generic
try:
import xarray
_XARRAY_INSTALLED = True
except ImportError:
_XARRAY_INSTALLED = False
class TestDataFrame(Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_set_axis_name(self):
df = pd.DataFrame([[1, 2], [3, 4]])
funcs = ['_set_axis_name', 'rename_axis']
for func in funcs:
result = methodcaller(func, 'foo')(df)
assert df.index.name is None
assert result.index.name == 'foo'
result = methodcaller(func, 'cols', axis=1)(df)
assert df.columns.name is None
assert result.columns.name == 'cols'
def test_set_axis_name_mi(self):
df = DataFrame(
np.empty((3, 3)),
index=MultiIndex.from_tuples([("A", x) for x in list('aBc')]),
columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')])
)
level_names = ['L1', 'L2']
funcs = ['_set_axis_name', 'rename_axis']
for func in funcs:
result = methodcaller(func, level_names)(df)
assert result.index.names == level_names
assert result.columns.names == [None, None]
result = methodcaller(func, level_names, axis=1)(df)
assert result.columns.names == ["L1", "L2"]
assert result.index.names == [None, None]
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
assert df.bool()
df = DataFrame([[False]])
assert not df.bool()
df = DataFrame([[False, False]])
with pytest.raises(ValueError):
df.bool()
with pytest.raises(ValueError):
bool(df)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
assert result.filename == 'fname1.csv|fname2.csv'
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
assert result.filename == 'foo+foo'
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x': [1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert df.y == 5
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
@pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and
LooseVersion(xarray.__version__) <
LooseVersion('0.10.0'),
reason='xarray >= 0.10.0 required')
@pytest.mark.parametrize(
"index", ['FloatIndex', 'IntIndex',
'StringIndex', 'UnicodeIndex',
'DateIndex', 'PeriodIndex',
'CategoricalIndex', 'TimedeltaIndex'])
def test_to_xarray_index_types(self, index):
from xarray import Dataset
index = getattr(tm, 'make{}'.format(index))
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101',
periods=3,
tz='US/Eastern')}
)
df.index = index(3)
df.index.name = 'foo'
df.columns.name = 'bar'
result = df.to_xarray()
assert result.dims['foo'] == 3
assert len(result.coords) == 1
assert len(result.data_vars) == 8
assert_almost_equal(list(result.coords.keys()), ['foo'])
assert isinstance(result, Dataset)
# idempotency
# categoricals are not preserved
# datetimes w/tz are not preserved
# column names are lost
expected = df.copy()
expected['f'] = expected['f'].astype(object)
expected['h'] = expected['h'].astype('datetime64[ns]')
expected.columns.name = None
assert_frame_equal(result.to_dataframe(), expected,
check_index_type=False, check_categorical=False)
@ | td.skip_if_no('xarray', min_version='0.7.0') | pandas.util._test_decorators.skip_if_no |
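# Hedged illustrative sketch, not from the test suite; assumes xarray is installed.
# The round trip exercised by test_to_xarray_index_types, in miniature:
# DataFrame -> xarray.Dataset -> DataFrame.
import pandas as pd

df = pd.DataFrame({"a": list("abc"), "b": range(3)})
df.index.name = "foo"
ds = df.to_xarray()             # Dataset with "foo" as its only coordinate
print(ds.to_dataframe().shape)  # (3, 2)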
import os
import pytest
import yaml
import numpy as np
import pandas as pd
from collections import namedtuple
from datetime import datetime, timedelta, date
from unittest import mock
from prophet import Prophet
import mlflow
import mlflow.prophet
import mlflow.utils
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.models.utils import _read_example
from mlflow.models import infer_signature, Model
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import
from tests.helper_functions import (
_compare_conda_env_requirements,
_assert_pip_requirements,
pyfunc_serve_and_score_model,
_compare_logged_code_paths,
)
class DataGeneration:
def __init__(self, **kwargs):
self.shift = kwargs["shift"]
self.start = datetime.strptime(kwargs["start"], "%Y-%m-%d")
self.size = kwargs["size"]
self.date_field = kwargs["date_field"]
self.target_field = kwargs["target_field"]
self.seasonal_period = kwargs["seasonal_period"]
self.seasonal_freq = kwargs["seasonal_freq"]
np.random.seed(42)
def _period_gen(self):
period = np.sin(np.arange(0, self.seasonal_period, self.seasonal_freq)) * 50 + 50
return np.tile(
period, int(np.ceil(self.size / (self.seasonal_period / self.seasonal_freq)))
)[: self.size]
def _generate_raw(self):
base = np.random.lognormal(mean=2.0, sigma=0.92, size=self.size)
seasonal = [
np.polyval([-5.0, -1.0], x) for x in np.linspace(start=0, stop=2, num=self.size)
]
series = (
np.linspace(start=45.0, stop=90.0, num=self.size) + base + seasonal + self._period_gen()
)
return series
def _generate_linear_data(self):
DataStruct = namedtuple("DataStruct", "dates, series")
series = self._generate_raw()
date_ranges = np.arange(
self.start, self.start + timedelta(days=self.size), timedelta(days=1)
).astype(date)
return DataStruct(date_ranges, series)
def _generate_shift_data(self):
DataStruct = namedtuple("DataStruct", "dates, series")
raw = self._generate_raw()[: int(self.size * 0.6)]
temperature = np.concatenate((raw, raw / 2.0)).ravel()[: self.size]
date_ranges = np.arange(
self.start, self.start + timedelta(days=self.size), timedelta(days=1)
).astype(date)
return DataStruct(date_ranges, temperature)
def _gen_series(self):
if self.shift:
return self._generate_shift_data()
else:
return self._generate_linear_data()
def create_series_df(self):
gen_data = self._gen_series()
temporal_df = pd.DataFrame.from_records(gen_data).T
temporal_df.columns = [self.date_field, self.target_field]
return temporal_df
TEST_CONFIG = {
"shift": False,
"start": "2011-07-25",
"size": 365 * 4,
"seasonal_period": 7,
"seasonal_freq": 0.1,
"date_field": "ds",
"target_field": "y",
}
FORECAST_HORIZON = 60
SEED = 98765
HORIZON_FIELD_NAME = "horizon"
TARGET_FIELD_NAME = "yhat"
DS_FORMAT = "%Y-%m-%dT%H:%M:%S"
INFER_FORMAT = "%Y-%m-%d %H:%M:%S"
ModelWithSource = namedtuple("ModelWithSource", ["model", "data"])
pytestmark = pytest.mark.large
@pytest.fixture(scope="module")
def prophet_model():
np.random.seed(SEED)
data = DataGeneration(**TEST_CONFIG).create_series_df()
model = Prophet().fit(data)
return ModelWithSource(model, data)
@pytest.fixture
def model_path(tmpdir):
return os.path.join(str(tmpdir), "model")
@pytest.fixture
def prophet_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["pystan", "prophet", "pytest"])
return conda_env
def future_horizon_df(model, horizon):
return model.make_future_dataframe(periods=horizon)
def generate_forecast(model, horizon):
return model.predict(model.make_future_dataframe(periods=horizon))[TARGET_FIELD_NAME]
def test_model_native_save_load(prophet_model, model_path):
model = prophet_model.model
mlflow.prophet.save_model(pr_model=model, path=model_path)
loaded_model = mlflow.prophet.load_model(model_uri=model_path)
np.testing.assert_array_equal(
generate_forecast(model, FORECAST_HORIZON),
loaded_model.predict(future_horizon_df(loaded_model, FORECAST_HORIZON))[TARGET_FIELD_NAME],
)
def test_model_pyfunc_save_load(prophet_model, model_path):
model = prophet_model.model
mlflow.prophet.save_model(pr_model=model, path=model_path)
loaded_pyfunc = pyfunc.load_model(model_uri=model_path)
horizon_df = future_horizon_df(model, FORECAST_HORIZON)
np.testing.assert_array_equal(
generate_forecast(model, FORECAST_HORIZON),
loaded_pyfunc.predict(horizon_df)[TARGET_FIELD_NAME],
)
def test_signature_and_examples_saved_correctly(prophet_model):
data = prophet_model.data
model = prophet_model.model
horizon_df = future_horizon_df(model, FORECAST_HORIZON)
signature_ = infer_signature(data, model.predict(horizon_df))
example_ = data[0:5].copy(deep=False)
example_["y"] = | pd.to_numeric(example_["y"]) | pandas.to_numeric |
# %% Packages
import os
import shutil
import glob
import numpy as np
from PIL import Image
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pyhocon import ConfigTree
from typing import List, Tuple
from sklearn.utils.class_weight import compute_sample_weight
from src.base_classes.task import PreProcessing
from src.utils.logging import get_logger
# %% Logger
logger = get_logger()
# %% Code
class TaskPreprocessClassification(PreProcessing):
name = "task_preprocess_classification"
dependencies = ["task_scrapping_images"]
def __init__(self, config: ConfigTree) -> None:
super().__init__(config, self.name)
def run(self):
logger.info("Loading the meta information")
meta_df_path = self.path_input.get_string("processed_meta_information")
meta_df = self.load_pickle(meta_df_path)
logger.info("Create the target feature")
meta_df.loc[:, "target"] = self.create_target(meta_df)
logger.info("Checking the imbalance")
adj_meta_df = self.adjust_imbalance(meta_df)
logger.info("Create the image paths from the image number")
adj_meta_df_w_path = self.add_image_path(adj_meta_df)
logger.info("Adding the sample weight to the dataframe")
adj_meta_df_w_path_weight = self.add_sample_weight(adj_meta_df_w_path)
logger.info("Save the processed dataframe")
final_df = adj_meta_df_w_path_weight.loc[:, ["target", "paths", "weight_col"]]
dataframe_path = self.path_output.get_string("clf_processed")
self.save_pickle(saving_path=dataframe_path, file=final_df)
logger.info("Plot example images with label")
self.plot_label_examples(final_df)
def add_sample_weight(self, data: pd.DataFrame) -> pd.DataFrame:
"""This method extracts the appropriate sample weight in order to counter
the target imbalance. These sample weights are of course only used during the
training phase of the model and neither in the predictions nor evaluation.
:param data: Dataframe without the sample weights
:type data: pd.DataFrame
:return: Dataframe with sample weights
:rtype: pd.DataFrame
"""
y = data.loc[:, "target"]
data.loc[:, "weight_col"] = compute_sample_weight(class_weight="balanced", y=y)
df_weight = data.loc[:, ["target", "weight_col"]].drop_duplicates()
fname = self.get_figure_path("sample_weights")
fig, axs = plt.subplots(figsize=(10, 5))
axs.bar(x=df_weight.loc[:, "target"], height=df_weight.loc[:, "weight_col"])
axs.set_xlabel("Categories")
axs.set_ylabel("Sample weight")
axs.tick_params(axis="x", rotation=90)
fig.savefig(fname=fname, bbox_inches="tight")
return data
def adjust_imbalance(self, data: pd.DataFrame) -> pd.DataFrame:
"""This method checks and plots the imbalance of the target value. Afterwards,
it deletes all observations which belong to a minority class. A minority class
is defined as having fewer observations than the threshold specified
in the configuration file.
:param data: Dataframe containing the target column and all observations.
:type data: pd.DataFrame
:return: Dataframe containing only the observations which occur more often
than the specified threshold.
:rtype: pd.DataFrame
"""
logger.info("Plotting the imbalance of the target categories")
continent_list = self.parameters.get_list("continent_list")
target = data.loc[:, "target"]
self.plot_imbalance(target, continent_list)
logger.info("Extracting majority categories")
category_boolean = data.loc[:, "continent"].isin(continent_list)
return data.loc[category_boolean, :].reset_index(drop=True)
def get_majority_boolean(
self, multi_label: List[Tuple[str]], threshold_value: int
) -> List[bool]:
"""This method checks which multi-target has more observations than the
threshold.
:param multi_label: List of multi-label target
:type multi_label: List[Tuple[str]]
:param threshold_value: Threshold that the number of occurrences must exceed
in order to pass
:type threshold_value: int
:return: A list of booleans which category is above the threshold
:rtype: List[bool]
"""
target_above_thresh = | pd.Series(multi_label) | pandas.Series |
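# Hedged illustrative sketch, not from the task class: how the "balanced" sample
# weights used in add_sample_weight above behave -- rarer labels receive larger
# weights, n_samples / (n_classes * count_of_label).
from sklearn.utils.class_weight import compute_sample_weight

y = ["europe", "europe", "europe", "asia"]
print(compute_sample_weight(class_weight="balanced", y=y))  # approx. [0.67 0.67 0.67 2.0]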
import yfinance as yf
import datetime as dt
import pandas as pd
import time
from yahoo_fin import stock_info as si
pd.set_option('display.max_columns', None)
mylist = []
today = dt.date.today()
mylist.append(today)
today = mylist[0]
#Asks for stock ticker
stocks = si.tickers_sp500()
stocks = [item.replace(".", "-") for item in stocks]
watch = []
watch_pct = []
watch_mean = []
watch_std = []
def_watch = []
def_watch_pct = []
def_watch_mean = []
def_watch_std = []
must_watch = []
must_watch_pct = []
must_watch_mean = []
must_watch_std = []
for stock in stocks:
try:
df = pd.read_csv(f'/Users/shashank/Documents/Code/Python/Outputs/S&P500/{stock}.csv', index_col=0)
df = df[['Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']]
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
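# Hedged illustrative sketch, not from the original script: the same load-and-convert
# pattern as above on an in-memory CSV, so it runs without the local S&P 500 files.
import io
import pandas as pd

csv_text = "Date,Close\n2021-01-04,100.0\n2021-01-05,101.5\n"
df = pd.read_csv(io.StringIO(csv_text), index_col=0)
df.index = pd.to_datetime(df.index)
print(df.index.dtype)  # datetime64[ns]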
import pandas as pd
def to_df(figure):
"""
Extracts the data from a Plotly Figure
Parameters
----------
figure : plotly_figure
Figure from which data will be
extracted
Returns a DataFrame or list of DataFrame
"""
dfs=[]
for trace in figure['data']:
if 'scatter' in trace['type']:
try:
if type(trace['x'][0])==float:
index=trace['x']
else:
index=pd.to_datetime(trace['x'])
except:
index=trace['x']
if 'marker' in trace:
d={}
if 'size' in trace['marker']:
size=trace['marker']['size']
if type(size)!=list:
size=[size]*len(index)
d['size']=size
if 'text' in trace:
d['text']=trace['text']
if 'name' in trace:
name=trace['name']
if type(name)!=list:
name=[name]*len(index)
d['categories']=name
d['y']=trace['y']
d['x']=trace['x']
if 'z' in trace:
d['z']=trace['z']
df=pd.DataFrame(d)
else:
df=pd.Series(trace['y'],index=index,name=trace['name'])
dfs.append(df)
elif trace['type'] in ('heatmap','surface'):
df=pd.DataFrame(trace['z'].transpose(),index=trace['x'],columns=trace['y'])
dfs.append(df)
elif trace['type'] in ('box','histogram'):
vals=trace['x'] if 'x' in trace else trace['y']
df=pd.DataFrame({trace['name']:vals})
dfs.append(df)
if max(list(map(len,dfs)))==min(list(map(len,dfs))):
if len(dfs)==1:
return dfs[0]
else:
if type(dfs[0])==pd.core.series.Series:
return pd.concat(dfs,axis=1)
if all(dfs[0].columns==dfs[1].columns):
return pd.concat(dfs,axis=0)
else:
return | pd.concat(dfs,axis=1) | pandas.concat |
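# Hedged illustrative sketch, not from cufflinks: the merge rule at the end of
# to_df above -- equal-length Series are concatenated as columns, while DataFrames
# sharing the same columns are stacked as rows.
import pandas as pd

s1 = pd.Series([1, 2, 3], name="trace_a")
s2 = pd.Series([4, 5, 6], name="trace_b")
print(pd.concat([s1, s2], axis=1).shape)  # (3, 2) -- one column per trace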
import os
os.environ['CUDA_VISIBLE_DEVICES']='4'
import argparse
import logging
import torch
from torchvision import transforms
from seq2seq.trainer.supervised_trainer import SupervisedTrainer
from seq2seq.models.DecoderRNN import DecoderRNN
from seq2seq.models.EncoderRNN import EncoderRNN
from seq2seq.models.seq2seq import Seq2Seq
from seq2seq.dataset.WFDataset import WFDataset,ToTensor
from util.checkpoint import Checkpoint
import torch.optim as optim
import torch.nn as nn
from torch.optim.lr_scheduler import StepLR
from seq2seq.optim.optim import Optimizer
raw_input = input # Python 3
import pandas as pd
import numpy as np
import util.Scaler as Scaler
import util.StdScaler as StdScaler
from seq2seq.loss.myloss import WFMSELoss
parser = argparse.ArgumentParser()
parser.add_argument('--expt_dir', action='store', dest='expt_dir', default='./experiement_default',
help='Path to experiment directory. If load_checkpoint is True, then path to checkpoint directory has to be provided')
parser.add_argument('--load_checkpoint', action='store', dest='load_checkpoint',
help='The name of the checkpoint to load, usually an encoded time string')
parser.add_argument('--resume', action='store_true', dest='resume',
default=False,
help='Indicates if training has to be resumed from the latest checkpoint')
parser.add_argument('--log-level', dest='log_level',
default='info',
help='Logging level.')
parser.add_argument('--use_attention',dest='use_attention',default=False)
parser.add_argument('--rnn_cell_type',dest='rnn_cell_type',default='lstm')
parser.add_argument('--rnn_layers',dest='rnn_layers',default=5,type=int)
parser.add_argument('--rnn_dropout',dest='rnn_dropout',default=0,type=float)
parser.add_argument('--batch_size',default=16,dest='batch_size',type=int)
parser.add_argument('--device',default='cpu',dest='device')
parser.add_argument('--lr',default=1e-4,dest='lr',type=float)
parser.add_argument('--weight_decay',default=5e-4,dest='weight_decay',type=float)
parser.add_argument('--use_custome_loss',dest='use_custome_loss',default=False,type=bool)
parser.add_argument('--scaler',dest='scaler',default='minmax',type=str)
parser.add_argument('--begin_compute_loss_index',dest='begin_compute_loss_index',default=0,type=int)
opt = parser.parse_args()
LOG_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
logging.basicConfig(format=LOG_FORMAT, level=getattr(logging, opt.log_level.upper()))
logging.info(opt)
feature_columns=['t2m_obs', 'rh2m_obs', 'w10m_obs', 't2m_prophet','rh2m_prophet','w10m_prophet',
't2m_M','rh2m_M','w10m_M','hour_sin','hour_cos','month_sin','month_cos',
'psur_obs', 'q2m_obs', 'd10m_obs', 'u10m_obs',
'v10m_obs', 'RAIN_obs','psfc_M', 'q2m_M', 'd10m_M', 'u10m_M', 'v10m_M',
'SWD_M', 'GLW_M', 'HFX_M', 'RAIN_M', 'PBLH_M', 'TC975_M', 'TC925_M',
'TC850_M', 'TC700_M', 'TC500_M', 'wspd925_M', 'wspd850_M', 'wspd700_M', 'wspd500_M',
'location_90001','location_90002','location_90003','location_90004',
'location_90005','location_90006','location_90007','location_90008',
'location_90009','location_90010'] # remove LH_M
print(len(feature_columns))
device = torch.device(opt.device)
train_array_list=[]
validation_array_list=[]
if opt.scaler=='minmax':
scaler=Scaler.Scaler(Scaler.MINMAX_DICT,feature_range=(-1,1))
else:
scaler=StdScaler.StdScaler(StdScaler.MEANSTD_DICT)
for i in range(90001,90011):
train_data = pd.read_csv('../data/train/merge/merged_' + str(i) + '.csv', index_col=0)
train_data.index= | pd.to_datetime(train_data.index) | pandas.to_datetime |
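# Hedged illustrative sketch, not from the training script: the custom Scaler above
# is configured with feature_range=(-1, 1); scikit-learn's MinMaxScaler shows the
# same kind of rescaling on a toy frame (assumption: the custom class behaves similarly).
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

toy = pd.DataFrame({"t2m_obs": [10.0, 20.0, 30.0]})
print(MinMaxScaler(feature_range=(-1, 1)).fit_transform(toy).ravel())  # [-1. 0. 1.]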
import os
import streamlit as st
import pandas as pd
import plotly.express as px
from PIL import Image
favicon = Image.open("media/favicon.ico")
st.set_page_config(
page_title = "AICS Results",
page_icon = favicon,
menu_items={
'Get Help': 'https://github.com/All-IISER-Cubing-Society/Results',
'Report a bug': "https://github.com/All-IISER-Cubing-Society/Results/issues",
'About': "AICS Results is a Streamlit app to visualize data of weekly event results. Contact Purva at AICS for any issues or help."
}
)
results = "results/"
@st.cache
def load_data():
# Get all files in the results directory
files = os.listdir("results")
frames = []
# Loop through all files and append dataframes to a list
for f in files:
df = pd.read_csv(os.path.join("results", f))
# Convert Date column to datetime field
df['Date'] = pd.to_datetime(df['Date'])
# Create an event column
event = os.path.splitext(f)[0]
df['Event'] = [event for i in range(len(df))]
# Append to list
frames.append(df)
# Create combined data frame
cdf = pd.concat(frames)
return cdf
@st.cache
def load_event_data(data, name, events):
frames = []
for event in events:
df = data[data['Event'] == event]
frames.append(df)
combined_data = | pd.concat(frames) | pandas.concat |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
        # GH 2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = | Series([], dtype="M8[ns]") | pandas.Series |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import pandas as pd
import numpy as np
from pm4py.algo.clustering.trace_attribute_driven.variants import act_dist_calc, suc_dist_calc
from pm4py.algo.clustering.trace_attribute_driven.util import filter_subsets
from scipy.spatial.distance import pdist
from pm4py.util import exec_utils
from enum import Enum
from pm4py.util import constants
class Parameters(Enum):
ATTRIBUTE_KEY = constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY
ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
SINGLE = "single"
BINARIZE = "binarize"
POSITIVE = "positive"
LOWER_PERCENT = "lower_percent"
def inner_prod_calc(df):
innerprod = ((df.loc[:, 'freq_x']) * (df.loc[:, 'freq_y'])).sum()
sqrt_1 = np.sqrt(((df.loc[:, 'freq_x']) ** 2).sum())
sqrt_2 = np.sqrt(((df.loc[:, 'freq_y']) ** 2).sum())
return innerprod, sqrt_1, sqrt_2
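# Note: inner_prod_calc returns the building blocks of a cosine similarity; callers
# presumably combine them as cos_sim = innerprod / (sqrt_1 * sqrt_2).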
def dist_calc(var_list_1, var_list_2, log1, log2, freq_thres, num, alpha, parameters=None):
'''
    This function compares the activity similarity between two sublogs via their two lists of variants.
    :param var_list_1: list of variants in sublog 1
    :param var_list_2: list of variants in sublog 2
    :param freq_thres: frequency threshold, same as in sublog2df()
    :param num: passed through to sublog2df()
    :param log1: input sublog 1 for sublog2df(), which must correspond to var_list_1
    :param log2: input sublog 2 for sublog2df(), which must correspond to var_list_2
    :param alpha: weight between activity similarity and succession similarity, in the interval (0, 1)
    :param parameters: states which linkage method to use
:return: the similarity value between two sublogs
'''
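    # Assumed intuition for the return value: an activity-based similarity and a
    # succession-based similarity are combined with weight alpha, roughly
    # sim = alpha * act_sim + (1 - alpha) * suc_sim.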
if parameters is None:
parameters = {}
single = exec_utils.get_param_value(Parameters.SINGLE, parameters, False)
if len(var_list_1) >= len(var_list_2):
max_len = len(var_list_1)
min_len = len(var_list_2)
max_var = var_list_1
min_var = var_list_2
var_count_max = filter_subsets.sublog2df(log1, freq_thres, num)['count']
var_count_min = filter_subsets.sublog2df(log2, freq_thres, num)['count']
else:
max_len = len(var_list_2)
min_len = len(var_list_1)
max_var = var_list_2
min_var = var_list_1
var_count_max = filter_subsets.sublog2df(log2, freq_thres, num)['count']
var_count_min = filter_subsets.sublog2df(log1, freq_thres, num)['count']
# act
max_per_var_act = np.zeros(max_len)
max_freq_act = np.zeros(max_len)
col_sum_act = np.zeros(max_len)
# suc
max_per_var_suc = np.zeros(max_len)
col_sum_suc = np.zeros(max_len)
max_freq_suc = np.zeros(max_len)
if var_list_1 == var_list_2:
print("Please give different variant lists!")
else:
for i in range(max_len):
dist_vec_act = np.zeros(min_len)
dist_vec_suc = np.zeros(min_len)
df_1_act = act_dist_calc.occu_var_act(max_var[i])
df_1_suc = suc_dist_calc.occu_var_suc(max_var[i], parameters={"binarize": True})
for j in range(min_len):
df_2_act = act_dist_calc.occu_var_act(min_var[j])
df_2_suc = suc_dist_calc.occu_var_suc(min_var[j], parameters={"binarize": True})
df_act = | pd.merge(df_1_act, df_2_act, how='outer', on='var') | pandas.merge |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Runs a Pipeline requesting every column of the dataset for a single day
        and checks the result against the frame built by make_expected_out().
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test that runs a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
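    # For example, the first few generated cases are
    # (Timestamp('2015-01-09', tz='utc'), 1), (Timestamp('2015-01-09', tz='utc'), 2),
    # (Timestamp('2015-01-15', tz='utc'), 1), ...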
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
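        # Shuffle the event rows (with a fixed seed for reproducibility) so the
        # loaders cannot rely on events arriving in any particular order.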
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
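        # For example, with event sids [0, 10, 20] this returns every sid
        # from 0 through 20, filling in the gaps between them.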
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
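        # For example, if start_date is three trading days after
        # window_test_start_date, window_len is 4, so the factor window always
        # reaches back to the first date for which we have data.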
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test that runs a
    Pipeline with an estimates loader over differently-sized windows and with
    split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # the split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
                 (20, 121, pd.Timestamp('2015-01-07')),
# Copyright 2017-2020 Lawrence Livermore National Security, LLC and other
# CallFlow Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import pandas as pd
class RuntimeScatterplot:
    def __init__(self, state, module):
        self.state = state
        self.graph = state.new_gf.graph
        self.df = state.new_gf.df
        self.module = module
self.entry_funcs = {}
self.run()
def run(self):
ret = []
        # self.state is stored in __init__ so the full dataframe is available here
        entire_df = self.state.entire_df
func_in_module = (
self.df[self.df.module == self.module]["name"].unique().tolist()
)
for idx, func in enumerate(func_in_module):
ret.append(
{
"name": func,
"time (inc)": entire_df.loc[entire_df["name"] == func][
"time (inc)"
].tolist(),
"time": entire_df.loc[entire_df["name"] == func]["time"].tolist(),
"rank": entire_df.loc[entire_df["name"] == func]["rank"].tolist(),
}
)
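        # Each entry in `ret` collects, for one function in the selected module,
        # its per-rank "time (inc)", "time", and "rank" values.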
        ret_df = pd.DataFrame(ret)
"""Unit tests for Model class
"""
import unittest
import pandas as pd
import torch
from stock_trading_backend.agent import Model
class TestModel(unittest.TestCase):
"""Unit tests for Model class.
"""
def test_initializes(self):
"""Checks if model initializes properly.
"""
model = Model()
self.assertIsNone(model.name)
def test_predict(self):
"""Checks if predict function works properly.
"""
model = Model()
        observation = pd.Series([1, 2, 3], ["balance", "net_worth", "owned"])
from collections import OrderedDict
import contextlib
from datetime import datetime, time
from functools import partial
import os
from urllib.error import URLError
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
message="time.clock has been deprecated",
category=DeprecationWarning,
)
yield
read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
return True
def _transfer_marks(engine, read_ext):
"""
    engine gives us a pytest.param object with some marks, read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
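    # e.g. pairing the "xlrd" engine param with ".xls" yields
    # pytest.param(("xlrd", ".xls"), marks=...) with the original marks carried over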
return new_param
@pytest.fixture(
autouse=True,
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_usecols_int(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3)
# usecols as int
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3
)
def test_usecols_list(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3]
)
        # TODO add index to xls file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A:D"
)
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D"
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D"
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"]
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"])
expected = DataFrame(
columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, read_ext, index_col):
# see gh-18792
result = pd.read_excel("test1" + read_ext, "Sheet4", index_col=index_col)
expected = DataFrame(
[["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
)
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, read_ext):
msg = (
"Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]"
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E"])
def test_usecols_wrong_type(self, read_ext):
msg = (
"'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable."
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, read_ext):
parsed = pd.read_excel("test2" + read_ext, "Sheet1")
expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, read_ext):
parsed = pd.read_excel("test3" + read_ext, "Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self, read_ext, df_ref):
df1 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1], index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
df3 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, read_ext):
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
(
"DateCol",
[
datetime(2013, 10, 30),
datetime(2013, 10, 31),
datetime(1905, 1, 1),
datetime(2013, 12, 14),
datetime(2015, 3, 14),
],
),
]
)
)
basename = "test_types"
# should read in correctly and infer types
actual = pd.read_excel(basename + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = pd.read_excel(basename + read_ext, "Sheet1", convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = pd.read_excel(basename + read_ext, "Sheet1", index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext, "Sheet1", converters={"StrCol": str}
)
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext,
"Sheet1",
convert_float=False,
converters={"StrCol": str},
)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, read_ext):
basename = "test_converters"
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ["Found", "Found", "Found", "Not found", "Found"]),
("StrCol", ["1", np.nan, "3", "4", "5"]),
]
)
)
converters = {
"IntCol": lambda x: int(x) if x != "" else -1000,
"FloatCol": lambda x: 10 * x if x else np.nan,
2: lambda x: "Found" if x != "" else "Not found",
3: lambda x: str(x) if x else "",
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = pd.read_excel(basename + read_ext, "Sheet1", converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, read_ext):
# GH 8212
basename = "testdtype"
actual = pd.read_excel(basename + read_ext)
expected = DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
).reindex(columns=["a", "b", "c", "d"])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
)
expected["a"] = expected["a"].astype("float64")
expected["b"] = expected["b"].astype("float32")
expected["c"] = ["001", "002", "003", "004"]
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
pd.read_excel(basename + read_ext, dtype={"d": "int64"})
@pytest.mark.parametrize(
"dtype,expected",
[
(
None,
DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
),
),
(
{"a": "float64", "b": "float32", "c": str, "d": str},
DataFrame(
{
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"],
}
),
),
],
)
def test_reader_dtype_str(self, read_ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = "test_multisheet"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ["Charlie", "Alpha", "Beta"]
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, read_ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = "test_multisheet"
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, "Charlie", "Charlie"]
dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = "blank_with_header"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, read_ext):
actual = pd.read_excel("blank" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
actual = pd.read_excel("blank_with_header" + read_ext, "Sheet1")
        tm.assert_frame_equal(actual, expected)
# -*- coding: utf-8 -*-
"""Converter for miRBase Families."""
from typing import Iterable
import pandas as pd
from tqdm import tqdm
from .mirbase_constants import (
get_premature_df,
get_premature_family_df,
get_premature_to_prefamily_df,
)
from ..struct import Obo, Reference, Term, has_member
__all__ = [
"MiRBaseFamilyGetter",
]
PREFIX = "mirbase.family"
class MiRBaseFamilyGetter(Obo):
"""An ontology representation of miRBase's miRNA family nomenclature."""
ontology = PREFIX
bioversions_key = "mirbase"
def iter_terms(self, force: bool = False) -> Iterable[Term]:
"""Iterate over terms in the ontology."""
return iter_terms(version=self._version_or_raise, force=force)
def get_obo(force: bool = False) -> Obo:
"""Get miRBase family as OBO."""
return MiRBaseFamilyGetter(force=force)
def iter_terms(version: str, force: bool = False) -> Iterable[Term]:
"""Get miRBase family terms."""
df = get_df(version, force=force)
for family_id, name, mirna_id, mirna_name in tqdm(df.values, total=len(df.index)):
term = Term(
reference=Reference(prefix=PREFIX, identifier=family_id, name=name),
)
term.append_relationship(
has_member, Reference(prefix="mirna", identifier=mirna_id, name=mirna_name)
)
yield term
def get_df(version: str, force: bool = False) -> pd.DataFrame:
"""Get the miRBase family dataframe."""
mirna_prefamily_df = get_premature_to_prefamily_df(version, force=force)
prefamily_df = get_premature_family_df(version, force=force)
premature_df = get_premature_df(version, force=force)
intermediate_df = pd.merge(
mirna_prefamily_df, prefamily_df, left_on="prefamily_key", right_on="prefamily_key"
)
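    # The first merge joins the premature-to-prefamily mapping with the prefamily
    # table on prefamily_key; the merge below then attaches the premature miRNA
    # table on premature_key.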
    rv = pd.merge(
        intermediate_df, premature_df, left_on="premature_key", right_on="premature_key"
    )
"""Dynamic file checks."""
from dataclasses import dataclass
from datetime import date, timedelta
from typing import Dict, Set
import re
import pandas as pd
import numpy as np
from .errors import ValidationFailure, APIDataFetchError
from .datafetcher import get_geo_signal_combos, threaded_api_calls
from .utils import relative_difference_by_min, TimeWindow, lag_converter
class DynamicValidator:
"""Class for validation of static properties of individual datasets."""
@dataclass
class Parameters:
"""Configuration parameters."""
# data source name, one of
# https://cmu-delphi.github.io/delphi-epidata/api/covidcast_signals.html
data_source: str
# span of time over which to perform checks
time_window: TimeWindow
# date that this df_to_test was generated; typically 1 day after the last date in df_to_test
generation_date: date
# number of days back to perform sanity checks, starting from the last date appearing in
# df_to_test
max_check_lookbehind: timedelta
# names of signals that are smoothed (7-day avg, etc)
smoothed_signals: Set[str]
        # maximum number of days behind we expect each signal to be
        max_expected_lag: Dict[str, int]
        # minimum number of days behind we expect each signal to be
        min_expected_lag: Dict[str, int]
def __init__(self, params):
"""
Initialize object and set parameters.
Arguments:
- params: dictionary of user settings; if empty, defaults will be used
"""
common_params = params["common"]
dynamic_params = params.get("dynamic", dict())
self.test_mode = dynamic_params.get("test_mode", False)
self.params = self.Parameters(
data_source=common_params["data_source"],
time_window=TimeWindow.from_params(common_params["end_date"],
common_params["span_length"]),
generation_date=date.today(),
max_check_lookbehind=timedelta(
days=max(7, dynamic_params.get("ref_window_size", 14))),
smoothed_signals=set(dynamic_params.get("smoothed_signals", [])),
min_expected_lag=lag_converter(common_params.get(
"min_expected_lag", dict())),
max_expected_lag=lag_converter(common_params.get(
"max_expected_lag", dict()))
)
def validate(self, all_frames, report):
"""
Perform all checks over the combined data set from all files.
Parameters
----------
all_frames: pd.DataFrame
combined data from all input files
report: ValidationReport
report to which the results of these checks will be added
"""
# Get 14 days prior to the earliest list date
outlier_lookbehind = timedelta(days=14)
# Get all expected combinations of geo_type and signal.
geo_signal_combos = get_geo_signal_combos(self.params.data_source)
all_api_df = threaded_api_calls(self.params.data_source,
self.params.time_window.start_date - outlier_lookbehind,
self.params.time_window.end_date,
geo_signal_combos)
# Keeps script from checking all files in a test run.
kroc = 0
# Comparison checks
# Run checks for recent dates in each geo-sig combo vs semirecent (previous
# week) API data.
for geo_type, signal_type in geo_signal_combos:
geo_sig_df = all_frames.query(
"geo_type == @geo_type & signal == @signal_type")
# Drop unused columns.
geo_sig_df.drop(columns=["geo_type", "signal"])
report.increment_total_checks()
if geo_sig_df.empty:
report.add_raised_error(ValidationFailure(check_name="check_missing_geo_sig_combo",
geo_type=geo_type,
signal=signal_type,
message="file with geo_type-signal combo "
"does not exist"))
continue
max_date = geo_sig_df["time_value"].max()
self.check_min_allowed_max_date(
max_date, geo_type, signal_type, report)
self.check_max_allowed_max_date(
max_date, geo_type, signal_type, report)
# Get relevant reference data from API dictionary.
api_df_or_error = all_api_df[(geo_type, signal_type)]
report.increment_total_checks()
if isinstance(api_df_or_error, APIDataFetchError):
report.add_raised_error(api_df_or_error)
continue
# Only do outlier check for cases and deaths signals
if (signal_type in ["confirmed_7dav_cumulative_num", "confirmed_7dav_incidence_num",
"confirmed_cumulative_num", "confirmed_incidence_num",
"deaths_7dav_cumulative_num",
"deaths_cumulative_num"]):
# Outlier dataframe
earliest_available_date = geo_sig_df["time_value"].min()
source_df = geo_sig_df.query(
'time_value <= @self.params.time_window.end_date & '
'time_value >= @self.params.time_window.start_date'
)
# These variables are interpolated into the call to `api_df_or_error.query()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
outlier_start_date = earliest_available_date - outlier_lookbehind
outlier_end_date = earliest_available_date - timedelta(days=1)
outlier_api_df = api_df_or_error.query(
'time_value <= @outlier_end_date & time_value >= @outlier_start_date')
# pylint: enable=unused-variable
self.check_positive_negative_spikes(
source_df, outlier_api_df, geo_type, signal_type, report)
# Check data from a group of dates against recent (previous 7 days,
# by default) data from the API.
for checking_date in self.params.time_window.date_seq:
create_dfs_or_error = self.create_dfs(
geo_sig_df, api_df_or_error, checking_date, geo_type, signal_type, report)
if not create_dfs_or_error:
continue
recent_df, reference_api_df = create_dfs_or_error
self.check_max_date_vs_reference(
recent_df, reference_api_df, checking_date, geo_type, signal_type, report)
self.check_rapid_change_num_rows(
recent_df, reference_api_df, checking_date, geo_type, signal_type, report)
if not re.search("cumulative", signal_type):
self.check_avg_val_vs_reference(
recent_df, reference_api_df, checking_date, geo_type,
signal_type, report)
# Keeps script from checking all files in a test run.
kroc += 1
if self.test_mode and kroc == 2:
break
def check_min_allowed_max_date(self, max_date, geo_type, signal_type, report):
"""Check if time since data was generated is reasonable or too long ago.
        The most recent data should be at most max_expected_lag days before the generation date.
Arguments:
- max_date: date of most recent data to be validated; datetime format.
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
min_thres = timedelta(days = self.params.max_expected_lag.get(
signal_type, self.params.max_expected_lag.get('all', 10)))
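        # e.g. with a max_expected_lag of 10 days and a generation date of
        # 2021-03-15, the most recent data point must fall on or after 2021-03-05.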
if max_date < self.params.generation_date - min_thres:
report.add_raised_error(
ValidationFailure("check_min_max_date",
geo_type=geo_type,
signal=signal_type,
message="date of most recent generated file seems too long ago"))
report.increment_total_checks()
def check_max_allowed_max_date(self, max_date, geo_type, signal_type, report):
"""Check if time since data was generated is reasonable or too recent.
        The most recent data should be at least min_expected_lag days before the generation date.
Arguments:
- max_date: date of most recent data to be validated; datetime format.
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
max_thres = timedelta(days = self.params.min_expected_lag.get(
signal_type, self.params.min_expected_lag.get('all', 1)))
if max_date > self.params.generation_date - max_thres:
report.add_raised_error(
ValidationFailure("check_max_max_date",
geo_type=geo_type,
signal=signal_type,
message="date of most recent generated file seems too recent"))
report.increment_total_checks()
def create_dfs(self, geo_sig_df, api_df_or_error, checking_date, geo_type, signal_type, report):
"""Create recent_df and reference_api_df from params.
Raises error if recent_df is empty.
Arguments:
- geo_sig_df: Pandas dataframe of test data
- api_df_or_error: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- False if recent_df is empty, else (recent_df, reference_api_df)
(after reference_api_df has been padded if necessary)
"""
        # recent_lookbehind: starting from the check date and working backward in time,
# how many days at a time do we want to check for anomalies?
# Choosing 1 day checks just the daily data.
recent_lookbehind = timedelta(days=1)
recent_cutoff_date = checking_date - \
recent_lookbehind + timedelta(days=1)
recent_df = geo_sig_df.query(
'time_value <= @checking_date & time_value >= @recent_cutoff_date')
report.increment_total_checks()
if recent_df.empty:
min_thres = timedelta(days = self.params.max_expected_lag.get(
signal_type, self.params.max_expected_lag.get('all', 10)))
if checking_date < self.params.generation_date - min_thres:
report.add_raised_error(
ValidationFailure("check_missing_geo_sig_date_combo",
checking_date,
geo_type,
signal_type,
"test data for a given checking date-geo type-signal type"
" combination is missing. Source data may be missing"
" for one or more dates"))
return False
# Reference dataframe runs backwards from the recent_cutoff_date
#
# These variables are interpolated into the call to `api_df_or_error.query()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
reference_start_date = recent_cutoff_date - self.params.max_check_lookbehind
if signal_type in self.params.smoothed_signals:
# Add an extra 7 days to the reference period.
reference_start_date = reference_start_date - \
timedelta(days=7)
reference_end_date = recent_cutoff_date - timedelta(days=1)
# pylint: enable=unused-variable
# Subset API data to relevant range of dates.
reference_api_df = api_df_or_error.query(
"time_value >= @reference_start_date & time_value <= @reference_end_date")
report.increment_total_checks()
if reference_api_df.empty:
report.add_raised_error(
ValidationFailure("empty_reference_data",
checking_date,
geo_type,
signal_type,
"reference data is empty; comparative checks could not "
"be performed"))
return False
reference_api_df = self.pad_reference_api_df(
reference_api_df, geo_sig_df, reference_end_date)
return (geo_sig_df, reference_api_df)
def pad_reference_api_df(self, reference_api_df, geo_sig_df, reference_end_date):
"""Check if API data is missing, and supplement from test data.
Arguments:
- reference_api_df: API data within lookbehind range
- geo_sig_df: Test data
- reference_end_date: Supposed end date of reference data
Returns:
- reference_api_df: Supplemented version of original
"""
reference_api_df_max_date = reference_api_df.time_value.max()
if reference_api_df_max_date < reference_end_date:
# Querying geo_sig_df, only taking relevant rows
geo_sig_df_supplement = geo_sig_df.query(
'time_value <= @reference_end_date & time_value > \
@reference_api_df_max_date')[[
"geo_id", "val", "se", "sample_size", "time_value"]]
# Matching time_value format
geo_sig_df_supplement["time_value"] = \
pd.to_datetime(geo_sig_df_supplement["time_value"],
format = "%Y-%m-%d %H:%M:%S")
reference_api_df = pd.concat(
[reference_api_df, geo_sig_df_supplement])
return reference_api_df
def check_max_date_vs_reference(self, df_to_test, df_to_reference, checking_date,
geo_type, signal_type, report):
"""
Check if reference data is more recent than test data.
Arguments:
- df_to_test: pandas dataframe of a single CSV of source data
(one day-signal-geo_type combo)
- df_to_reference: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
if df_to_test["time_value"].max() < df_to_reference["time_value"].max():
report.add_raised_error(
ValidationFailure("check_max_date_vs_reference",
checking_date,
geo_type,
signal_type,
"reference df has days beyond the max date in the =df_to_test="))
report.increment_total_checks()
def check_rapid_change_num_rows(self, df_to_test, df_to_reference, checking_date,
geo_type, signal_type, report):
"""
        Compare the number of observations per day in the test dataframe vs the reference dataframe.
Arguments:
- df_to_test: pandas dataframe of CSV source data
- df_to_reference: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- checking_date: datetime date
- geo_type: str; geo type name (county, msa, hrr, state) as in the CSV name
- signal_type: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
Returns:
- None
"""
test_rows_per_reporting_day = df_to_test[df_to_test['time_value']
== checking_date].shape[0]
reference_rows_per_reporting_day = df_to_reference.shape[0] / len(
set(df_to_reference["time_value"]))
try:
compare_rows = relative_difference_by_min(
test_rows_per_reporting_day,
reference_rows_per_reporting_day)
except ZeroDivisionError as e:
print(checking_date, geo_type, signal_type)
raise e
if abs(compare_rows) > 0.35:
report.add_raised_error(
ValidationFailure("check_rapid_change_num_rows",
checking_date,
geo_type,
signal_type,
"Number of rows per day seems to have changed rapidly (reference "
"vs test data)"))
report.increment_total_checks()
def check_positive_negative_spikes(self, source_df, api_frames, geo, sig, report):
"""
Adapt Dan's corrections package to Python (only consider spikes).
See https://github.com/cmu-delphi/covidcast-forecast/tree/dev/corrections/data_corrections
Statistics for a right shifted rolling window and a centered rolling window are used
to determine outliers for both positive and negative spikes.
As it is now, ststat will always be NaN for source frames.
Arguments:
- source_df: pandas dataframe of CSV source data
- api_frames: pandas dataframe of reference data, either from the
COVIDcast API or semirecent data
- geo: str; geo type name (county, msa, hrr, state) as in the CSV name
- sig: str; signal name as in the CSV name
- report: ValidationReport; report where results are added
"""
report.increment_total_checks()
# Combine all possible frames so that the rolling window calculations make sense.
source_frame_start = source_df["time_value"].min()
# This variable is interpolated into the call to `add_raised_error()`
# below but pylint doesn't recognize that.
# pylint: disable=unused-variable
source_frame_end = source_df["time_value"].max()
# pylint: enable=unused-variable
all_frames = pd.concat([api_frames, source_df]). \
drop_duplicates(subset=["geo_id", "time_value"], keep='last'). \
sort_values(by=['time_value']).reset_index(drop=True)
# Tuned Variables from Dan's Code for flagging outliers. Size_cut is a
# check on the minimum value reported, sig_cut is a check
# on the ftstat or ststat reported (t-statistics) and sig_consec
# is a lower check for determining outliers that are next to each other.
size_cut, sig_cut, sig_consec = 5, 3, 2.25
# Functions mapped to rows to determine outliers based on fstat and ststat values
def outlier_flag(frame):
if (abs(frame["val"]) > size_cut) and not ( | pd.isna(frame["ststat"]) | pandas.isna |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
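# For example, for a 2x3 DataFrame f, _generate_indices(f) yields label pairs
# from itertools.product(f.index, f.columns), while _generate_indices(f, True)
# yields positional pairs (0, 0), (0, 1), ..., (1, 2) suitable for f.values[i].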
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
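# For example, _get_result(df, 'indexer', {0: 2}, axis=0) picks the axis-0 key
# (2), maps it to the label df.index[2], and evaluates it through .ix, so a
# positional expectation can be compared against label-based indexing.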
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
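# For example, with a DataFrame (ndim == 2), _axify(df, [0, 2], 1) returns
# (slice(None), [0, 2]), i.e. "all rows, columns 0 and 2" when handed to an
# indexer.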
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
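    # The tests below drive check_result with a descriptive name, a primary
    # accessor/key pair (e.g. 'iloc', 2), a comparison accessor/key pair (where
    # the key may be a per-axis dict), the fixture types to exercise, and
    # optionally an exception class in `fails` that is accepted as a valid
    # outcome instead of a value comparison.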
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
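        # In short: out-of-bounds *slices* are truncated by .iloc just like
        # plain Python slicing, whereas scalar and list-of-int indexers that
        # fall outside the axis raise IndexError, as checked above.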
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
        # these cases are not exhaustive
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
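    # For example, because .loc treats integer slices as label slices here,
    # df.loc[20:30] selects the label range 20 through 30 inclusive (both
    # endpoints), not positions 20-29.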
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
        # this does not work, i.e. column 'test' is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
        # ix/loc get/set not consistent when
        # using a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
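        # In short: a raw boolean ndarray works with [], .loc and .iloc; a
        # boolean Series is aligned by [] and .loc (and rejected when it cannot
        # be aligned), while .iloc rejects boolean Series masks outright.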
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = | Series([0.2, 0.2, 0.1], index=[2, 2, 1]) | pandas.core.api.Series |
import warnings
import pandas as pd
import numpy as np
__all__ = ['Pandas2numpy']
def assert_list_contains_all(l, l_subset):
"Raise a warning if some columns from `l_subset` do not exist in `l`."
non_existing_columns = set(l_subset).difference(l)
if len(non_existing_columns) > 0:
non_existing_columns_names = repr(list(non_existing_columns))
warnings.warn("The columns " + non_existing_columns_names + " are not present in the dataframe and will be ignored!")
def list_intersection(l, l_superset):
"Returns the intersection of `l` and `l_superset`."
return sorted(list(set(l).intersection(l_superset))) # sort to guarantee reproducible column order
def safe_log(x, epsilon=0.0):
"A logarithm modified to avoid Nan"
return np.log(epsilon + np.abs(x))
def safe_log_reciprocal(x, epsilon=0.0):
"Function such that `safe_log_reciprocal(safe_log(x)) = x`"
return np.exp(x) - epsilon
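# Illustrative check (added sketch, not part of the original module): with the default
# epsilon of 0.0, safe_log(10.0) == np.log(10.0) ~= 2.302585 and
# safe_log_reciprocal(2.302585) ~= 10.0, so the pair round-trips positive values;
# negative inputs lose their sign because safe_log applies np.abs(x) first.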
class Pandas2numpy():
"Dataframe to tensor converter for deep learning."
def __init__(self, dataframe, continuous_columns=[], categorical_columns=[],
normalized_columns=[], NA_columns=[], logscale_columns=[],
log_epsilon=0.0):
"""
Stores the information needed to convert a Pandas dataframe to a Numpy array and back
`dataframe` is an example dataframe used to determine all possible category values and to store statistics for normalization and NA replacement
`continuous_columns` is the list of column names containing continuous data to be encoded
`categorical_columns` is the list of column names containing categorical data to be encoded
`normalized_columns` is the list of column names that should be normalized by subtracting the mean and dividing by the standard deviation
`NA_columns` is the list of column names that might contain NA; categorical columns get an additional label while continuous columns replace NA with the median and record the presence of NA in an additional categorical column
`logscale_columns` is the list of column names to which a logarithm should be applied (the absolute value is taken and `log_epsilon` added before the logarithm to avoid producing NaN)
`log_epsilon` is a value that can be added before taking the logarithm to avoid getting NaN on logarithms of zeros
"""
# ensures that all column names are valid
all_columns = dataframe.columns
assert_list_contains_all(all_columns, continuous_columns)
assert_list_contains_all(all_columns, categorical_columns)
assert_list_contains_all(all_columns, normalized_columns)
assert_list_contains_all(all_columns, logscale_columns)
assert_list_contains_all(all_columns, NA_columns)
# stores target column names
self.continuous_columns = list_intersection(continuous_columns, all_columns)
self.categorical_columns = list_intersection(categorical_columns, all_columns)
self.normalized_columns = list_intersection(normalized_columns, self.continuous_columns)
self.logscale_columns = list_intersection(logscale_columns, self.continuous_columns)
self.NA_cont_columns = list_intersection(NA_columns, self.continuous_columns)
self.NA_cat_columns = list_intersection(NA_columns, self.categorical_columns)
self.log_epsilon = log_epsilon
# apply logscale transformation in order to measure normalization info in proper scale
transformed_df = dataframe.loc[:, self.continuous_columns]
transformed_df.loc[:, self.logscale_columns] = transformed_df.loc[:, self.logscale_columns].apply(lambda x: safe_log(x, self.log_epsilon))
# stores normalization info
self.normalized_columns_means = transformed_df[self.normalized_columns].mean(skipna=True)
self.normalized_columns_std = transformed_df[self.normalized_columns].std(skipna=True)
# stores median info for NA replacement
self.NA_cont_columns_medians = dataframe[self.NA_cont_columns].median(skipna=True)
# stores info on categories encoding
self.category_dtypes = dataframe[self.categorical_columns].astype('category').dtypes
# stores the number of categories per categorical column (useful to determine embedding sizes and such)
self.nb_category_per_categorical_column = []
for (col_index, col_name) in enumerate(self.categorical_columns):
# counts the number of categories for this column
nb_label = len(self.category_dtypes[col_index].categories)
# adds one category when NA is a possibility
if col_name in self.NA_cat_columns: nb_label += 1
self.nb_category_per_categorical_column.append(nb_label)
# adds the categorical columns created by NA_cont_columns
for _ in range(len(self.NA_cont_columns)): self.nb_category_per_categorical_column.append(2) # 2 categories as those are booleans
self.nb_category_per_categorical_column = np.array(self.nb_category_per_categorical_column)
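# Illustrative usage sketch (added example; `df` and its column names are hypothetical):
#   converter = Pandas2numpy(df,
#                            continuous_columns=['price', 'age'], categorical_columns=['color'],
#                            normalized_columns=['price'], NA_columns=['price', 'color'],
#                            logscale_columns=['price'], log_epsilon=1.0)
#   tensor_cont, tensor_cat = converter.to_numpy(df)
# `tensor_cont` holds the encoded continuous columns and `tensor_cat` the integer-encoded
# categorical columns plus one extra boolean column per continuous NA column.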
#--------------------------------------------------------------------------
# ENCODING
def continuous_to_numpy(self, df):
"""
takes a dataframe and encodes the `continuous_columns` as a tensor
the NA in `NA_columns` are replaced with the medians of the columns in the example dataset
takes the logarithm of the `logscale_columns` columns
normalizes the `normalized_columns` using the mean and standard deviation extracted from the example dataset
`df` is the dataframe to be encoded
"""
df = df.loc[:, self.continuous_columns]
# replace NA with median
df.loc[:, self.NA_cont_columns] = df.loc[:, self.NA_cont_columns].fillna(self.NA_cont_columns_medians)
# takes logarithm of some columns
df.loc[:, self.logscale_columns] = df.loc[:, self.logscale_columns].apply(lambda x: safe_log(x, self.log_epsilon))
# normalizes some columns
df.loc[:, self.normalized_columns] = (df.loc[:, self.normalized_columns] - self.normalized_columns_means) / self.normalized_columns_std
return df.to_numpy()
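# Worked note (added, not part of the original module): for a column that is both
# log-scaled and normalized, a raw value x is encoded as
# (safe_log(x, log_epsilon) - mean) / std, where mean and std were measured on the
# log-scaled example dataframe in __init__.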
def categorial_to_numpy(self, df, include_continuous_NA_info=True):
"""
takes a dataframe and encodes the `categorical_columns` as a tensor of integers
the NA in `NA_columns` are encoded as the 0 label of their respective columns
additional columns are added to encode whether a continuous column in the `NA_columns` contained a NA
`include_continuous_NA_info` should be set to true if you want additional columns encoding whether continuous variables contain NA
"""
# encodes whether a cont column contained an NA (that was replaced by a median)
if include_continuous_NA_info: continuous_col_isNA = df[self.NA_cont_columns].isna().astype(int)
# encodes data as categories using predefined categories
df = df.loc[:, self.categorical_columns].astype(self.category_dtypes).apply(lambda x: x.cat.codes)
# NA has code -1 by default; shift so that all codes are non-negative
df.loc[:, self.NA_cat_columns] += 1
# adds columns encoding whether continuous columns were NA
if include_continuous_NA_info: df = pd.concat((df, continuous_col_isNA), axis=1)
return df.to_numpy()
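# Example of the NA shift above (added, illustrative): for a categorical NA column with
# categories ['a', 'b'], pandas assigns codes a=0, b=1 and NA=-1; after the +1 shift the
# stored codes become NA=0, a=1, b=2.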
def to_numpy(self, df, include_continuous_NA_info=True):
"""
takes a dataframe and encodes it as a pair `(tensor_cont,tensor_cat)`
where `tensor_cont` stores the continuous columns
and `tensor_cat` stores the categorical columns
`include_continuous_NA_info` should be set to true if you want additional categorical columns encoding whether continuous variables contain NA
"""
tensor_cont = self.continuous_to_numpy(df)
tensor_cat = self.categorial_to_numpy(df, include_continuous_NA_info=include_continuous_NA_info)
return (tensor_cont, tensor_cat)
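# Shape sketch (added, assuming n input rows): tensor_cont has shape
# (n, len(continuous_columns)) and, with include_continuous_NA_info=True, tensor_cat has
# shape (n, len(categorical_columns) + number of continuous NA columns).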
#--------------------------------------------------------------------------
# DECODING
def continuous_from_numpy(self, tensor_cont, copy=True):
"""
takes a tensor and decodes it as a dataframe with columns `continuous_columns`
removes the effects of normalization and logarithmic transformation
you can set the `copy` argument to `False` to reduce memory usage but the decoding will modify your tensor in place
WARNING: values that are flagged as NA in the categorical tensor are left untouched here
"""
df = pd.DataFrame(data=tensor_cont, columns=self.continuous_columns, copy=copy)
# removes normalization
df.loc[:, self.normalized_columns] = (df.loc[:, self.normalized_columns] * self.normalized_columns_std) + self.normalized_columns_means
# removes logarithms
df.loc[:, self.logscale_columns] = df.loc[:, self.logscale_columns].apply(lambda x: safe_log_reciprocal(x, self.log_epsilon))
# nothing to do to reinject NAs at this stage
return df
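# Round-trip note (added): continuous_from_numpy(continuous_to_numpy(df)) recovers the
# original continuous values, except that entries which were NA come back as the stored
# medians and negative entries of log-scaled columns lose their sign.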
def categorial_from_numpy(self, tensor_cat):
"""
takes a tensor and decodes it as a dataframe with columns `categorical_columns`
the columns encoding the presence of NA in continuous variables are ignored
"""
columns = []
# decodes one columns at a time
# the columns encoding whether a cont column contained an NA are ignored
for (col_index, col_name) in enumerate(self.categorical_columns):
codes = tensor_cat[:,col_index]
# gets NA back to code -1
if col_name in self.NA_cat_columns: codes -= 1
# translates codes into their categories
categories = self.category_dtypes[col_index].categories
column = | pd.Categorical.from_codes(codes, categories=categories) | pandas.Categorical.from_codes |
import numpy as np
import pandas as pd
import sys
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import pyqtgraph
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtTest import *
from Model_module import Model_module
from Data_module import Data_module
# from Sub_widget import another_result_explain
class Worker(QObject):
# Create the signal containers used to send results to the GUI #############
train_value = pyqtSignal(object)
# nor_ab_value = pyqtSignal(object)
procedure_value = pyqtSignal(object)
verif_value = pyqtSignal(object)
timer = pyqtSignal(object)
symptom_db = pyqtSignal(object)
shap = pyqtSignal(object)
plot_db = pyqtSignal(object)
display_ex = pyqtSignal(object, object, object)
another_shap = pyqtSignal(object, object, object)
another_shap_table = pyqtSignal(object)
##########################################
@pyqtSlot(object)
def generate_db(self):
test_db = input('Enter the scenario to run : ')
print(f'Running the selected scenario : {test_db}.')
Model_module() # reset the empty matrices inside the model module
data_module = Data_module()
db, check_db = data_module.load_data(file_name=test_db) # load the test_db file
data_module.data_processing() # Min-Max o, 2 Dimension
liner = []
plot_data = []
normal_data = []
compare_data = {'Normal':[], 'Ab21-01':[], 'Ab21-02':[], 'Ab20-04':[], 'Ab15-07':[], 'Ab15-08':[], 'Ab63-04':[], 'Ab63-02':[], 'Ab21-12':[], 'Ab19-02':[], 'Ab21-11':[], 'Ab23-03':[], 'Ab60-02':[], 'Ab59-02':[], 'Ab23-01':[], 'Ab23-06':[]}
for line in range(np.shape(db)[0]):
QTest.qWait(0.01)
print(np.shape(db)[0], line)
data = np.array([data_module.load_real_data(row=line)])
liner.append(line)
check_data, check_parameter = data_module.load_real_check_data(row=line)
plot_data.append(check_data[0])
try: normal_data.append(normal_db.iloc[line])
except: pass
try: compare_data['Normal'].append(normal_db.iloc[line])
except: pass
try: compare_data['Ab21-01'].append(ab21_01.iloc[line])
except: pass
try: compare_data['Ab21-02'].append(ab21_02.iloc[line])
except: pass
try: compare_data['Ab20-04'].append(ab20_04.iloc[line])
except: pass
try: compare_data['Ab15-07'].append(ab15_07.iloc[line])
except: pass
try: compare_data['Ab15-08'].append(ab15_08.iloc[line])
except: pass
try: compare_data['Ab63-04'].append(ab63_04.iloc[line])
except: pass
try: compare_data['Ab63-02'].append(ab63_02.iloc[line])
except: pass
try: compare_data['Ab21-12'].append(ab21_12.iloc[line])
except: pass
try: compare_data['Ab19-02'].append(ab19_02.iloc[line])
except: pass
try: compare_data['Ab21-11'].append(ab21_11.iloc[line])
except: pass
try: compare_data['Ab23-03'].append(ab23_03.iloc[line])
except: pass
try: compare_data['Ab60-02'].append(ab60_02.iloc[line])
except: pass
try: compare_data['Ab59-02'].append(ab59_02.iloc[line])
except: pass
try: compare_data['Ab23-01'].append(ab23_01.iloc[line])
except: pass
try: compare_data['Ab23-06'].append(ab23_06.iloc[line])
except: pass
if np.shape(data) == (1, 10, 46):
dim2 = np.array(data_module.load_scaled_data(row=line - 9)) # scaled 2-D input window
# check_data, check_parameter = data_module.load_real_check_data(row=line - 8)
# plot_data.append(check_data[0])
train_untrain_reconstruction_error, train_untrain_error = model_module.train_untrain_classifier(data=data)
# normal_abnormal_reconstruction_error = model_module.normal_abnormal_classifier(data=data)
abnormal_procedure_result, abnormal_procedure_prediction, shap_add_des, shap_value = model_module.abnormal_procedure_classifier(data=dim2)
abnormal_verif_reconstruction_error, verif_threshold, abnormal_verif_error = model_module.abnormal_procedure_verification(data=data)
self.train_value.emit(train_untrain_error)
# self.nor_ab_value.emit(np.argmax(abnormal_procedure_result[line-9], axis=1)[0])
self.procedure_value.emit(np.argmax(abnormal_procedure_prediction, axis=1)[0])
self.verif_value.emit([abnormal_verif_error, verif_threshold])
self.timer.emit([line, check_parameter])
self.symptom_db.emit([np.argmax(abnormal_procedure_prediction, axis=1)[0], check_parameter])
self.shap.emit(shap_add_des)
self.plot_db.emit([liner, plot_data])
self.display_ex.emit(shap_add_des, [liner, plot_data], normal_data)
self.another_shap.emit(shap_value, [liner, plot_data], compare_data)
self.another_shap_table.emit(shap_value)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
class Mainwindow(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Real-Time Abnormal Diagnosis for NPP")
self.setGeometry(150, 50, 1700, 800)
# initial plot configuration
pyqtgraph.setConfigOption("background", "w")
pyqtgraph.setConfigOption("foreground", "k")
#############################################
self.selected_para = pd.read_csv('./DataBase/Final_parameter.csv')
# GUI part 1 layout (integrated diagnosis panel)
layout_left = QVBoxLayout()
# group 0 setup (Time and Power)
gb_0 = QGroupBox("Training Status") # set the group 0 title
layout_left.addWidget(gb_0) # add group 0 to the overall frame
gb_0_layout = QBoxLayout(QBoxLayout.LeftToRight) # layout that will hold the group 0 contents
# group 1 setup
gb_1 = QGroupBox("Training Status") # set the group 1 title
layout_left.addWidget(gb_1) # add group 1 to the overall frame
gb_1_layout = QBoxLayout(QBoxLayout.LeftToRight) # layout that will hold the group 1 contents
# group 2 setup
gb_2 = QGroupBox('NPP Status')
layout_left.addWidget(gb_2)
gb_2_layout = QBoxLayout(QBoxLayout.LeftToRight)
# group 3 setup
gb_3 = QGroupBox(self)
layout_left.addWidget(gb_3)
gb_3_layout = QBoxLayout(QBoxLayout.LeftToRight)
# group 4 setup
gb_4 = QGroupBox('Predicted Result Verification')
layout_left.addWidget(gb_4)
gb_4_layout = QBoxLayout(QBoxLayout.LeftToRight)
# group 5 setup
gb_5 = QGroupBox('Symptom check in scenario')
layout_left.addWidget(gb_5)
gb_5_layout = QBoxLayout(QBoxLayout.TopToBottom)
# add a spacer
# layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# group 0 contents
self.time_label = QLabel(self)
self.power_label = QPushButton(self)
# group 1 contents
# Trained / Untrained condition label
self.trained_label = QPushButton('Trained')
self.Untrained_label = QPushButton('Untrained')
# group 2 contents
self.normal_label = QPushButton('Normal')
self.abnormal_label = QPushButton('Abnormal')
# group 3 contents
self.name_procedure = QLabel('Number of Procedure: ')
self.num_procedure = QLineEdit(self)
self.num_procedure.setAlignment(Qt.AlignCenter)
self.name_scnario = QLabel('Name of Procedure: ')
self.num_scnario = QLineEdit(self)
self.num_scnario.setAlignment(Qt.AlignCenter)
# group 4 contents
self.success_label = QPushButton('Diagnosis Success')
self.failure_label = QPushButton('Diagnosis Failure')
# group 5 contents
self.symptom_name = QLabel(self)
self.symptom1 = QCheckBox(self)
self.symptom2 = QCheckBox(self)
self.symptom3 = QCheckBox(self)
self.symptom4 = QCheckBox(self)
self.symptom5 = QCheckBox(self)
self.symptom6 = QCheckBox(self)
# populate group 0
gb_0_layout.addWidget(self.time_label)
gb_0_layout.addWidget(self.power_label)
gb_0.setLayout(gb_0_layout)
# populate group 1
gb_1_layout.addWidget(self.trained_label)
gb_1_layout.addWidget(self.Untrained_label)
gb_1.setLayout(gb_1_layout) # attach the group 1 layout to the group 1 frame
# populate group 2
gb_2_layout.addWidget(self.normal_label)
gb_2_layout.addWidget(self.abnormal_label)
gb_2.setLayout(gb_2_layout)
# populate group 3
gb_3_layout.addWidget(self.name_procedure)
gb_3_layout.addWidget(self.num_procedure)
gb_3_layout.addWidget(self.name_scnario)
gb_3_layout.addWidget(self.num_scnario)
gb_3.setLayout(gb_3_layout)
# populate group 4
gb_4_layout.addWidget(self.success_label)
gb_4_layout.addWidget(self.failure_label)
gb_4.setLayout(gb_4_layout)
# populate group 5
gb_5_layout.addWidget(self.symptom_name)
gb_5_layout.addWidget(self.symptom1)
gb_5_layout.addWidget(self.symptom2)
gb_5_layout.addWidget(self.symptom3)
gb_5_layout.addWidget(self.symptom4)
gb_5_layout.addWidget(self.symptom5)
gb_5_layout.addWidget(self.symptom6)
gb_5.setLayout(gb_5_layout)
# Start button placed at the very bottom
self.start_btn = QPushButton('Start')
# layout_part1.addWidget(self.start_btn)
self.tableWidget = QTableWidget(0, 0)
self.tableWidget.setFixedHeight(500)
self.tableWidget.setFixedWidth(800)
# plot widgets
self.plot_1 = pyqtgraph.PlotWidget(title=self)
self.plot_2 = pyqtgraph.PlotWidget(title=self)
self.plot_3 = pyqtgraph.PlotWidget(title=self)
self.plot_4 = pyqtgraph.PlotWidget(title=self)
# explanation alarm panels
red_alarm = QGroupBox('Main basis for diagnosis')
red_alarm_layout = QGridLayout()
orange_alarm = QGroupBox('Sub basis for diagnosis')
orange_alarm_layout = QGridLayout()
# create the display buttons
self.red1 = QPushButton(self)
self.red2 = QPushButton(self)
self.red3 = QPushButton(self)
self.red4 = QPushButton(self)
self.orange1 = QPushButton(self)
self.orange2 = QPushButton(self)
self.orange3 = QPushButton(self)
self.orange4 = QPushButton(self)
self.orange5 = QPushButton(self)
self.orange6 = QPushButton(self)
self.orange7 = QPushButton(self)
self.orange8 = QPushButton(self)
self.orange9 = QPushButton(self)
self.orange10 = QPushButton(self)
self.orange11 = QPushButton(self)
self.orange12 = QPushButton(self)
# insert the widgets into the layouts
red_alarm_layout.addWidget(self.red1, 0, 0)
red_alarm_layout.addWidget(self.red2, 0, 1)
red_alarm_layout.addWidget(self.red3, 1, 0)
red_alarm_layout.addWidget(self.red4, 1, 1)
orange_alarm_layout.addWidget(self.orange1, 0, 0)
orange_alarm_layout.addWidget(self.orange2, 0, 1)
orange_alarm_layout.addWidget(self.orange3, 1, 0)
orange_alarm_layout.addWidget(self.orange4, 1, 1)
orange_alarm_layout.addWidget(self.orange5, 2, 0)
orange_alarm_layout.addWidget(self.orange6, 2, 1)
orange_alarm_layout.addWidget(self.orange7, 3, 0)
orange_alarm_layout.addWidget(self.orange8, 3, 1)
orange_alarm_layout.addWidget(self.orange9, 4, 0)
orange_alarm_layout.addWidget(self.orange10, 4, 1)
orange_alarm_layout.addWidget(self.orange11, 5, 0)
orange_alarm_layout.addWidget(self.orange12, 5, 1)
# attach the layouts to the group boxes
red_alarm.setLayout(red_alarm_layout)
orange_alarm.setLayout(orange_alarm_layout)
# add each group box to the parent layout
layout_part1 = QVBoxLayout()
detail_part = QHBoxLayout()
detailed_table = QPushButton('Detail Explanation [Table]')
self.another_classification = QPushButton('Why other scenarios were not chosen')
detail_part.addWidget(detailed_table)
detail_part.addWidget(self.another_classification)
alarm_main = QVBoxLayout()
alarm_main.addWidget(red_alarm)
alarm_main.addWidget(orange_alarm)
layout_part1.addLayout(layout_left)
layout_part1.addLayout(alarm_main)
layout_part1.addLayout(detail_part)
layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# GUI part2 Layout (XAI 구현)
layout_part2 = QVBoxLayout()
layout_part2.addWidget(self.plot_1)
layout_part2.addWidget(self.plot_2)
layout_part2.addWidget(self.plot_3)
layout_part2.addWidget(self.plot_4)
# layout_part2.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# layout_part2.addWidget(self.tableWidget)
# combine GUI part1 and part2
layout_base = QHBoxLayout()
layout_base.addLayout(layout_part1)
layout_base.addLayout(layout_part2)
# final GUI composition (so the start button sits at the bottom)
total_layout = QVBoxLayout()
total_layout.addLayout(layout_base)
total_layout.addWidget(self.start_btn)
self.setLayout(total_layout) # setLayout: defines the final GUI window
# Threading Part##############################################################################################################
# move the data-processing work to a worker thread
self.worker = Worker()
self.worker_thread = QThread()
# connect the worker signals to slots in the main thread
self.worker.train_value.connect(self.Determine_train)
self.worker.procedure_value.connect(self.Determine_abnormal)
self.worker.procedure_value.connect(self.Determine_procedure)
self.worker.verif_value.connect(self.verifit_result)
self.worker.timer.connect(self.time_display)
self.worker.symptom_db.connect(self.procedure_satisfaction)
# self.worker.shap.connect(self.explain_result)
self.worker.plot_db.connect(self.plotting)
self.worker.display_ex.connect(self.display_explain)
self.worker.moveToThread(self.worker_thread) # move the Worker instance to the thread
# self.worker_thread.started.connect(lambda: self.worker.generate_db())
self.start_btn.clicked.connect(lambda: self.worker.generate_db()) # run the processing loop on click
self.worker_thread.start()
# Threading Part##############################################################################################################
# event handling ----------------------------------------------------------------------------------------------------
detailed_table.clicked.connect(self.show_table)
self.another_classification.clicked.connect(self.show_another_result)
# button-click event wiring
convert_red_btn = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} # Red Button
convert_red_plot = {0: self.red1_plot, 1: self.red2_plot, 2: self.red3_plot, 3: self.red4_plot} #
convert_orange_btn = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5,
5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10,
10: self.orange11, 11: self.orange12} # Orange Button
convert_orange_plot = {0: self.orange1_plot, 1: self.orange2_plot, 2: self.orange3_plot, 3: self.orange4_plot, 4: self.orange5_plot,
5: self.orange6_plot, 6: self.orange7_plot, 7: self.orange8_plot, 8: self.orange9_plot, 9: self.orange10_plot,
10: self.orange11_plot, 11: self.orange12_plot}
# declare the plot widgets up front so they persist between clicks
# Red Button
[convert_red_btn[i].clicked.connect(convert_red_plot[i]) for i in range(4)]
self.red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_orange_btn[i].clicked.connect(convert_orange_plot[i]) for i in range(12)]
self.orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
self.show() # UI show command
def time_display(self, display_variable):
# display_variable[0] : time, display_variable[1].iloc[1]
self.time_label.setText(f'<b>Time :<b/> {display_variable[0]} sec')
self.time_label.setFont(QFont('Times new roman', 15))
self.time_label.setAlignment(Qt.AlignCenter)
self.power_label.setText(f'Power : {round(display_variable[1].iloc[1]["QPROREL"]*100, 2)}%')
if round(display_variable[1].iloc[1]["QPROREL"]*100, 2) < 95:
self.power_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
else:
self.power_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_train(self, train_untrain_reconstruction_error):
if train_untrain_reconstruction_error[0] <= 0.00225299: # Trained Data
self.trained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.Untrained_label.setStyleSheet('color : black;' 'background-color: light gray;')
else: # Untrained Data
self.Untrained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.trained_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_abnormal(self, abnormal_diagnosis):
if abnormal_diagnosis == 0: # normal state
self.normal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.abnormal_label.setStyleSheet('color : black;' 'background-color: light gray;')
else: # abnormal state
self.abnormal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.normal_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_procedure(self, abnormal_procedure_result):
if abnormal_procedure_result == 0:
self.num_procedure.setText('Normal')
self.num_scnario.setText('Normal')
elif abnormal_procedure_result == 1:
self.num_procedure.setText('Ab21-01')
self.num_scnario.setText('Pressurizer pressure channel failure "High"')
elif abnormal_procedure_result == 2:
self.num_procedure.setText('Ab21-02')
self.num_scnario.setText('Pressurizer pressure channel failure "Low"')
elif abnormal_procedure_result == 3:
self.num_procedure.setText('Ab20-04')
self.num_scnario.setText('Pressurizer level channel failure "Low"')
elif abnormal_procedure_result == 4:
self.num_procedure.setText('Ab15-07')
self.num_scnario.setText('Steam generator level channel failure "Low"')
elif abnormal_procedure_result == 5:
self.num_procedure.setText('Ab15-08')
self.num_scnario.setText('Steam generator level channel failure "High"')
elif abnormal_procedure_result == 6:
self.num_procedure.setText('Ab63-04')
self.num_scnario.setText('Control rod drop')
elif abnormal_procedure_result == 7:
self.num_procedure.setText('Ab63-02')
self.num_scnario.setText('Continuous control rod insertion')
elif abnormal_procedure_result == 8:
self.num_procedure.setText('Ab21-12')
# self.num_scnario.setText('가압기 PORV 열림')
self.num_scnario.setText('Pressurizer PORV opening')
elif abnormal_procedure_result == 9:
self.num_procedure.setText('Ab19-02')
self.num_scnario.setText('Pressurizer safety valve failure')
elif abnormal_procedure_result == 10:
self.num_procedure.setText('Ab21-11')
self.num_scnario.setText('Pressurizer spray valve failure "Open"')
elif abnormal_procedure_result == 11:
self.num_procedure.setText('Ab23-03')
self.num_scnario.setText('Leakage into the component cooling water system "CVCS->CCW"')
elif abnormal_procedure_result == 12:
self.num_procedure.setText('Ab60-02')
self.num_scnario.setText('Rupture upstream of the regenerative heat exchanger')
elif abnormal_procedure_result == 13:
self.num_procedure.setText('Ab59-02')
self.num_scnario.setText('Leakage downstream of the charging flow control valve')
elif abnormal_procedure_result == 14:
self.num_procedure.setText('Ab23-01')
self.num_scnario.setText('Leakage into the component cooling water system "RCS->CCW"')
elif abnormal_procedure_result == 15:
self.num_procedure.setText('Ab23-06')
self.num_scnario.setText('Steam generator tube leakage')
def verifit_result(self, verif_value):
if verif_value[0] <= verif_value[1]: # diagnosis success
self.success_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.failure_label.setStyleSheet('color : black;' 'background-color: light gray;')
else: # diagnosis failure
self.failure_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.success_label.setStyleSheet('color : black;' 'background-color: light gray;')
def procedure_satisfaction(self, symptom_db):
# symptom_db[0] : classification result [0~15]
# symptom_db[1] : check_db [2,2222] -> used to compare the current and previous time steps
# symptom_db[1].iloc[0] : previous time step # symptom_db[1].iloc[1] : current time step
if symptom_db[0] == 0: # normal state
self.symptom_name.setText('Diagnosis Result : Normal → Symptoms : 0')
self.symptom1.setText('')
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('')
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('')
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('')
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('')
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText('')
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 1:
self.symptom_name.setText('Diagnosis Result : Ab21-01 Pressurizer pressure channel failure "High" → Symptoms : 6')
self.symptom1.setText("채널 고장으로 인한 가압기 '고' 압력 지시")
if symptom_db[1].iloc[1]['PPRZN'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 살수밸브 '열림' 지시")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 비례전열기 꺼짐")
if symptom_db[1].iloc[1]['QPRZP'] == 0:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 보조전열기 꺼짐")
if symptom_db[1].iloc[1]['QPRZB'] == 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText("실제 가압기 '저' 압력 지시")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("가압기 PORV 차단밸브 닫힘")
if symptom_db[1].iloc[1]['BHV6'] == 0:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 2:
self.symptom_name.setText('Diagnosis Result : Ab21-02 Pressurizer pressure channel failure "Low" → Symptoms : 5')
self.symptom1.setText("Pressurizer 'low' pressure indication due to channel failure")
if symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('Auxiliary heater on indication and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("실제 가압기 '고' 압력 지시")
if symptom_db[1].iloc[1]['PPRZ'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('Pressurizer PORV closed as the actual pressurizer pressure decreases') # TODO: handle the pressure-decrease condition properly
if symptom_db[1].iloc[1]['BPORV'] == 0 and (symptom_db[1].iloc[0]['PPRZ'] > symptom_db[1].iloc[1]['PPRZ']):
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 3:
self.symptom_name.setText('Diagnosis Result : Ab20-04 Pressurizer level channel failure "Low" → Symptoms : 5')
self.symptom1.setText("Pressurizer 'low' level indication due to channel failure")
if symptom_db[1].iloc[1]['ZINST63'] < 17: # TODO: re-check this threshold later
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('"LETDN HX OUTLET FLOW LOW" 경보 발생')
if symptom_db[1].iloc[1]['UNRHXUT'] > symptom_db[1].iloc[1]['CULDHX']:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('"CHARGING LINE FLOW HI/LO" 경보 발생')
if (symptom_db[1].iloc[1]['WCHGNO'] < symptom_db[1].iloc[1]['CWCHGL']) or (symptom_db[1].iloc[1]['WCHGNO'] > symptom_db[1].iloc[1]['CWCHGH']):
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('Charging flow increase')
if symptom_db[1].iloc[0]['WCHGNO'] < symptom_db[1].iloc[1]['WCHGNO']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('Level indication increase on the healthy level channels')
if symptom_db[1].iloc[0]['ZPRZNO'] < symptom_db[1].iloc[1]['ZPRZNO']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 4:
self.symptom_name.setText('Diagnosis Result : Ab15-07 Steam generator level channel failure "Low" → Symptoms : ')
self.symptom1.setText('Steam generator level "low" alarm')
if symptom_db[1].iloc[1]['ZINST78']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST77']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST76']*0.01 < symptom_db[1].iloc[1]['CZSGW']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('MFCV of the affected SG moving toward open and actual feedwater flow of that SG increasing')
elif symptom_db[0] == 8:
# self.symptom_name.setText('진단 : Ab21-12 가압기 PORV 열림 → 증상 : 5')
self.symptom_name.setText('Diagnosis result : Ab21-12 Pressurizer PORV opening → Symptoms : 5')
# self.symptom1.setText('가압기 PORV 열림 지시 및 경보 발생')
self.symptom1.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생')
self.symptom2.setText('Aux. heater turn on instruction and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
self.symptom3.setText("pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL'] :
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom4.setText("PRT 고온 지시 및 경보 발생")
self.symptom4.setText("PRT high temperature indication and alarm")
if symptom_db[1].iloc[1]['UPRT'] > symptom_db[1].iloc[1]['CUPRT'] :
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom5.setText("PRT 고압 지시 및 경보 발생")
self.symptom5.setText("PRT high pressure indication and alarm")
if (symptom_db[1].iloc[1]['PPRT'] - 0.98E5) > symptom_db[1].iloc[1]['CPPRT']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Blank")
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 10:
self.symptom_name.setText("진단 : Ab21-11 가압기 살수밸브 고장 '열림' → 증상 : 4")
self.symptom1.setText("가압기 살수밸브 '열림' 지시 및 상태 표시등 점등")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 보조전열기 켜짐 지시 및 경보 발생")
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 수위 급격한 증가") # 급격한 증가에 대한 수정은 필요함 -> 추후 수정
if symptom_db[1].iloc[0]['ZINST63'] < symptom_db[1].iloc[1]['ZINST63']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
def explain_result(self, shap_add_des):
'''
# shap_add_des['index'] : variable name / shap_add_des[0] : shap value
# shap_add_des['describe'] : description of the variable / shap_add_des['probability'] : shap value converted to a probability
'''
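# Illustrative row (added example; the values are hypothetical): index='PPRZ',
# probability=23.1, describe='Pressurizer pressure', system='RCS'.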
self.tableWidget.setRowCount(len(shap_add_des))
self.tableWidget.setColumnCount(4)
self.tableWidget.setHorizontalHeaderLabels(["value_name", 'probability', 'describe', 'system'])
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(QHeaderView.ResizeToContents)
header.setSectionResizeMode(0, QHeaderView.Stretch)
header.setSectionResizeMode(1, QHeaderView.Stretch)
header.setSectionResizeMode(2, QHeaderView.ResizeToContents)
header.setSectionResizeMode(3, QHeaderView.Stretch)
[self.tableWidget.setItem(i, 0, QTableWidgetItem(f"{shap_add_des['index'][i]}")) for i in range(len(shap_add_des['index']))]
[self.tableWidget.setItem(i, 1, QTableWidgetItem(f"{round(shap_add_des['probability'][i],2)}%")) for i in range(len(shap_add_des['probability']))]
[self.tableWidget.setItem(i, 2, QTableWidgetItem(f"{shap_add_des['describe'][i]}")) for i in range(len(shap_add_des['describe']))]
[self.tableWidget.setItem(i, 3, QTableWidgetItem(f"{shap_add_des['system'][i]}")) for i in range(len(shap_add_des['system']))]
delegate = AlignDelegate(self.tableWidget)
self.tableWidget.setItemDelegate(delegate)
def show_table(self):
self.worker.shap.connect(self.explain_result)
# The table data arrives through a worker-thread signal on click, so there is a delay of roughly 2 seconds; consider loading it up front instead.
self.tableWidget.show()
def plotting(self, symptom_db):
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# -- scatter --
# time = []
# value1, value2, value3 = [], [], []
# time.append(symptom_db[0])
# value1.append(round(symptom_db[1].iloc[1]['ZVCT'],2))
# value2.append(round(symptom_db[1].iloc[1]['BPORV'],2))
# value3.append(round(symptom_db[1].iloc[1]['UPRZ'],2))
# self.plotting_1 = self.plot_1.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_2 = self.plot_2.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_3 = self.plot_3.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# -- Line plotting --
# self.plotting_1 = self.plot_1.plot(pen='w')
# self.plotting_2 = self.plot_2.plot(pen='w')
# self.plotting_3 = self.plot_3.plot(pen='w')
# self.plotting_4 = self.plot_4.plot(pen='w')
self.plot_1.showGrid(x=True, y=True, alpha=0.3)
self.plot_2.showGrid(x=True, y=True, alpha=0.3)
self.plot_3.showGrid(x=True, y=True, alpha=0.3)
self.plot_4.showGrid(x=True, y=True, alpha=0.3)
self.plotting_1 = self.plot_1.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_2 = self.plot_2.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_3 = self.plot_3.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_4 = self.plot_4.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['BPORV'])
self.plot_1.setTitle('PORV open state')
self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRZN'])
self.plot_2.setTitle('Pressurizer pressure')
self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['UPRT'])
self.plot_3.setTitle('PRT temperature')
self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRT'])
self.plot_4.setTitle('PRT pressure')
# red_range = display_db[display_db['probability'] >= 10] # variables with a probability of 10% or more
#
# print(bool(red_range["describe"].iloc[3]))
# try :
# self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]])
# if red_range["describe"].iloc[0] == None:
# self.plot_1.setTitle(self)
# else:
# self.plot_1.setTitle(f'{red_range["describe"].iloc[0]}')
# # self.plot_1.clear()
# except:
# print('plot1 fail')
# try:
# self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]])
# if red_range["describe"].iloc[1] == None:
# self.plot_2.setTitle(self)
# else:
# self.plot_2.setTitle(f'{red_range["describe"].iloc[1]}')
# # self.plot_2.clear()
# except:
# print('plot2 fail')
# try:
# self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]])
# if red_range["describe"].iloc[2] == None:
# self.plot_3.setTitle(self)
# else:
# self.plot_3.setTitle(f'{red_range["describe"].iloc[2]}')
# # self.plot_3.clear()
# except:
# print('plot3 fail')
# try:
# self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]])
# if red_range["describe"].iloc[3] == None:
# self.plot_4.setTitle(self)
# else:
# self.plot_4.setTitle(f'{red_range["describe"].iloc[3]}')
# # self.plot_4.clear()
# except:
# print('plot4 fail')
def display_explain(self, display_db, symptom_db, normal_db):
'''
# display_db['index'] : variable name / display_db[0] : shap value
# display_db['describe'] : description of the variable / display_db['probability'] : shap value converted to a probability
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
'''
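# Illustrative split (added note; numbers are hypothetical): a variable with probability
# 23% goes to the red (main basis) buttons, one with 4% goes to the orange (sub basis)
# buttons, and anything at 1% or below is not displayed.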
red_range = display_db[display_db['probability'] >=10]
orange_range = display_db[[display_db['probability'].iloc[i]<10 and display_db['probability'].iloc[i]>1 for i in range(len(display_db['probability']))]]
convert_red = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4}
convert_orange = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12}
# pad the unused red button slots (equivalent to the original if/elif ladder)
red_del = list(range(len(red_range), 4))
# pad the unused orange button slots (equivalent to the original if/elif ladder)
orange_del = list(range(len(orange_range), 12))
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i],2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: lightgray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i],2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
# [convert_orange[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: orange;') for i in range(len(orange_range))]
# [convert_orange[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in orange_del]
# Build the plotting data associated with each button
# Red1 Button
if self.red1.text().split()[0] != 'None':
self.red_plot_1.clear()
self.red_plot_1.setTitle(red_range['describe'].iloc[0])
self.red_plot_1.addLegend(offset=(-30,20))
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name = 'Real Data')
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name = 'Normal Data')
# Red2 Button
if self.red2.text().split()[0] != 'None':
self.red_plot_2.clear()
self.red_plot_2.setTitle(red_range['describe'].iloc[1])
self.red_plot_2.addLegend(offset=(-30, 20))
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red3 Button
if self.red3.text().split()[0] != 'None':
self.red_plot_3.clear()
self.red_plot_3.setTitle(red_range['describe'].iloc[2])
self.red_plot_3.addLegend(offset=(-30, 20))
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red4 Button
if self.red4.text().split()[0] != 'None':
self.red_plot_4.clear()
self.red_plot_4.setTitle(red_range['describe'].iloc[3])
self.red_plot_4.addLegend(offset=(-30, 20))
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange1 Button
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.clear()
self.orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.orange_plot_1.addLegend(offset=(-30, 20))
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange2 Button
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.clear()
self.orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.orange_plot_2.addLegend(offset=(-30, 20))
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange3 Button
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.clear()
self.orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.orange_plot_3.addLegend(offset=(-30, 20))
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange4 Button
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.clear()
self.orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.orange_plot_4.addLegend(offset=(-30, 20))
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange5 Button
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.clear()
self.orange_plot_5.setTitle(orange_range['describe'].iloc[4])
self.orange_plot_5.addLegend(offset=(-30, 20))
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange6 Button
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.clear()
self.orange_plot_6.setTitle(orange_range['describe'].iloc[5])
self.orange_plot_6.addLegend(offset=(-30, 20))
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange7 Button
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.clear()
self.orange_plot_7.setTitle(orange_range['describe'].iloc[6])
self.orange_plot_7.addLegend(offset=(-30, 20))
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange8 Button
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.clear()
self.orange_plot_8.setTitle(orange_range['describe'].iloc[7])
self.orange_plot_8.addLegend(offset=(-30, 20))
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange9 Button
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.clear()
self.orange_plot_9.setTitle(orange_range['describe'].iloc[8])
self.orange_plot_9.addLegend(offset=(-30, 20))
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange10 Button
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.clear()
self.orange_plot_10.setTitle(orange_range['describe'].iloc[9])
self.orange_plot_10.addLegend(offset=(-30, 20))
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange11 Button
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.clear()
self.orange_plot_11.setTitle(orange_range['describe'].iloc[10])
self.orange_plot_11.addLegend(offset=(-30, 20))
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange12 Button
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.clear()
self.orange_plot_12.setTitle(orange_range['describe'].iloc[11])
self.orange_plot_12.addLegend(offset=(-30, 20))
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
[convert_red[i].setCheckable(True) for i in range(4)]
[convert_orange[i].setCheckable(True) for i in range(12)]
def red1_plot(self):
if self.red1.isChecked():
if self.red1.text().split()[0] != 'None':
self.red_plot_1.show()
self.red1.setCheckable(False)
def red2_plot(self):
if self.red2.isChecked():
if self.red2.text().split()[0] != 'None':
self.red_plot_2.show()
self.red2.setCheckable(False)
def red3_plot(self):
if self.red3.isChecked():
if self.red3.text().split()[0] != 'None':
self.red_plot_3.show()
self.red3.setCheckable(False)
def red4_plot(self):
if self.red4.isChecked():
if self.red4.text().split()[0] != 'None':
self.red_plot_4.show()
self.red4.setCheckable(False)
def orange1_plot(self):
if self.orange1.isChecked():
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.show()
self.orange1.setCheckable(False)
def orange2_plot(self):
if self.orange2.isChecked():
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.show()
self.orange2.setCheckable(False)
def orange3_plot(self):
if self.orange3.isChecked():
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.show()
self.orange3.setCheckable(False)
def orange4_plot(self):
if self.orange4.isChecked():
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.show()
self.orange4.setCheckable(False)
def orange5_plot(self):
if self.orange5.isChecked():
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.show()
self.orange5.setCheckable(False)
def orange6_plot(self):
if self.orange6.isChecked():
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.show()
self.orange6.setCheckable(False)
def orange7_plot(self):
if self.orange7.isChecked():
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.show()
self.orange7.setCheckable(False)
def orange8_plot(self):
if self.orange8.isChecked():
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.show()
self.orange8.setCheckable(False)
def orange9_plot(self):
if self.orange9.isChecked():
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.show()
self.orange9.setCheckable(False)
def orange10_plot(self):
if self.orange10.isChecked():
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.show()
self.orange10.setCheckable(False)
def orange11_plot(self):
if self.orange11.isChecked():
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.show()
self.orange11.setCheckable(False)
def orange12_plot(self):
if self.orange12.isChecked():
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.show()
self.orange12.setCheckable(False)
def show_another_result(self):
self.other = another_result_explain()
self.worker.another_shap_table.connect(self.other.show_another_result_table)
self.worker.another_shap.connect(self.other.show_shap)
self.other.show()
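# Illustrative sketch (not part of the original application): the red1_plot ... orange12_plot
# handlers above are structurally identical, so the same behaviour could be produced by a
# single factory. Standard PyQt5 signal/slot semantics are assumed and the helper name is
# hypothetical; it only mirrors the pattern used by the hand-written handlers.
def make_show_handler(button, plot_widget):
    """Return a slot that shows plot_widget when button is checked and holds a real parameter."""
    def handler():
        if button.isChecked():
            if button.text().split()[0] != 'None':
                plot_widget.show()
            button.setCheckable(False)
    return handler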
class another_result_explain(QWidget):
def __init__(self):
super().__init__()
# Initial setup of the sub-interface window
self.setWindowTitle('Another Result Explanation')
self.setGeometry(300, 300, 800, 500)
self.selected_para = pd.read_csv('./DataBase/Final_parameter_200825.csv')
# Compose the layout
combo_layout = QVBoxLayout()
self.title_label = QLabel("<b>Result interpretation for the scenarios that were not selected</b>")
self.title_label.setAlignment(Qt.AlignCenter)
self.blank = QLabel(self) # blank label used as a spacer line
self.show_table = QPushButton("Show Table")
self.cb = QComboBox(self)
self.cb.addItem('Normal')
self.cb.addItem('Ab21-01: Pressurizer pressure channel failure (High)')
self.cb.addItem('Ab21-02: Pressurizer pressure channel failure (Low)')
self.cb.addItem('Ab20-04: Pressurizer level channel failure (Low)')
self.cb.addItem('Ab15-07: Steam generator level channel failure (High)')
self.cb.addItem('Ab15-08: Steam generator level channel failure (Low)')
self.cb.addItem('Ab63-04: Control rod fall')
self.cb.addItem('Ab63-02: Continuous insertion of control rod')
self.cb.addItem('Ab21-12: Pressurizer PORV opening')
self.cb.addItem('Ab19-02: Pressurizer safety valve failure')
self.cb.addItem('Ab21-11: Pressurizer spray valve failed opening')
self.cb.addItem('Ab23-03: Leakage from CVCS to RCS')
self.cb.addItem('Ab60-02: Rupture of the front end of the regenerative heat exchanger')
self.cb.addItem('Ab59-02: Leakage at the rear end of the charging flow control valve')
self.cb.addItem('Ab23-01: Leakage from CVCS to CCW')
self.cb.addItem('Ab23-06: Steam generator u-tube leakage')
# Build the Explanation Alarm widgets
cb_red_alarm = QGroupBox('Main basis for diagnosis')
cb_red_alarm_layout = QGridLayout()
cb_orange_alarm = QGroupBox('Sub basis for diagnosis')
cb_orange_alarm_layout = QGridLayout()
# Create the display buttons
self.cb_red1 = QPushButton(self)
self.cb_red2 = QPushButton(self)
self.cb_red3 = QPushButton(self)
self.cb_red4 = QPushButton(self)
self.cb_orange1 = QPushButton(self)
self.cb_orange2 = QPushButton(self)
self.cb_orange3 = QPushButton(self)
self.cb_orange4 = QPushButton(self)
self.cb_orange5 = QPushButton(self)
self.cb_orange6 = QPushButton(self)
self.cb_orange7 = QPushButton(self)
self.cb_orange8 = QPushButton(self)
self.cb_orange9 = QPushButton(self)
self.cb_orange10 = QPushButton(self)
self.cb_orange11 = QPushButton(self)
self.cb_orange12 = QPushButton(self)
# Insert the widgets into the layouts
cb_red_alarm_layout.addWidget(self.cb_red1, 0, 0)
cb_red_alarm_layout.addWidget(self.cb_red2, 0, 1)
cb_red_alarm_layout.addWidget(self.cb_red3, 1, 0)
cb_red_alarm_layout.addWidget(self.cb_red4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange1, 0, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange2, 0, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange3, 1, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange5, 2, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange6, 2, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange7, 3, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange8, 3, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange9, 4, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange10, 4, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange11, 5, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange12, 5, 1)
cb_red_alarm.setLayout(cb_red_alarm_layout)
cb_orange_alarm.setLayout(cb_orange_alarm_layout)
combo_layout.addWidget(self.title_label)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.cb)
combo_layout.addWidget(self.blank)
# combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
combo_layout.addWidget(cb_red_alarm)
combo_layout.addWidget(cb_orange_alarm)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.show_table)
combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
self.setLayout(combo_layout)
self.combo_tableWidget = QTableWidget(0, 0)
self.combo_tableWidget.setFixedHeight(500)
self.combo_tableWidget.setFixedWidth(800)
# self.combo_tableWidget = QTableWidget(0, 0)
# Event handling ########################################################
self.show_table.clicked.connect(self.show_anoter_table)
self.cb.activated[str].connect(self.show_another_result_table)
self.cb.activated[str].connect(self.show_shap)
##########################################################################
# Connect button clicks to their handlers
convert_cb_red_btn = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4} # Red Button
convert_cb_red_plot = {0: self.cb_red1_plot, 1: self.cb_red2_plot, 2: self.cb_red3_plot, 3: self.cb_red4_plot}
convert_cb_orange_btn = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12} # Orange Button
convert_cb_orange_plot = {0: self.cb_orange1_plot, 1: self.cb_orange2_plot, 2: self.cb_orange3_plot, 3: self.cb_orange4_plot,
4: self.cb_orange5_plot, 5: self.cb_orange6_plot, 6: self.cb_orange7_plot, 7: self.cb_orange8_plot,
8: self.cb_orange9_plot, 9: self.cb_orange10_plot, 10: self.cb_orange11_plot, 11: self.cb_orange12_plot}
################################################################################################################
# Declare the initial button/plot widgets -> they must be created at start-up so they persist without being re-created.
# Red Button
[convert_cb_red_btn[i].clicked.connect(convert_cb_red_plot[i]) for i in range(4)]
self.cb_red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_cb_orange_btn[i].clicked.connect(convert_cb_orange_plot[i]) for i in range(12)]
self.cb_orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
################################################################################################################
self.show() # Sub UI show command
def show_shap(self, all_shap, symptom_db, compare_data):
# all_shap : holds the SHAP values for every scenario.
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# The scenario items were added to the combo box in the same order as the tables in
# all_shap, so the current index picks the matching SHAP table directly.
step1 = pd.DataFrame(all_shap[self.cb.currentIndex()], columns=self.selected_para['0'].tolist())
compared_db = compare_data['Normal' if self.cb.currentText() == 'Normal' else self.cb.currentText()[:7]]
step2 = step1.sort_values(by=0, ascending=True, axis=1)
step3 = step2[step2.iloc[:] < 0].dropna(axis=1).T
self.step4 = step3.reset_index()
col = self.step4['index']
var = [self.selected_para['0'][self.selected_para['0'] == col_].index for col_ in col]
val_col = [self.selected_para['1'][var_].iloc[0] for var_ in var]
proba = [(self.step4[0][val_num] / sum(self.step4[0])) * 100 for val_num in range(len(self.step4[0]))]
val_system = [self.selected_para['2'][var_].iloc[0] for var_ in var]
self.step4['describe'] = val_col
self.step4['probability'] = proba
self.step4['system'] = val_system
red_range = self.step4[self.step4['probability'] >= 10]
orange_range = self.step4[(self.step4['probability'] < 10) & (self.step4['probability'] > 1)]
convert_red = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4}
convert_orange = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12}
red_del = list(range(len(red_range), 4))  # indices of the red buttons left without a parameter
orange_del = list(range(len(orange_range), 12))  # indices of the orange buttons left without a parameter
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i], 2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: lightgray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i], 2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
#####################################################################################################################################
# Build the plotting data associated with each button
# Red1 Button
if self.cb_red1.text().split()[0] != 'None':
self.cb_red_plot_1.clear()
self.cb_red_plot_1.setTitle(red_range['describe'].iloc[0])
self.cb_red_plot_1.addLegend(offset=(-30,20))
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red2 Button
if self.cb_red2.text().split()[0] != 'None':
self.cb_red_plot_2.clear()
self.cb_red_plot_2.setTitle(red_range['describe'].iloc[1])
self.cb_red_plot_2.addLegend(offset=(-30, 20))
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_2.plot(x=symptom_db[0], y= | pd.DataFrame(compared_db) | pandas.DataFrame |
from datetime import datetime as dt
import os
import pandas as pd
import ntpath
import numpy as np
import math
from distutils.dir_util import copy_tree
from shutil import rmtree
import sqlite3
# 'cleanData' takes the raw data downloaded from 'http://football-data.co.uk/'
# and cleans it so that only the factors needed for testing are kept.
# This function is used to make a directory.
def make_directory(path):
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
# If a directory already exists it will be removed.
def rmv_dir(path):
if os.path.exists(path):
rmtree(path)
# This function is used to copy a file/folder.
def copy_csv(from_path, to_path):
make_directory(to_path)
if os.path.isfile(from_path):
with open(to_path, 'w') as to_file, open(from_path, 'r') as from_file:
for line in from_file:
to_file.write(line)
elif os.path.isdir(from_path):
copy_tree(from_path, to_path)
else:
raise ValueError("Copy_CSV Error. File either does not exist, or is an unsupported file type")
# Clean the original data in the raw_data folder by keeping only the columns that we need and removing the rest.
def clean(from_path, to_path, columns):
def convert_date(date):
if date == '':
return None
else:
_, file = ntpath.split(to_path)
if len(date.split('-')) == 3:
return date
else:
return dt.strptime(date, '%d/%m/%y').date()
# convert_score checks whether the score is 'Not a Number' (NaN); if it is not,
# the score is cast to an int. The else branch is the one used for most rows.
def convert_score(score):
if math.isnan(score):
return score
else:
return int(score)
df = pd.read_csv(from_path, error_bad_lines=False)
df = df[columns]
df = df[pd.notnull(df['Date'])]
df['FTHG'] = df['FTHG'].apply(convert_score)
df['FTAG'] = df['FTAG'].apply(convert_score)
df['Date'] = df['Date'].apply(convert_date)
head, _ = ntpath.split(to_path)
if not os.path.exists(head):
os.makedirs(head)
df.to_csv(to_path, index=False)
# This function is cleaning the data in the raw_data folder from every year.
def clean_everything(from_folder, to_folder, columns, from_year, to_year):
for year in range(from_year, to_year + 1):
csv = '{}-{}.csv'.format(year, year + 1)
frompath = os.path.join(from_folder, csv)
topath = os.path.join(to_folder, csv)
print("Cleaning data", frompath, "...")
clean(frompath, topath, columns)
# Concatenate the cleaned per-season files for the requested years into a single dataframe.
def combine_games(cleaned_folder_path, final_path, start_year, end_year, make_file=True):
print("Combining matches played from {} to {}...".format(start_year, end_year))
dfList = []
for year in range(start_year, end_year + 1):
file = '{}-{}.csv'.format(year, year + 1)
path = os.path.join(cleaned_folder_path, file)
df = pd.read_csv(path)
dfList.append(df)
df = pd.concat(dfList, ignore_index=True, sort=False)
if make_file:
df.to_csv(final_path, index=False)
return df
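# Usage sketch: how the helpers above fit together. The folder names and the exact column
# list are hypothetical (the real project defines its own); the calls mirror the function
# signatures defined in this file.
def example_build_dataset():
    columns = ['Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR']
    clean_everything('data/raw', 'data/cleaned', columns, 2015, 2018)
    return combine_games('data/cleaned', 'data/final/all_games.csv', 2015, 2018)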
def get_match_results_against(file_path, cleaned_folder_path, final_path, from_year, to_year):
print("Getting head-to-head results...")
team_detail, match_detail = {}, {}
match_detail_columns = [
'HT_win_rate_against',
'AT_win_rate_against'
]
for item in match_detail_columns:
match_detail[item] = []
# Get head-to-head results from from_year to to_year
df = combine_games(cleaned_folder_path, final_path, from_year, to_year, make_file=False)
for index, row in df.iterrows():
home_team = row['HomeTeam']
away_team = row['AwayTeam']
if home_team not in team_detail:
team_detail[home_team] = {}
if away_team not in team_detail:
team_detail[away_team] = {}
if away_team not in team_detail[home_team]:
team_detail[home_team][away_team] = {
'match_played': 0,
'win': 0
}
if home_team not in team_detail[away_team]:
team_detail[away_team][home_team] = {
'match_played': 0,
'win': 0
}
TD_HT_AT = team_detail[home_team][away_team]
TD_AT_HT = team_detail[away_team][home_team]
home_team_win_rate = TD_HT_AT['win'] / TD_HT_AT['match_played'] if TD_HT_AT['match_played'] > 0 else np.nan
away_team_win_rate = TD_AT_HT['win'] / TD_AT_HT['match_played'] if TD_AT_HT['match_played'] > 0 else np.nan
match_detail['HT_win_rate_against'].append(home_team_win_rate)
match_detail['AT_win_rate_against'].append(away_team_win_rate)
TD_HT_AT['match_played'] += 1
TD_AT_HT['match_played'] += 1
game_result = row['FTR']
if game_result == 'H':
TD_HT_AT['win'] += 1
elif game_result == 'A':
TD_AT_HT['win'] += 1
# Only take the last x results of df and combine them with the dataframe read from file_path,
# because we don't always want to merge all data from 1993 to 2018.
filed_f = | pd.read_csv(file_path) | pandas.read_csv |
"""
.. module:: linregress
:platform: Unix
:synopsis: Contains methods for doing linear regression.
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from disaggregator import GreenButtonDatasetAdapter as gbda
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
def run_regressions(trace_series,temps_series,cal_hdd_temp_range=range(50,60),
cal_cdd_temp_range=range(60,75),plot=False):
'''
Takes a trace series and a temperature series and runs linear regressions over a
range of cooling and heating setpoints. For each regression, temperatures below the
cooling setpoint (or above the heating setpoint) are treated as the setpoint itself,
which makes the regression analogous to one based on cooling and heating degree days.
The method returns a dictionary containing the best slopes and intercepts, together
with their corresponding setpoint temperatures and adjusted R^2 values.
'''
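# Worked illustration (not from the original code): with a cooling setpoint of 65 F, a
# reading of 80 F contributes 15 "cooling degrees" while 58 F contributes none, so only
# the part of the temperature signal above the setpoint should explain cooling energy.
# The per-setpoint regressions below follow that idea and keep the setpoint whose fit
# has the best adjusted R^2.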
results_dict = {}
df_trace = pd.DataFrame(trace_series,columns=['kwh'])
df_trace = df_trace.sort_index()
best_r2_adj_cool = float("-inf")
best_r2_adj_heat = float("-inf")
best_cdd_temp = 0
best_hdd_temp = 0
slope_cdd = None
slope_hdd = None
intercept_hdd = None
intercept_cdd = None
results_cdd = None
results_hdd = None
df_all_best_cool = None
df_all_best_heat = None
df_temps=pd.DataFrame(temps_series,columns=['temp'])
for cdd_setpoint in cal_cdd_temp_range:
df_temps_dropped=df_temps.drop(df_temps[df_temps['temp']<=cdd_setpoint].index)
df_all = | pd.merge(df_trace,df_temps_dropped,left_index=True,right_index=True) | pandas.merge |
from six import string_types, text_type, PY2
from docassemble.webapp.core.models import MachineLearning
from docassemble.base.core import DAObject, DAList, DADict
from docassemble.webapp.db_object import db
from sqlalchemy import or_, and_
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
import re
import random
import codecs
from io import open
if PY2:
import cPickle as pickle
else:
import pickle
import datetime
import os
import yaml
import json
import sys
from pattern.vector import count, KNN, SVM, stem, PORTER, words, Document
from docassemble.base.logger import logmessage
from docassemble.webapp.backend import get_info_from_file_reference
from docassemble.webapp.fixpickle import fix_pickle_obj
import docassemble.base.functions
learners = dict()
svms = dict()
lastmodtime = dict()
reset_counter = dict()
class MachineLearningEntry(DAObject):
"""An entry in the machine learning system"""
def classify(self, dependent=None):
"""Sets the dependent variable of the machine learning entry"""
if dependent is not None:
self.dependent = dependent
self.ml.set_dependent_by_id(self.id, self.dependent)
return self
def save(self):
"""Saves the entry to the data set. The independent variable must be
defined in order to save."""
args = dict(independent=self.independent)
if hasattr(self, 'dependent'):
args['dependent'] = self.dependent
if hasattr(self, 'key'):
args['key'] = self.key
if hasattr(self, 'id'):
args['id'] = self.id
if hasattr(self, 'info') and self.info is not None:
args['info'] = self.info
self.ml._save_entry(**args)
return self
def predict(self, probabilities=False):
"""Returns predictions for this entry's independent variable."""
return self.ml.predict(self.independent, probabilities=probabilities)
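# Usage sketch (hypothetical helper; the calls mirror the entry methods above and the
# learner API defined below): take the next unclassified entry of a learner, classify it,
# and ask for predictions on its independent variable.
def review_one_entry(ml_learner, label):
    entry = ml_learner.one_unclassified_entry()
    if entry is None:
        return None
    entry.classify(label)                     # stores the dependent variable for this entry
    return entry.predict(probabilities=True)  # ranked (prediction, probability) pairs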
class MachineLearner(object):
"""Base class for machine learning objects"""
def __init__(self, *pargs, **kwargs):
if len(pargs) > 0:
if ':' in pargs[0]:
raise Exception("MachineLearner: you cannot use a colon in a machine learning name")
question = docassemble.base.functions.get_current_question()
if question is not None:
self.group_id = question.interview.get_ml_store() + ':' + pargs[0]
else:
self.group_id = pargs[0]
if len(pargs) > 1:
self.initial_file = pargs[1]
if 'group_id' in kwargs:
self.group_id = kwargs['group_id']
if 'initial_file' in kwargs:
self.initial_file = kwargs['initial_file']
if kwargs.get('use_initial_file', False):
question = docassemble.base.functions.get_current_question()
if question is not None:
self.initial_file = question.interview.get_ml_store()
self.reset_counter = 0
def reset(self):
self.reset_counter += 1
def _initialize(self, reset=False):
if hasattr(self, 'initial_file'):
self.start_from_file(self.initial_file)
if hasattr(self, 'group_id') and (self.group_id not in lastmodtime or reset):
lastmodtime[self.group_id] = datetime.datetime(year=1970, month=1, day=1)
reset_counter[self.group_id] = self.reset_counter  # record this instance's reset counter for its group
def export_training_set(self, output_format='json', key=None):
self._initialize()
output = list()
for entry in self.classified_entries(key=key):
the_entry = dict(independent=entry.independent, dependent=entry.dependent)
if entry.info is not None:
the_entry['info'] = entry.info
output.append(the_entry)
if output_format == 'json':
return json.dumps(output, sort_keys=True, indent=4)
elif output_format == 'yaml':
return yaml.safe_dump(output, default_flow_style=False)
else:
raise Exception("Unknown output format " + str(output_format))
def dependent_in_use(self, key=None):
in_use = set()
if key is None:
query = db.session.query(MachineLearning.dependent).filter(MachineLearning.group_id == self.group_id).group_by(MachineLearning.dependent)
else:
query = db.session.query(MachineLearning.dependent).filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.key == key)).group_by(MachineLearning.dependent)
for record in query:
if record.dependent is not None:
in_use.add(fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')))
return sorted(in_use)
def is_empty(self):
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id).first()
if existing_entry is None:
return True
return False
def start_from_file(self, fileref):
#logmessage("Starting from file " + str(fileref))
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id).first()
if existing_entry is not None:
return
file_info = get_info_from_file_reference(fileref, folder='sources')
if 'fullpath' not in file_info or file_info['fullpath'] is None or not os.path.exists(file_info['fullpath']):
return
#raise Exception("File reference " + str(fileref) + " is invalid")
with open(file_info['fullpath'], 'rU', encoding='utf-8') as fp:
content = fp.read()
if 'mimetype' in file_info and file_info['mimetype'] == 'application/json':
aref = json.loads(content)
elif 'extension' in file_info and file_info['extension'].lower() in ['yaml', 'yml']:
aref = yaml.load(content, Loader=yaml.FullLoader)
if type(aref) is dict and hasattr(self, 'group_id'):
the_group_id = re.sub(r'.*:', '', self.group_id)
if the_group_id in aref:
aref = aref[the_group_id]
if type(aref) is list:
nowtime = datetime.datetime.utcnow()
for entry in aref:
if 'independent' in entry:
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), dependent=codecs.encode(pickle.dumps(entry.get('dependent', None)), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=True, key=entry.get('key', None), info=codecs.encode(pickle.dumps(entry['info']), 'base64').decode() if entry.get('info', None) is not None else None)
db.session.add(new_entry)
db.session.commit()
def add_to_training_set(self, independent, dependent, key=None, info=None):
self._initialize()
nowtime = datetime.datetime.utcnow()
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(independent), 'base64').decode(), dependent=codecs.encode(pickle.dumps(dependent), 'base64').decode(), info=codecs.encode(pickle.dumps(info), 'base64').decode() if info is not None else None, create_time=nowtime, modtime=nowtime, active=True, key=key)
db.session.add(new_entry)
db.session.commit()
return new_entry.id
def save_for_classification(self, indep, key=None, info=None):
self._initialize()
if key is None:
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, dependent=None, independent=codecs.encode(pickle.dumps(indep), 'base64').decode()).first()
else:
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, key=key, independent=codecs.encode(pickle.dumps(indep), 'base64').decode()).first()
if existing_entry is not None:
logmessage("entry is already there")
return existing_entry.id
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(indep), 'base64').decode(), create_time=datetime.datetime.utcnow(), active=False, key=key, info=codecs.encode(pickle.dumps(info), 'base64').decode() if info is not None else None)
db.session.add(new_entry)
db.session.commit()
return new_entry.id
def retrieve_by_id(self, the_id):
self._initialize()
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).first()
if existing_entry is None:
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
if existing_entry.dependent:
dependent = fix_pickle_obj(codecs.decode(bytearray(existing_entry.dependent, encoding='utf-8'), 'base64'))
return MachineLearningEntry(ml=self, id=existing_entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(existing_entry.independent, encoding='utf-8'), 'base64')), dependent=dependent, create_time=existing_entry.create_time, key=existing_entry.key, info=fix_pickle_obj(codecs.decode(bytearray(existing_entry.info, encoding='utf-8'), 'base64')) if existing_entry.info is not None else None)
else:
return MachineLearningEntry(ml=self, id=existing_entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(existing_entry.independent, encoding='utf-8'), 'base64')), create_time=existing_entry.create_time, key=existing_entry.key, info=fix_pickle_obj(codecs.decode(bytearray(existing_entry.info, encoding='utf-8'), 'base64')) if existing_entry.info is not None else None)
def one_unclassified_entry(self, key=None):
self._initialize()
if key is None:
entry = MachineLearning.query.filter_by(group_id=self.group_id, active=False).order_by(MachineLearning.id).first()
else:
entry = MachineLearning.query.filter_by(group_id=self.group_id, key=key, active=False).order_by(MachineLearning.id).first()
if entry is None:
return None
return MachineLearningEntry(ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), create_time=entry.create_time, key=entry.key, info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None)._set_instance_name_for_method()
def new_entry(self, **kwargs):
return MachineLearningEntry(ml=self, **kwargs)._set_instance_name_for_method()
def unclassified_entries(self, key=None):
self._initialize()
results = DAList()._set_instance_name_for_method()
results.gathered = True
if key is None:
query = MachineLearning.query.filter_by(group_id=self.group_id, active=False).order_by(MachineLearning.id).all()
else:
query = MachineLearning.query.filter_by(group_id=self.group_id, key=key, active=False).order_by(MachineLearning.id).all()
for entry in query:
results.appendObject(MachineLearningEntry, ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), create_time=entry.create_time, key=entry.key, info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None)
return results
def classified_entries(self, key=None):
self._initialize()
results = DAList()
results.gathered = True
results.set_random_instance_name()
if key is None:
query = MachineLearning.query.filter_by(group_id=self.group_id, active=True).order_by(MachineLearning.id).all()
else:
query = MachineLearning.query.filter_by(group_id=self.group_id, active=True, key=key).order_by(MachineLearning.id).all()
for entry in query:
results.appendObject(MachineLearningEntry, ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), dependent=fix_pickle_obj(codecs.decode(bytearray(entry.dependent, encoding='utf-8'), 'base64')), info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None, create_time=entry.create_time, key=entry.key)
return results
def _save_entry(self, **kwargs):
self._initialize()
the_id = kwargs.get('id', None)
need_to_reset = False
if the_id is None:
the_entry = MachineLearning(group_id=self.group_id)
existing = False
else:
the_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).first()
existing = True
if the_entry is None:
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
if 'dependent' in kwargs:
if existing and the_entry.dependent is not None and the_entry.dependent != kwargs['dependent']:
need_to_reset = True
the_entry.dependent = codecs.encode(pickle.dumps(kwargs['dependent']), 'base64').decode()
the_entry.active = True
if 'independent' in kwargs:
if existing and the_entry.independent is not None and the_entry.independent != kwargs['independent']:
need_to_reset = True
the_entry.independent = codecs.encode(pickle.dumps(kwargs['independent']), 'base64').decode()
if 'key' in kwargs:
the_entry.key = kwargs['key']
if 'info' in kwargs:
the_entry.info = codecs.encode(pickle.dumps(kwargs['info']), 'base64').decode()
the_entry.modtime = datetime.datetime.utcnow()
if not existing:
db.session.add(the_entry)
db.session.commit()
if need_to_reset:
self.reset()
def set_dependent_by_id(self, the_id, the_dependent):
self._initialize()
existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).with_for_update().first()
if existing_entry is None:
db.session.commit()
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
existing_entry.dependent = codecs.encode(pickle.dumps(the_dependent), 'base64').decode()
existing_entry.modtime = datetime.datetime.utcnow()
existing_entry.active = True
db.session.commit()
def delete_by_id(self, the_id):
self._initialize()
MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).delete()
db.session.commit()
self.reset()
def delete_by_key(self, key):
self._initialize()
MachineLearning.query.filter_by(group_id=self.group_id, key=key).delete()
db.session.commit()
self.reset()
def save(self):
db.session.commit()
def _train_from_db(self):
#logmessage("Doing train_from_db")
self._initialize()
nowtime = datetime.datetime.utcnow()
success = False
for record in MachineLearning.query.filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.active == True, MachineLearning.modtime > lastmodtime[self.group_id])).all():
#logmessage("Training...")
self._train(fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64')), fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')))
success = True
lastmodtime[self.group_id] = nowtime
return success
def delete_training_set(self):
self._initialize()
MachineLearning.query.filter_by(group_id=self.group_id).delete()
db.session.commit()
def _train(self, indep, depend):
pass
def _predict(self, indep):
pass
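# MachineLearner leaves _train() and _predict() as no-ops; the subclasses below supply an
# actual model (KNN, SVM, random forest). A minimal custom subclass could look like this
# sketch, which simply memorises exact matches -- illustrative only, not part of docassemble.
class ExactMatchMachineLearner(MachineLearner):
    def _initialize(self, reset=False):
        if not hasattr(self, '_memory') or reset:
            self._memory = {}
        return super(ExactMatchMachineLearner, self)._initialize(reset=reset)
    def _train(self, indep, depend):
        self._memory[indep] = depend
    def predict(self, indep, probabilities=False):
        self._train_from_db()
        match = self._memory.get(indep)
        if match is None:
            return list()
        return [(match, 1.0)] if probabilities else [match]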
class SimpleTextMachineLearner(MachineLearner):
"""A class used to interact with the machine learning system, using the K Nearest Neighbors method"""
def _learner(self):
return KNN()
def _initialize(self):
"""Initializes a fresh machine learner."""
need_to_reset = self.group_id not in reset_counter or self.reset_counter != reset_counter[self.group_id]
if hasattr(self, 'group_id') and (self.group_id not in learners or need_to_reset):
learners[self.group_id] = self._learner()
return super(SimpleTextMachineLearner, self)._initialize(reset=need_to_reset)
def _train(self, indep, depend):
"""Trains the machine learner given an independent variable and a corresponding dependent variable."""
if indep is None:
return
the_text = re.sub(r'[\n\r]+', r' ', indep).lower()
learners[self.group_id].train(Document(the_text.lower(), stemmer=PORTER), depend)
def predict(self, indep, probabilities=False):
"""Returns a list of predicted dependent variables for a given independent variable."""
indep = re.sub(r'[\n\r]+', r' ', indep).lower()
if not self._train_from_db():
return list()
probs = dict()
for key, value in learners[self.group_id].classify(Document(indep.lower(), stemmer=PORTER), discrete=False).items():
probs[key] = value
if not len(probs):
single_result = learners[self.group_id].classify(Document(indep.lower(), stemmer=PORTER))
if single_result is not None:
probs[single_result] = 1.0
if probabilities:
return [(x, probs[x]) for x in sorted(probs.keys(), key=probs.get, reverse=True)]
else:
return sorted(probs.keys(), key=probs.get, reverse=True)
def confusion_matrix(self, key=None, output_format=None, split=False):
"""Returns a confusion matrix for the model based on splitting the data set randomly into two pieces, training on one and testing on the other"""
if split:
list_of_dependent = self.dependent_in_use(key=key)
else:
list_of_dependent = [None]
output = ''
matrices = dict()
for current_dep in list_of_dependent:
testing_set = list()
model = self._learner()
for record in self.classified_entries(key=key):
if split:
dep_result = str(record.dependent == current_dep)
else:
dep_result = record.dependent
if random.random() < 0.5:
model.train(Document(record.independent.lower(), stemmer=PORTER), dep_result)
else:
testing_set.append((Document(record.independent.lower(), stemmer=PORTER), dep_result))
matrix = model.confusion_matrix(documents=testing_set)
matrices[current_dep] = matrix
if output_format == 'html':
if split:
output += '<h4>' + current_dep + "</h4>"
vals = matrix.keys()
output += '<table class="table table-bordered"><thead><tr><td></td><td></td><td style="text-align: center" colspan="' + str(len(vals)) + '">Actual</td></tr><tr><th></th><th></th>'
first = True
for val in vals:
output += '<th>' + val + '</th>'
output += '</tr></thead><tbody>'
for val_a in vals:
output += '<tr>'
if first:
output += '<td style="text-align: right; vertical-align: middle;" rowspan="' + str(len(vals)) + '">Predicted</td>'
first = False
output += '<th>' + val_a + '</th>'
for val_b in vals:
output += '<td>' + str(matrix[val_b].get(val_a, 0)) + '</td>'
output += '</tr>'
output += '</tbody></table>'
#output += "\n\n`" + str(matrix) + "`"
# output += '<ul>'
# for document, actual in testing_set:
# predicted = model.classify(document)
# output += '<li>Predicted: ' + predicted + '; Actual: ' + actual + '</li>'
# output += '</ul>'
if output_format == 'html':
return output
if split:
ret_val = matrices
else:
ret_val = matrices[None]
if output_format == 'json':
return json.dumps(ret_val, sort_keys=True, indent=4)
if output_format == 'yaml':
return yaml.safe_dump(ret_val, default_flow_style=False)
if output_format is None:
return ret_val
return ret_val
def reset(self):
"""Clears the cache of the machine learner"""
return super(SimpleTextMachineLearner, self).reset()
def delete_training_set(self):
"""Deletes all of the training data in the database"""
return super(SimpleTextMachineLearner, self).delete_training_set()
def delete_by_key(self, key):
"""Deletes all of the training data in the database that was added with a given key"""
return super(SimpleTextMachineLearner, self).delete_training_set(key)
def delete_by_id(self, the_id):
"""Deletes the entry in the training data with the given ID"""
return super(SimpleTextMachineLearner, self).delete_by_id(the_id)
def set_dependent_by_id(self, the_id, depend):
"""Sets the dependent variable for the entry in the training data with the given ID"""
return super(SimpleTextMachineLearner, self).set_dependent_by_id(the_id, depend)
def classified_entries(self, key=None):
"""Returns a list of entries in the data that have been classified."""
return super(SimpleTextMachineLearner, self).classified_entries(key=key)
def unclassified_entries(self, key=None):
"""Returns a list of entries in the data that have not yet been classified."""
return super(SimpleTextMachineLearner, self).unclassified_entries(key=key)
def one_unclassified_entry(self, key=None):
"""Returns the first entry in the data that has not yet been classified, or None if all entries have been classified."""
return super(SimpleTextMachineLearner, self).one_unclassified_entry(key=key)
def retrieve_by_id(self, the_id):
"""Returns the entry in the data that has the given ID."""
return super(SimpleTextMachineLearner, self).retrieve_by_id(the_id)
def save_for_classification(self, indep, key=None, info=None):
"""Creates a not-yet-classified entry in the data for the given independent variable and returns the ID of the entry."""
return super(SimpleTextMachineLearner, self).save_for_classification(indep, key=key, info=info)
def add_to_training_set(self, indep, depend, key=None, info=None):
"""Creates an entry in the data for the given independent and dependent variable and returns the ID of the entry."""
return super(SimpleTextMachineLearner, self).add_to_training_set(indep, depend, key=key, info=info)
def is_empty(self):
"""Returns True if no data have been defined, otherwise returns False."""
return super(SimpleTextMachineLearner, self).is_empty()
def dependent_in_use(self, key=None):
"""Returns a sorted list of unique dependent variables in the data."""
return super(SimpleTextMachineLearner, self).dependent_in_use(key=key)
def export_training_set(self, output_format='json'):
"""Returns the classified entries in the data as JSON or YAML."""
return super(SimpleTextMachineLearner, self).export_training_set(output_format=output_format)
def new_entry(self, **kwargs):
"""Creates a new entry in the data."""
return super(SimpleTextMachineLearner, self).new_entry(**kwargs)
class SVMMachineLearner(SimpleTextMachineLearner):
"""Machine Learning object using the Symmetric Vector Machine method"""
def _learner(self):
return SVM(extension='libsvm')
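# Usage sketch (hypothetical learner name and sample phrases; the method calls mirror the
# API defined above): train a text learner on a few labelled phrases and query it.
# SVMMachineLearner can be swapped in for SimpleTextMachineLearner without other changes.
def demo_text_learner():
    ml = SimpleTextMachineLearner('fruit classifier')
    ml.add_to_training_set('round and red', 'apple')
    ml.add_to_training_set('long and yellow', 'banana')
    return ml.predict('a small round red fruit', probabilities=True)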
class RandomForestMachineLearner(MachineLearner):
def _learner(self):
return RandomForestClassifier(n_jobs=2)
def feature_importances(self):
"""Returns the importances of each of the features"""
if not self._train_from_db():
return list()
return learners[self.group_id]['learner'].feature_importances_
def _initialize(self):
"""Initializes a fresh machine learner."""
need_to_reset = self.group_id not in reset_counter or self.reset_counter != reset_counter[self.group_id]
if hasattr(self, 'group_id') and (self.group_id not in learners or need_to_reset):
learners[self.group_id] = dict(learner=self._learner(), dep_type=None, indep_type=dict(), indep_categories=dict(), dep_categories=None)
return super(RandomForestMachineLearner, self)._initialize(reset=need_to_reset)
def _train_from_db(self):
#logmessage("Doing train_from_db")
self._initialize()
nowtime = datetime.datetime.utcnow()
success = False
data = list()
depend_data = list()
for record in MachineLearning.query.filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.active == True, MachineLearning.modtime > lastmodtime[self.group_id])).all():
indep_var = fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64'))
depend_var = fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64'))
if type(depend_var) is str:
depend_var = text_type(depend_var)
if learners[self.group_id]['dep_type'] is not None:
if type(depend_var) is not learners[self.group_id]['dep_type']:
if type(depend_var) is int and learners[self.group_id]['dep_type'] is float:
depend_var = float(depend_var)
elif type(depend_var) is float and learners[self.group_id]['dep_type'] is int:
learners[self.group_id]['dep_type'] = float
else:
raise Exception("RandomForestMachineLearner: dependent variable type was not consistent")
else:
if not isinstance(depend_var, (string_types, int, bool, float)):
raise Exception("RandomForestMachineLearner: dependent variable type for key " + repr(key) + " was not a standard variable type")
learners[self.group_id]['dep_type'] = type(depend_var)
depend_data.append(depend_var)
if isinstance(indep_var, DADict):
indep_var = indep_var.elements
if type(indep_var) is not dict:
raise Exception("RandomForestMachineLearner: independent variable was not a dictionary")
for key, val in indep_var.items():
if type(val) is str:
val = text_type(val)
if key in learners[self.group_id]['indep_type']:
if type(val) is not learners[self.group_id]['indep_type'][key]:
if type(val) is int and learners[self.group_id]['indep_type'][key] is float:
val = float(val)
elif type(val) is float and learners[self.group_id]['indep_type'][key] is int:
learners[self.group_id]['indep_type'][key] = float
else:
raise Exception("RandomForestMachineLearner: independent variable type for key " + repr(key) + " was not consistent")
else:
if not isinstance(val, (string_types, int, bool, float)):
raise Exception("RandomForestMachineLearner: independent variable type for key " + repr(key) + " was not a standard variable type")
learners[self.group_id]['indep_type'][key] = type(val)
data.append(indep_var)
success = True
if success:
df = pd.DataFrame(data)
for key, val in learners[self.group_id]['indep_type'].items():
if val is text_type:
df[key] = pd.Series(df[key], dtype="category")
learners[self.group_id]['indep_categories'][key] = df[key].cat.categories
df = pd.get_dummies(df, dummy_na=True)
if learners[self.group_id]['dep_type'] is text_type:
y = pd.Series(depend_data, dtype="category")
learners[self.group_id]['dep_categories'] = y.cat.categories
else:
y = pd.Series(depend_data)
learners[self.group_id]['learner'].fit(df, list(y))
lastmodtime[self.group_id] = nowtime
return success
def predict(self, indep, probabilities=False):
"""Returns a list of predicted dependent variables for a given independent variable."""
if not self._train_from_db():
return list()
if isinstance(indep, DADict):
indep = indep.elements
if type(indep) is not dict:
raise Exception("RandomForestMachineLearner: independent variable was not a dictionary")
indep = process_independent_data(indep)
indep_to_use = dict()
for key, val in indep.items():
if key in learners[self.group_id]['indep_type']:
if type(val) is str:
val = text_type(val)
if type(val) is not learners[self.group_id]['indep_type'][key]:
if type(val) is int and learners[self.group_id]['indep_type'][key] is float:
val = float(val)
elif type(val) is float and learners[self.group_id]['indep_type'][key] is int:
learners[self.group_id]['indep_type'][key] = float
else:
raise Exception("RandomForestMachineLearner: the independent variable type for key " + repr(key) + " was not consistent. Stored was " + str(learners[self.group_id]['indep_type'][key]) + " and type was " + str(type(val)))
else:
raise Exception("RandomForestMachineLearner: independent variable key " + repr(key) + " was not recognized")
if isinstance(val, string_types):
if val not in learners[self.group_id]['indep_categories'][key]:
val = np.nan
indep_to_use[key] = val
df = pd.DataFrame([indep_to_use])
for key, val in indep_to_use.items():
if learners[self.group_id]['indep_type'][key] is text_type:
#df[key] = pd.Series(df[key]).astype('category', categories=learners[self.group_id]['indep_categories'][key])
df[key] = | pd.Series(df[key]) | pandas.Series |
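The completion above recreates a categorical column at prediction time. As a hedged aside (not part of the extracted row), the sketch below shows how pinning a column to the categories stored at training time keeps pd.get_dummies output aligned between fitting and prediction; the column and category names are invented for illustration.
import pandas as pd
from pandas.api.types import CategoricalDtype

stored_categories = pd.Index(["red", "green", "blue"])    # categories recorded during training
row = pd.DataFrame([{"color": "green", "size": 2.0}])     # a single prediction-time observation

# Reattach the stored categories so unseen values become NaN and the dummy
# columns come out in the same order as they did when the model was fit.
row["color"] = pd.Series(row["color"]).astype(CategoricalDtype(categories=stored_categories))
encoded = pd.get_dummies(row, dummy_na=True)
print(encoded.columns.tolist())
# non-dummy columns first, then one dummy column per stored category plus a NaN indicator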
import copy
import os
import re
from functools import reduce
from os.path import join
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from custom.const import get_fig_folder
db_order = [
'Traumabase',
'UKBB',
'MIMIC',
'NHIS',
]
markers_db = {
'Traumabase': 'o',
'UKBB': '^',
'MIMIC': 'v',
'NHIS': 's',
}
task_order = [
'TB/death_pvals',
'TB/hemo',
'TB/hemo_pvals',
# 'TB/platelet',
'TB/platelet_pvals',
'TB/septic_pvals',
'UKBB/breast_25',
'UKBB/breast_pvals',
'UKBB/fluid_pvals',
'UKBB/parkinson_pvals',
'UKBB/skin_pvals',
'MIMIC/hemo_pvals',
'MIMIC/septic_pvals',
# 'NHIS/bmi_pvals',
'NHIS/income_pvals',
]
task_order_renamed = [t.replace('_', '\\_').replace('pvals', 'screening') for t in task_order]
rename_db = {
'TB': 'Traumabase',
}
def run_feature_importance(graphics_folder, results_folder, n, average_folds,
mode, hue_by_task):
def retrive_importance(n):
filenames = [
f'{n}_importances.csv',
f'{n}_mv_props.csv',
]
dfs = []
# Aggregate feature importances of all tasks
for root, subdirs, files in os.walk(results_folder):
print(root)
res = re.search(join(results_folder, '/(.*)/RS'), root)
if res is None:
continue
if not all([f in files for f in filenames]):
continue
task = res.group(1)
db = task.split('/')[0]
res = re.search('RS0_T(.)_', root)
trial = res.group(1)
task = task.replace('_', '\\_').replace('pvals', 'screening')
importance = pd.read_csv(join(root, f'{n}_importances.csv'), index_col=0)
mv_props = pd.read_csv(join(root, f'{n}_mv_props.csv'), index_col=0)
mv_props.set_index('fold', inplace=True)
importance.reset_index(inplace=True)
importance.set_index(['fold', 'repeat'], inplace=True)
importance_avg = importance.groupby(level='fold').mean()
if average_folds:
importance_avg = importance_avg.mean()
importance_avg = importance_avg.to_frame().T
mv_props = mv_props.mean()
mv_props = mv_props.to_frame().T
id_vars = None
index = ['feature']
else:
importance_avg.reset_index(inplace=True)
mv_props.reset_index(inplace=True)
id_vars = ['fold']
index = ['fold', 'feature']
importance_avg = pd.melt(importance_avg, id_vars=id_vars,
var_name='feature', value_name='importance_abs')
importance_avg.set_index(index, inplace=True)
mv_props = pd.melt(mv_props, id_vars=id_vars, var_name='feature', value_name='mv_prop')
mv_props.set_index(index, inplace=True)
df = pd.concat([importance_avg, mv_props], axis=1)
assert not | pd.isna(df) | pandas.isna |
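The row above is truncated at the masked pandas.isna call inside an assert. A hedged caveat worth illustrating with made-up column names: pd.isna on a DataFrame is element-wise, so a "no missing values" assertion normally needs an explicit reduction such as .any().any() before it can be negated.
import numpy as np
import pandas as pd

df = pd.DataFrame({"importance_abs": [0.4, 0.6], "mv_prop": [0.1, np.nan]})

print(pd.isna(df))                             # element-wise boolean DataFrame
assert pd.isna(df).any().any()                 # at least one missing value present
assert not pd.isna(df.dropna()).any().any()    # reduce to a single bool before negating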
import os
import matplotlib
matplotlib.use("agg")
from matplotlib import pyplot as plt
import seaborn as sns
import datetime
import pandas as pd
import matplotlib.dates as mdates
import common
infile = snakemake.input[0]
outfile = snakemake.output[0]
df = pd.read_table(infile)
df["time"] = | pd.to_datetime(df["time"]) | pandas.to_datetime |
# coding: utf-8
import numpy as np
import tensorflow as tf
import cv2 as cv
import time
import base64
import pandas as pd
from utils.visualization_utils import visualize_boxes_and_labels_on_image_array # Taken from Google Research GitHub
from utils.mscoco_label_map import category_index
############################# MODIFY BELOW #############################
# Generate the base64 string of each frame, not recommended
ENCODE_B64 = False
# Prints information about the detection run in the console
VERBOSE = True
# Show video being processed in window
SHOW_PROCESS = True
# Create a video with the bounding boxes
WRITE_VIDEO_OUT = True
# Minimum score threshold for a bounding box to be recorded in data
THRESHOLD = 0.2
OUTPUT_FPS = 24.0
# Change name of video being processed
VIDEO_FILE_NAME = "../videos/DroneCarFestival3"
VIDEO_EXTENSION = ".mp4"
############################# MODIFY ABOVE #############################
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile("frozen_inference_graph.pb", 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading the videocapture objects
cap = cv.VideoCapture(f'{VIDEO_FILE_NAME}{VIDEO_EXTENSION}')
if WRITE_VIDEO_OUT:
# Setup the video creation process
fourcc = cv.VideoWriter_fourcc(*'MP4V')
out = cv.VideoWriter(f'{VIDEO_FILE_NAME}WithBoundingBoxes.mp4', fourcc, OUTPUT_FPS, (1280, 720))
out_orig = cv.VideoWriter(f'{VIDEO_FILE_NAME}Original.mp4', fourcc, OUTPUT_FPS, (1280, 720))
# Start the session
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the level of confidence for each of the detected objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
frame_base64_ls = [] # The list containing the frame in base64 format and their timestamp
frame_info_ls = [] # The list containing the information about the frames
counter = 0
while cap.isOpened():
ret, image = cap.read()
if ret:
# Retrieve timestamp
curr_frame = int(cap.get(cv.CAP_PROP_POS_FRAMES))
# Convert image into an np array
image_np = np.array(image)
image_np_expanded = np.expand_dims(image_np, axis=0)
t1 = time.time()
# Run the algorithm, retrieve the boxes, score and classes
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
t2 = time.time()
# Remove the leading 1 dimension
boxes = np.squeeze(boxes)
classes = np.squeeze(classes).astype(np.int32)
scores = np.squeeze(scores)
# Draw the bounding boxes with information about the predictions
visualize_boxes_and_labels_on_image_array(
image_np,
boxes,
classes,
scores,
category_index,
use_normalized_coordinates=True,
line_thickness=2
)
# Encode the image into base64
if ENCODE_B64:
retval, buffer = cv.imencode('.png', image_np)
img_str = base64.b64encode(buffer)
image_b64 = 'data:image/png;base64,{}'.format(img_str.decode('ascii'))
# Append the image along with timestamp to the frame_base64_ls
frame_base64_ls.append([curr_frame, image_b64])
# Update the output video
if WRITE_VIDEO_OUT:
out.write(image_np)
out_orig.write(image) # Writes the original image
# Process the information about the video at that exact timestamp
timestamp_df = pd.DataFrame([curr_frame for _ in range(int(num))], columns=["frame"])
boxes_df = pd.DataFrame(boxes, columns=['y', 'x', 'bottom', 'right'])
classes_df = pd.DataFrame(classes, columns=['class'])
score_df = pd.DataFrame(scores, columns=['score'])
# Maps a np array of integer to their coco index
coco_map = np.vectorize(lambda i: category_index[i]['name'])
classes_str_df = pd.DataFrame(coco_map(classes), columns=['class_str'])
# Concatenate all the information
info_df = | pd.concat([timestamp_df, boxes_df, classes_df, classes_str_df, score_df], axis=1) | pandas.concat |
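To round off, a hedged sketch of the column-wise pandas.concat used in the final completion above: each per-detection DataFrame shares the same default RangeIndex, so axis=1 stitches them into one row per detection (the toy values below are invented, not taken from the video).
import pandas as pd

timestamp_df = pd.DataFrame({"frame": [12, 12]})
boxes_df = pd.DataFrame([[0.10, 0.20, 0.55, 0.60],
                         [0.30, 0.35, 0.75, 0.90]],
                        columns=["y", "x", "bottom", "right"])
score_df = pd.DataFrame({"score": [0.91, 0.45]})

# axis=1 aligns on the shared 0..n-1 index and simply appends columns.
info_df = pd.concat([timestamp_df, boxes_df, score_df], axis=1)
print(info_df.columns.tolist())   # ['frame', 'y', 'x', 'bottom', 'right', 'score']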