| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
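Each row pairs a code-completion prompt with the completion that fills the blank and the pandas API it exercises. A minimal sketch (an assumption, not part of the source) of inspecting those columns, supposing the rows were exported to a hypothetical rows.parquet:
import pandas as pd
rows = pd.read_parquet("rows.parquet")  # hypothetical export of the table above
print(rows[["prompt", "completion", "api"]].head())
print(rows["api"].value_counts().head(10))  # most common target APIs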
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('categories', [
['b', 'a', 'c'],
['a', 'b', 'c', 'd'],
])
def test_factorize(categories, ordered):
cat = pd.Categorical(['b', 'b', 'a', 'c', None],
categories=categories,
ordered=ordered)
labels, uniques = pd.factorize(cat)
expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a', 'c'],
categories=categories,
ordered=ordered)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(['b', 'b', None, 'a'])
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([1, 1, -1, 0], dtype=np.intp)
expected_uniques = pd.Categorical(['a', 'b'])
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort_ordered():
cat = pd.Categorical(['b', 'b', None, 'a'],
categories=['c', 'b', 'a'],
ordered=True)
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([0, 0, -1, 1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_isin_cats():
# GH2003
cat = pd.Categorical(["a", "b", np.nan])
result = cat.isin(["a", np.nan])
expected = np.array([True, False, True], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
result = cat.isin(["a", "c"])
expected = np.array([True, False, False], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])])
def test_isin_empty(empty):
s = pd.Categorical(["a", "b"])
# pylint: disable=C0103,C0301,E0401
"""Process raw IEM data, output single optimized pickle file"""
import argparse
import os
import sys
import time
from multiprocessing import Pool
from pathlib import Path
import numpy as np
import pandas as pd
import ruptures as rpt
# note: this would not install properly on AWS EB!
# only needed for preprocessing, not the app, so
# it has been removed as a dependency from this project's Pipfile
# bias-correction = {git = "https://github.com/pankajkarman/bias_correction.git"}
# git+https://github.com/pankajkarman/bias_correction.git#egg=bias-correction
from bias_correction import BiasCorrection
from scipy.signal import find_peaks
# this hack is done to allow import from luts.py in app dir
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from luts import decades
def filter_spurious(station, tname="ts"):
"""Identify and remove spurious observations,
returns filtered data and flagged observations"""
station = station.set_index(tname)
# ignore missing speed data
ws_series = station[~np.isnan(station["sped"])]["sped"]
# identify and remove completely obvious peaks, to help with dip detection
obv_peaks, _ = find_peaks(ws_series, prominence=30, threshold=50)
# if-else in case no obvious spikes
if obv_peaks.shape[0] != 0:
obv_spikes = ws_series[obv_peaks]
ws_series = ws_series.drop(obv_spikes.index)
else:
obv_spikes = pd.Series()
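# Illustrative note (not from the original script): with these arguments,
# scipy.signal.find_peaks only flags samples that tower over their neighbours, e.g.
# >>> find_peaks(np.array([5., 90., 5., 10., 12., 10.]), prominence=30, threshold=50)[0]
# array([1])
# so the 90-unit spike at index 1 is caught while the small bump at index 4 is ignored.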
import warnings
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
import seaborn as sns
from bdpy.fig import box_off
def makeplots(
df,
x=None, y=None,
x_list=None,
subplot=None, subplot_list=None,
figure=None, figure_list=None,
group=None, group_list=None,
bar_group_width=0.8,
plot_type='bar', horizontal=False, ebar=None,
plot_size_auto=True, plot_size=(4, 0.3),
max_col=None,
y_lim=None, y_ticks=None,
title=None, x_label=None, y_label=None,
fontsize=12, tick_fontsize=9, points=100,
style='default', colorset=None,
chance_level=None, chance_level_style={'color': 'k', 'linewidth': 1},
swarm_dot_color='gray',
swarm_dot_size=3, swarm_dot_alpha=0.7,
swarm_violin_color='blue',
box_color='blue', box_width=0.5, box_linewidth=1,
box_meanprops=dict(linestyle='-', linewidth=1.5, color='red'),
box_medianprops={},
removenan=True,
verbose=False, colors=None, reverse_x=False
):
'''Make plots.
Parameters
----------
df : pandas.core.frame.DataFrame
x : str
y : str
x_list : list
subplot : str
subplot_list : list
figure : str
figure_list : list
plot_type : {'bar', 'violin', 'paired violin', 'swarm', 'swarm+box'}
horizontal: bool
plot_size : (width, height)
y_lim : (y_min, y_max)
y_ticks : array_like
title, x_label, y_label : str
fontsize : int
tick_fontsize : int
style : str
verbose : bool
Returns
-------
fig : matplotlib.figure.Figure or list of matplotlib.figure.Figure
'''
x_keys = sorted(df[x].unique())
subplot_keys = sorted(df[subplot].unique())
figure_keys = sorted(df[figure].unique()) if figure is not None else [None]
group_keys = sorted(df[group].unique()) if group is not None else [None]
x_list = x_keys if x_list is None else x_list
subplot_list = subplot_keys if subplot_list is None else subplot_list
figure_list = figure_keys if figure_list is None else figure_list
group_list = group_keys if group_list is None else group_list
if reverse_x:
x_list = x_list[::-1]
group_list = group_list[::-1]
grouping = group is not None
if plot_type == 'paired violin':
if not grouping:
raise RuntimeError('plot type "paired violin" can be used only when grouping is enabled')
comparison_pairs = list(__split_list(group_list, 2))
if grouping:
warnings.warn('"grouping mode" is still experimental and will not work correctly yet!')
if verbose:
print('X: {}'.format(x_list))
print('Subplot: {}'.format(subplot_list))
if grouping:
print('Group by: {} ({})'.format(group_keys, group_list))
if figure is not None:
print('Figures: {}'.format(figure_list))
col_num = np.ceil(np.sqrt(len(subplot_list)))
row_num = int(np.ceil(len(subplot_list) / col_num))
col_num = int(col_num)
if max_col is not None and col_num > max_col:
col_num = max_col
row_num = int(np.ceil(len(subplot_list) / col_num))
# Plot size
if plot_size_auto:
if horizontal:
plot_size = (plot_size[0], plot_size[1] * len(x_list))
else:
plot_size = (plot_size[0] * len(x_list), plot_size[1])
# Figure size
figsize = (col_num * plot_size[0], row_num * plot_size[1]) # (width, height)
if verbose:
print('Subplot in {} x {}'.format(row_num, col_num))
# Figure instances
if plot_type == 'paired violin':
figure_instances = [
{
'label': f,
'comparison pair': p
}
for f in figure_list
for p in comparison_pairs
]
else:
figure_instances = [
{
'label': f
}
for f in figure_list
]
figs = []
# Figure loop
for figure_instance in figure_instances:
fig_label = figure_instance['label']
if verbose:
if fig_label is None:
print('Creating a figure')
else:
print('Creating figure for {}'.format(fig_label))
plt.style.use(style)
fig = plt.figure(figsize=figsize)
# Subplot loop
for i, sp_label in enumerate(subplot_list):
if verbose:
print('Creating subplot for {}'.format(sp_label))
# Set subplot position
col = int(i / row_num)
row = i - col * row_num
sbpos = (row_num - row - 1) * col_num + col + 1
# Get data
if plot_type == 'paired violin':
group_list = figure_instance['comparison pair']
data = __get_data(df, subplot, sp_label,
x, x_list, figure, fig_label, y,
group, group_list, grouping, removenan)
if not isinstance(sp_label, list):
if grouping:
data_mean = [[np.nanmean(d) for d in data_t] for data_t in data]
else:
data_mean = [np.nanmean(d) for d in data]
else:
data_mean = None
# Plot
ax = plt.subplot(row_num, col_num, sbpos)
if not style == 'ggplot':
if horizontal:
ax.grid(axis='x', color='k', linestyle='-', linewidth=0.5)
else:
ax.grid(axis='y', color='k', linestyle='-', linewidth=0.5)
xpos = range(len(x_list))
if plot_type == 'bar':
__plot_bar(
ax, xpos, data_mean,
horizontal=horizontal,
grouping=grouping, group_list=group_list,
bar_group_width=bar_group_width
)
elif plot_type == 'violin':
group_label_list = __plot_violin(
ax, xpos, data,
horizontal=horizontal,
grouping=grouping, group_list=group_list,
bar_group_width=bar_group_width, points=points
)
elif plot_type == 'paired violin':
group_label_list = __plot_violin_paired(
ax, xpos, data,
horizontal=horizontal,
grouping=grouping, group_list=group_list,
points=points, colors=colors
)
elif plot_type == 'swarm':
__plot_swarm(
ax, x_list, data,
horizontal=horizontal, grouping=grouping,
dot_color=swarm_dot_color,
dot_size=swarm_dot_size,
dot_alpha=swarm_dot_alpha,
violin_color=swarm_violin_color,
)
elif plot_type == 'swarm+box':
group_label_list = __plot_swarmbox(
ax, x_list, data,
horizontal=horizontal, reverse_x=reverse_x,
grouping=grouping, group_list=group_list,
dot_color=swarm_dot_color,
dot_size=swarm_dot_size,
dot_alpha=swarm_dot_alpha,
box_color=box_color, box_width=box_width, box_linewidth=box_linewidth,
box_meanprops=box_meanprops,
box_medianprops=box_medianprops
)
else:
raise ValueError('Unknown plot_type: {}'.format(plot_type))
if not horizontal:
# Vertical plot
if grouping and plot_type == 'swarm+box': # grouping for swarm+box is emulated, so the xticks need to be adjusted
ax.set_xlim([ -1, len(x_list) * len(group_list) ])
new_x_list = np.arange(len(x_list)) * len(group_list) + len(group_list) / 2. - 0.5
ax.set_xticks(new_x_list)
else:
ax.set_xlim([-1, len(x_list)])
ax.set_xticks(range(len(x_list)))
if row == 0:
ax.set_xticklabels(x_list, rotation=-45, ha='left', fontsize=tick_fontsize)
else:
ax.set_xticklabels([])
if y_lim is None:
pass
else:
ax.set_ylim(y_lim)
if y_ticks is not None:
ax.set_yticks(y_ticks)
ax.tick_params(axis='y', labelsize=tick_fontsize, grid_color='gray', grid_linestyle='--', grid_linewidth=0.8)
if chance_level is not None:
plt.hlines(chance_level, xmin=plt.gca().get_xlim()[0], xmax=plt.gca().get_xlim()[1], **chance_level_style)
else:
# Horizontal plot
if grouping and plot_type == 'swarm+box': # grouping for swarm+box is emulated, so the yticks need to be adjusted
ax.set_ylim([ -1, len(x_list) * len(group_list) ])
new_x_list = np.arange(len(x_list)) * len(group_list) + len(group_list) / 2. - 0.5
ax.set_yticks(new_x_list)
else:
ax.set_ylim([-1, len(x_list)])
ax.set_yticks(range(len(x_list)))
if col == 0:
ax.set_yticklabels(x_list, fontsize=tick_fontsize)
else:
ax.set_yticklabels([])
if y_lim is None:
pass
else:
ax.set_xlim(y_lim)
if y_ticks is not None:
ax.set_xticks(y_ticks)
ax.tick_params(axis='x', labelsize=tick_fontsize, grid_color='gray', grid_linestyle='--', grid_linewidth=0.8)
if chance_level is not None:
plt.vlines(chance_level, ymin=plt.gca().get_ylim()[0], ymax=plt.gca().get_ylim()[1], **chance_level_style)
# Inset title
x_range = plt.gca().get_xlim()[1] - plt.gca().get_xlim()[0]
y_range = plt.gca().get_ylim()[1] - plt.gca().get_ylim()[0]
tpos = (
plt.gca().get_xlim()[0] + 0.03 * x_range,
plt.gca().get_ylim()[1] - 0.03 * y_range
)
ax.text(tpos[0], tpos[1], sp_label, horizontalalignment='left', verticalalignment='top', fontsize=fontsize, bbox=dict(facecolor='white', edgecolor='none'))
# Inset legend
if grouping:
if 'violin' in plot_type:
if i == len(subplot_list) - 1:
group_label_list = group_label_list[::-1]
ax.legend(*zip(*group_label_list), loc='upper left', bbox_to_anchor=(1, 1))
elif plot_type == 'swarm+box':
if i == len(subplot_list) - 1:
ax.legend(*zip(*group_label_list), loc='upper left', bbox_to_anchor=(1, 1))
else:
plt.legend()
box_off(ax)
plt.tight_layout()
# Draw X/Y labels and title ------------------------------------------
ax = fig.add_axes([0, 0, 1, 1])
ax.patch.set_alpha(0.0)
ax.set_axis_off()
# X Label
if x_label is not None:
txt = y_label if horizontal else x_label
ax.text(0.5, 0, txt, verticalalignment='center', horizontalalignment='center', fontsize=fontsize)
# Y label
if y_label is not None:
txt = x_label if horizontal else y_label
ax.text(0, 0.5, txt, verticalalignment='center', horizontalalignment='center', fontsize=fontsize, rotation=90)
# Figure title
if title is not None:
if fig_label is None:
ax.text(0.5, 0.99, title, horizontalalignment='center', fontsize=fontsize)
else:
ax.text(0.5, 0.99, '{}: {}'.format(title, fig_label), horizontalalignment='center', fontsize=fontsize)
figs.append(fig)
if figure is None:
return figs[0]
else:
return figs
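# Illustrative usage (a sketch, not from the source; assumes a long-format DataFrame
# with one row per measurement):
# >>> df = pd.DataFrame({'roi': ['V1', 'V1', 'V2', 'V2'],
# ...                    'subject': ['s1', 's2', 's1', 's2'],
# ...                    'accuracy': [0.61, 0.58, 0.55, 0.52]})
# >>> fig = makeplots(df, x='roi', y='accuracy', subplot='subject',
# ...                 plot_type='bar', y_label='accuracy')
# >>> fig.savefig('accuracy.pdf')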
def __plot_bar(
ax, xpos, data_mean,
horizontal=False,
grouping=False, group_list=[],
bar_group_width=0.8
):
if grouping:
ydata = np.array(data_mean)
n_grp = ydata.shape[1]
w = bar_group_width / n_grp
for grpi in range(n_grp):
offset = grpi * w
if horizontal:
plt.barh(np.array(xpos) - bar_group_width / 2 + (bar_group_width / 2) * w + offset, ydata[:, grpi], height=w, label=group_list[grpi])
else:
plt.bar(np.array(xpos) - bar_group_width / 2 + (bar_group_width / 2) * w + offset, ydata[:, grpi], width=w, label=group_list[grpi])
else:
if horizontal:
ax.barh(xpos, data_mean, color='gray')
else:
ax.bar(xpos, data_mean, color='gray')
def __plot_violin(
ax, xpos, data,
horizontal=False,
grouping=False, group_list=[],
bar_group_width=0.8, points=100
):
if grouping:
n_grp = len(group_list)
w = bar_group_width / (n_grp + 1)
group_label_list = []
for grpi in range(n_grp):
offset = grpi * w - (n_grp // 2) * w
xpos_grp = np.array(xpos) + offset #- bar_group_width / 2 + (bar_group_width / 2) * w + offset
ydata_grp = [a_data[grpi] for a_data in data]
violinobj = ax.violinplot(
ydata_grp, xpos_grp,
vert=not horizontal,
showmeans=True, showextrema=False, showmedians=False, points=points,
widths=w * 0.8)
color = violinobj["bodies"][0].get_facecolor().flatten()
group_label_list.append((mpatches.Patch(color=color), group_list[grpi]))
else:
ax.violinplot(data, xpos, vert=not horizontal, showmeans=True, showextrema=False, showmedians=False, points=points)
group_label_list = None
return group_label_list
def __plot_violin_paired(
ax, xpos, data,
horizontal=False,
grouping=False, group_list=[],
points=100, colors=None
):
assert grouping
n_grp = len(group_list)
assert n_grp == 2
group_label_list = []
if colors is not None and len(colors) >= 2:
__draw_half_violin(ax, [a_data[0] for a_data in data], points, xpos, color=colors[0], left=True, vert=not horizontal)
__draw_half_violin(ax, [a_data[1] for a_data in data], points, xpos, color=colors[1], left=False, vert=not horizontal)
else:
colors = []
color = __draw_half_violin(ax, [a_data[0] for a_data in data], points, xpos, color=None, left=True, vert=not horizontal)
colors.append(color)
color = __draw_half_violin(ax, [a_data[1] for a_data in data], points, xpos, color=None, left=False, vert=not horizontal)
colors.append(color)
for color, label in zip(colors, group_list):
group_label_list.append((mpatches.Patch(color=color), label))
return group_label_list
def __plot_swarm(
ax, x_list, data,
horizontal=False,
grouping=False,
dot_color='#595959', dot_size=1.5, dot_alpha=0.8,
violin_color='blue'
):
if grouping:
raise RuntimeError("The function of grouping on `swarm` plot is not implemeted yet.")
else:
df_list = []
for xi, x_lbl in enumerate(x_list):
a_df = pd.DataFrame.from_dict({'y': data[xi]})
a_df['x'] = x_lbl
df_list.append(a_df)
tmp_df = pd.concat(df_list)
from __future__ import division  # brings in Python 3 division rules for mixed types
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..kabam_exe import Kabam
test = {}
class TestKabam(unittest.TestCase):
"""
Unit tests for Kabam model.
: unittest will
: 1) call the setUp method,
: 2) then call every method starting with "test",
: 3) then the tearDown method
"""
print("kabam unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for Kabam unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open Kabam qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for Kabam unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_kabam_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
"""
Routines for filling missing data.
"""
from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
if TYPE_CHECKING:
from pandas import Index
def check_value_size(value, mask: np.ndarray, length: int):
"""
Validate the size of the values passed to ExtensionArray.fillna.
"""
if is_array_like(value):
if len(value) != length:
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {length}"
)
value = value[mask]
return value
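# Illustrative behaviour (sketch based on the code above): an array-like 'value'
# of matching length is reduced to the masked positions, while scalars pass through, e.g.
# >>> check_value_size(np.array([10, 20, 30]), np.array([True, False, True]), 3)
# array([10, 30])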
def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
Parameters
----------
arr : ArrayLike
values_to_mask: list, tuple, or scalar
Returns
-------
np.ndarray[bool]
"""
# When called from Block.replace/replace_list, values_to_mask is a scalar
# known to be holdable by arr.
# When called from Series._single_replace, values_to_mask is tuple or list
dtype, values_to_mask = infer_dtype_from(values_to_mask)
# error: Argument "dtype" to "array" has incompatible type "Union[dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
# _DTypeDict, Tuple[Any, Any]]]"
values_to_mask = np.array(values_to_mask, dtype=dtype) # type: ignore[arg-type]
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
# GH 21977
mask = np.zeros(arr.shape, dtype=bool)
for x in nonna:
if is_numeric_v_string_like(arr, x):
# GH#29553 prevent numpy deprecation warnings
pass
else:
mask |= arr == x
if na_mask.any():
mask |= isna(arr)
if not isinstance(mask, np.ndarray):
# e.g. if arr is IntegerArray, then mask is BooleanArray
mask = mask.to_numpy(dtype=bool, na_value=False)
return mask
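# Illustrative behaviour (sketch based on the code above): positions equal to any
# non-NA value in values_to_mask, plus NA positions when an NA was requested, come back True, e.g.
# >>> mask_missing(np.array([1.0, 2.0, np.nan]), [2.0, np.nan])
# array([False,  True,  True])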
def clean_fill_method(method, allow_nearest: bool = False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
return method
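# Illustrative behaviour (sketch based on the code above):
# >>> clean_fill_method("ffill")
# 'pad'
# >>> clean_fill_method("nearest")                      # raises ValueError
# >>> clean_fill_method("nearest", allow_nearest=True)
# 'nearest'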
# interpolation methods that dispatch to np.interp
NP_METHODS = ["linear", "time", "index", "values"]
# interpolation methods that dispatch to _interpolate_scipy_wrapper
SP_METHODS = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"krogh",
"spline",
"polynomial",
"from_derivatives",
"piecewise_polynomial",
"pchip",
"akima",
"cubicspline",
]
def clean_interp_method(method: str, index: Index, **kwargs) -> str:
order = kwargs.get("order")
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or polynomial.")
valid = NP_METHODS + SP_METHODS
if method not in valid:
raise ValueError(f"method must be one of {valid}. Got '{method}' instead.")
if method in ("krogh", "piecewise_polynomial", "pchip"):
if not index.is_monotonic:
raise ValueError(
f"{method} interpolation requires that the index be monotonic."
)
return method
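# Illustrative behaviour (sketch based on the code above; pd.Index shown for brevity):
# >>> clean_interp_method("polynomial", pd.Index([0, 1, 2]), order=2)
# 'polynomial'
# >>> clean_interp_method("spline", pd.Index([0, 1, 2]))   # raises ValueError: order is required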
def find_valid_index(values, *, how: str) -> int | None:
"""
Retrieves the index of the first valid value.
Parameters
----------
values : ndarray or ExtensionArray
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
int or None
"""
assert how in ["first", "last"]
if len(values) == 0: # early stop
return None
is_valid = ~isna(values)
if values.ndim == 2:
is_valid = is_valid.any(1) # reduce axis 1
if how == "first":
idxpos = is_valid[::].argmax()
elif how == "last":
idxpos = len(values) - 1 - is_valid[::-1].argmax()
chk_notna = is_valid[idxpos]
if not chk_notna:
return None
return idxpos
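# Illustrative behaviour (sketch based on the code above):
# >>> find_valid_index(np.array([np.nan, 1.0, 2.0, np.nan]), how="first")
# 1
# >>> find_valid_index(np.array([np.nan, 1.0, 2.0, np.nan]), how="last")
# 2
# >>> find_valid_index(np.array([np.nan, np.nan]), how="first") is None
# True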
def interpolate_array_2d(
data: np.ndarray,
method: str = "pad",
axis: int = 0,
index: Index | None = None,
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
coerce: bool = False,
downcast: str | None = None,
**kwargs,
) -> None:
"""
Wrapper to dispatch to either interpolate_2d or _interpolate_2d_with_fill.
Notes
-----
Alters 'data' in-place.
"""
try:
m = clean_fill_method(method)
except ValueError:
m = None
if m is not None:
if fill_value is not None:
# similar to validate_fillna_kwargs
raise ValueError("Cannot pass both fill_value and method")
interpolate_2d(
data,
method=m,
axis=axis,
limit=limit,
limit_area=limit_area,
)
else:
assert index is not None # for mypy
_interpolate_2d_with_fill(
data=data,
index=index,
axis=axis,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
**kwargs,
)
return
def _interpolate_2d_with_fill(
data: np.ndarray, # floating dtype
index: Index,
axis: int,
method: str = "linear",
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
**kwargs,
) -> None:
"""
Column-wise application of _interpolate_1d.
Notes
-----
Alters 'data' in-place.
The signature does differ from _interpolate_1d because it only
includes what is needed for Block.interpolate.
"""
# validate the interp method
clean_interp_method(method, index, **kwargs)
if is_valid_na_for_dtype(fill_value, data.dtype):
fill_value = na_value_for_dtype(data.dtype, compat=False)
if method == "time":
if not needs_i8_conversion(index.dtype):
raise ValueError(
"time-weighted interpolation only works "
"on Series or DataFrames with a "
"DatetimeIndex"
)
method = "values"
valid_limit_directions = ["forward", "backward", "both"]
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
raise ValueError(
"Invalid limit_direction: expecting one of "
f"{valid_limit_directions}, got '{limit_direction}'."
)
if limit_area is not None:
valid_limit_areas = ["inside", "outside"]
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError(
f"Invalid limit_area: expecting one of {valid_limit_areas}, got "
f"{limit_area}."
)
# default limit is unlimited GH #16282
limit = algos.validate_limit(nobs=None, limit=limit)
indices = _index_to_interp_indices(index, method)
def func(yvalues: np.ndarray) -> None:
# process 1-d slices in the axis direction
_interpolate_1d(
indices=indices,
yvalues=yvalues,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
bounds_error=False,
**kwargs,
)
# interp each column independently
np.apply_along_axis(func, axis, data)
return
def _index_to_interp_indices(index: Index, method: str) -> np.ndarray:
"""
Convert Index to ndarray of indices to pass to NumPy/SciPy.
"""
xarr = index._values
if needs_i8_conversion(xarr.dtype):
# GH#1646 for dt64tz
xarr = xarr.view("i8")
if method == "linear":
inds = xarr
inds = cast(np.ndarray, inds)
else:
inds = np.asarray(xarr)
if method in ("values", "index"):
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
return inds
def _interpolate_1d(
indices: np.ndarray,
yvalues: np.ndarray,
method: str | None = "linear",
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
bounds_error: bool = False,
order: int | None = None,
**kwargs,
):
"""
Logic for the 1-d interpolation. The input
indices and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
take it as an argument.
Notes
-----
Fills 'yvalues' in-place.
"""
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
return
if valid.all():
return
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
first_valid_index = find_valid_index(yvalues, how="first")
if first_valid_index is None: # no nan found in start
first_valid_index = 0
start_nans = set(range(first_valid_index))
last_valid_index = find_valid_index(yvalues, how="last")
if last_valid_index is None: # no nan found in end
last_valid_index = len(yvalues)
end_nans = set(range(1 + last_valid_index, len(valid)))
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
# are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
preserve_nans: list | set
if limit_direction == "forward":
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == "backward":
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == "inside":
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == "outside":
# preserve NaNs on the inside
mid_nans = all_nans - start_nans - end_nans
preserve_nans |= mid_nans
# sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
if method in NP_METHODS:
# np.interp requires sorted X values, #21037
indexer = np.argsort(indices[valid])
yvalues[invalid] = np.interp(
indices[invalid], indices[valid][indexer], yvalues[valid][indexer]
)
else:
yvalues[invalid] = _interpolate_scipy_wrapper(
indices[valid],
yvalues[valid],
indices[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order,
**kwargs,
)
yvalues[preserve_nans] = np.nan
return
def _interpolate_scipy_wrapper(
x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs
):
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in _clean_interp_method.
"""
extra = f"{method} interpolation requires SciPy."
import_optional_dependency("scipy", extra=extra)
from scipy import interpolate
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
"barycentric": interpolate.barycentric_interpolate,
"krogh": interpolate.krogh_interpolate,
"from_derivatives": _from_derivatives,
"piecewise_polynomial": _from_derivatives,
}
if getattr(x, "_is_all_dates", False):
# GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype("i8"), new_x.astype("i8")
if method == "pchip":
alt_methods["pchip"] = interpolate.pchip_interpolate
elif method == "akima":
alt_methods["akima"] = _akima_interpolate
elif method == "cubicspline":
alt_methods["cubicspline"] = _cubicspline_interpolate
interp1d_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"polynomial",
]
if method in interp1d_methods:
if method == "polynomial":
method = order
terp = interpolate.interp1d(
x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error
)
new_y = terp(new_x)
elif method == "spline":
# GH #10633, #24014
if isna(order):
# Import required libraries
import pandas as pd
import nest_asyncio
import numpy as np
import warnings
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
# Configurations
warnings.filterwarnings('ignore')
# This function takes a card and transforms it to the shape required by the models
def transform_card(insert):
# Create the empty dataset to populate with our card
set_df = pd.DataFrame(columns=['name', 'lang', 'released_at', 'mana_cost', 'cmc', 'type_line',
'oracle_text', 'power', 'toughness', 'colors', 'color_identity',
'keywords', 'legalities', 'games', 'set', 'set_name', 'set_type',
'digital', 'rarity', 'flavor_text', 'artist', 'edhrec_rank', 'prices',
'loyalty', 'prints','image_uris', 'card_faces', 'oracle_text_1', 'oracle_text_2',
'image_uris_1', 'image_uris_2'])
# Insert the new card into the empty dataframe from before
set_df = set_df.append(insert,ignore_index=True)
# If it has text in "oracle_text_1", then it's a Double Faced Card
set_df['DFC'] = np.where(set_df['oracle_text_1'] != "None", 1, 0)
# Transform the data in double faced cards
# Let's first create a dataframe that just has the card name and the column 'card_faces'
double_cards_df = set_df[['name','card_faces']].dropna()
# We also filter it so we get cards that actually have 2 sides
double_cards_df = double_cards_df[double_cards_df['card_faces']!="none"]
# If we actually have information about the 2 faces, we separate them into 2 columns
try:
double_cards_df[['face1','face2']] = pd.DataFrame(double_cards_df['card_faces'].tolist(), index= double_cards_df.index)
except:
double_cards_df[['oracle_text_1','oracle_text_2']] = "None"
# Now let's drop the column 'card_faces'
double_cards_df.drop("card_faces",axis=1, inplace=True)
# We now go into each key within the dictionary of face1 and face2 and separate them into columns
try:
double_cards_df[double_cards_df['face1'].apply(pd.Series).columns + "_1"] = double_cards_df['face1'].apply(pd.Series)
double_cards_df[double_cards_df['face2'].apply(pd.Series).columns + "_2"] = double_cards_df['face2'].apply(pd.Series)
except:
pass
# Define a list of columns we want to keep from the 2 sided cards
cols_to_keep = ['name','oracle_text_1','oracle_text_2','image_uris_1','image_uris_2', 'colors_1',
'power_1', 'toughness_1', 'loyalty_1']
# For each column in the dataframe, if it's not a selected column, we drop it
for i in double_cards_df.columns:
if i not in cols_to_keep:
double_cards_df.drop(i, axis=1, inplace=True)
# We now need to consolidate the 2 oracle texts into 1, we join them together
double_cards_df['oracle_text_dobles'] = double_cards_df['oracle_text_1'] + "\n" + double_cards_df['oracle_text_2']
# Reset the indexes
double_cards_df = double_cards_df.reset_index(drop=True)
# Merge the 2 faces info into our main df
# We now merge them by card name
set_df = set_df.merge(double_cards_df, on=["name","oracle_text_1","oracle_text_2"], how="left").drop("card_faces",axis=1)
# We use this script to replace Nulls with "None"
set_df[['oracle_text_1','oracle_text_2']] = set_df[['oracle_text_1','oracle_text_2']].fillna("None")
try:
set_df[['image_uris_1','image_uris_2', 'colors_1',
'power_1', 'toughness_1','loyalty_1']] = set_df[['image_uris_1','image_uris_2', 'colors_1', 'power_1', 'toughness_1','loyalty_1']].fillna("None")
except:
pass
# Now that we have our oracle text from the 2 card sides joined together, we want to use it to replace
# the actual "oracle_text" from the original dataframe, which is actually empty
# If oracle_text is empty (meaning it's a double faced card), we replace it with our 'oracle_text_dobles' column
set_df['oracle_text'] = np.where(set_df['oracle_text'].isna(),set_df['oracle_text_dobles'],set_df['oracle_text'])
# And now that column is useless so we drop it
set_df = set_df.drop("oracle_text_dobles",axis=1)
# We need to do the same for all the other columns. However, for these columns, we bring the results
# of the front card:
# Color of the card
try:
set_df['colors'] = np.where(set_df['colors'].isna(),set_df['colors_1'],set_df['colors'])
set_df = set_df.drop("colors_1",axis=1)
except:
pass
# Power of the creature
try:
set_df['power'] = np.where(set_df['power'].isna(),set_df['power_1'],set_df['power'])
set_df = set_df.drop("power_1",axis=1)
except:
pass
# Toughness of the creature
try:
set_df['toughness'] = np.where(set_df['toughness'].isna(),set_df['toughness_1'],set_df['toughness'])
set_df = set_df.drop("toughness_1",axis=1)
except:
pass
# Loyalty of the planeswalker
try:
set_df['loyalty'] = np.where(set_df['loyalty'].isna(),set_df['loyalty_1'],set_df['loyalty'])
set_df = set_df.drop("loyalty_1",axis=1)
except:
pass
# One last thing. We can create a new column that will indicate if the card is a double faced card or not
set_df['DFC'] = np.where(set_df['oracle_text_1'] != "None", 1, 0)
# CMC grouping
# Create groupings for the cmc. For 7 or above, we group them together
set_df['cmc_grp'] = np.where(set_df['cmc'] <= 6.0, (set_df['cmc'].astype('int').astype('str'))+"_drop", "7plus_drop")
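# e.g. a card with cmc 2.0 falls into "2_drop", while anything with cmc 7.0 or higher lands in "7plus_drop"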
# Separate the Keywords column into unique keyword columns
# Create a list to use as column names for the keyword columns
my_list = list(set_df['keywords'].apply(pd.Series).columns)
string = 'keyword_'
kw_list = [string + str(x+1) for x in my_list]
print("Keyword Columns:")
print(kw_list)
#Apply the separation to our dataset
set_df[kw_list] = set_df['keywords'].apply(pd.Series).fillna('99999')
# Separate the Legalities column into unique legality columns
#Apply the separation to our dataset
set_df[set_df['legalities'].apply(pd.Series).columns] = set_df['legalities'].apply(pd.Series)
# Separate the prices column into unique price columns
#Apply the separation to our dataset
set_df[set_df['prices'].apply(pd.Series).columns] = set_df['prices'].apply(pd.Series)
# Let's check the shape of our dataframe once again
print(f"Shape of dataframe: {set_df.shape}")
# Colors
print(f"Max colors in a card: {len(list(set_df['colors'].apply(pd.Series).fillna('99999').columns))}")
# Lets create a dataframe that joins the colors to create all possible color combinations
color_df = set_df['colors'].apply(pd.Series).fillna('')
color_df['color'] = color_df.apply(lambda x: ''.join(sorted(x)), axis=1).replace('','Colorless')
color_df = color_df[['color']]
# Replace the "colors" column in the dataframe with our new column
set_df['colors'] = color_df
print(f"Different color in data: {len(set_df['colors'].unique())}")
# Repeat the process for the "color_identity" column
color_id_df = set_df['color_identity'].apply(pd.Series).fillna('')
color_id_df['color_identity'] = color_id_df.apply(lambda x: ''.join(sorted(x)), axis=1).replace('','Colorless')
color_id_df = color_id_df[['color_identity']]
set_df['color_identity'] = color_id_df
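# e.g. a card whose colors list is ['W', 'U'] ends up as "UW", and a card with no colors becomes "Colorless"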
### Remove useless columns
# List of columns we no longer need
cols_to_drop = ['keywords','legalities','games','prices','usd_etched']
# Drop the columns
set_df.drop(cols_to_drop,axis=1,inplace=True)
# Creating the keywords columns
#Lets create a sub dataframe with just the name of the card and the keyword columns
temp_df = set_df[['name'] + kw_list]
# We now want to melt this dataframe so we have the name repeated as many times as keywords, but just 1 keywords column
temp_df2 = pd.melt(temp_df, id_vars=['name'], value_vars=kw_list).drop('variable',axis=1)
# Now we can pivot this sub dataframe and get a column for each keyword, with 0s and 1s depending on each card
kw_df = temp_df2.pivot(columns="value", values="value").fillna(0)
try:
kw_df = kw_df.drop('99999',axis=1)
except:
pass
try:
kw_df = kw_df.replace(regex={r'\D': 1})
except:
pass
# Let's add the name of the card to this new sub dataframe
result = pd.concat([temp_df2[['name']], kw_df], axis=1)
# Summing and resetting index will help to condense the data
final_df = result.groupby(['name']).sum().reset_index()
# We can now merge this sub dataframe with our main dataframe and get all the keywords!
set_df_kw = set_df.merge(final_df, on=['name'], how="left").drop(kw_list, axis=1)
### Replace nulls in `flavor_text`
# If a card does not have a flavor text, let's put "no flavor text" instead
set_df_kw['flavor_text'] = set_df_kw['flavor_text'].fillna("no_flavor_text")
### Replace nulls in `edhrec_rank`
# If a card does not have an edhrec_rank, let's replace it with int 999999
set_df_kw['edhrec_rank'] = set_df_kw['edhrec_rank'].fillna(999999).astype(int)
# Separate column ``type_line``
# We first separate the card type of the front from the card type of the back
try:
set_df_kw[['face','back']] = set_df_kw['type_line'].str.split(' // ',expand=True).fillna('None')
except:
set_df_kw[['face','back']] = [set_df_kw['type_line'],"None"]
# We then separate the face type using the "-" as separator
try:
set_df_kw[['face_type','face_subtype']] = set_df_kw['face'].str.split(' — ',expand=True).fillna('None')
except:
set_df_kw['face_type'] = set_df_kw['face']
set_df_kw['face_subtype'] = "None"
# If a card has a back, we then separate the back type using the "-" as separator
try:
set_df_kw[['back_type','back_subtype']] = set_df_kw['back'].str.split(' — ',expand=True).fillna('None')
except:
set_df_kw['back_type'] = set_df_kw['back']
set_df_kw['back_subtype'] = "None"
# Separate ``face_type`` in each possible token
# Let's obtain the max quantity of words within "face_type" column
max_word_len = []
for i in range(len(set_df_kw['face_type'].unique())):
append_length = len(set_df_kw['face_type'].unique()[i].split())
max_word_len.append(append_length)
face_type_max = max(max_word_len)
print(f"Max words in face_type: {face_type_max}")
# Using our result of max words in face_type, create as many face_type_N columns
face_type_cols = []
for i in range(face_type_max):
face_type_col = f"face_type_{i+1}"
face_type_cols.append(face_type_col)
# Use these columns to split the face_type column
set_df_kw[face_type_cols] = set_df_kw['face_type'].str.split(' ',expand=True).fillna('None')
# Separate ``face_subtype`` in each possible token
# Let's obtain the max quantity of words within "face_subtype" column
max_word_len = []
for i in range(len(set_df_kw['face_subtype'].unique())):
append_length = len(set_df_kw['face_subtype'].unique()[i].split())
max_word_len.append(append_length)
face_subtype_max = max(max_word_len)
print(f"Max words in face_subtype: {face_subtype_max}")
# Using our result of max words in face_subtype, create as many face_subtype_N columns
face_subtype_cols = []
for i in range(face_subtype_max):
face_subtype_col = f"face_subtype_{i+1}"
face_subtype_cols.append(face_subtype_col)
# Use these columns to split the face_subtype column
set_df_kw[face_subtype_cols] = set_df_kw['face_subtype'].str.split(' ',expand=True).fillna('None')
# Separate ``back_type`` in each possible token
# Let's obtain the max quantity of words within "back_type" column
max_word_len = []
for i in range(len(set_df_kw['back_type'].unique())):
append_length = len(set_df_kw['back_type'].unique()[i].split())
max_word_len.append(append_length)
back_type_max = max(max_word_len)
print(f"Max words in back_type: {back_type_max}")
# Using our result of max words in back_type, create as many face_subtype_N columns
back_type_cols = []
for i in range(back_type_max):
back_type_col = f"back_type_{i+1}"
back_type_cols.append(back_type_col)
# Use these columns to split the back_type column
set_df_kw[back_type_cols] = set_df_kw['back_type'].str.split(' ',expand=True).fillna('None')
# Separate ``back_subtype`` in each possible token
# Let's obtain the max quantity of words within "back_subtype" column
max_word_len = []
for i in range(len(set_df_kw['back_subtype'].unique())):
append_length = len(set_df_kw['back_subtype'].unique()[i].split())
max_word_len.append(append_length)
back_subtype_max = max(max_word_len)
print(f"Max words in back_subtype: {back_subtype_max}")
# Using our result of max words in back_subtype, create as many back_subtype_N columns
back_subtype_cols = []
for i in range(back_subtype_max):
back_subtype_col = f"back_subtype_{i+1}"
back_subtype_cols.append(back_subtype_col)
# Use these columns to split the back_subtype column
set_df_kw[back_subtype_cols] = set_df_kw['back_subtype'].str.split(' ',expand=True).fillna('None')
# Abilities Count
# Define a function that will split the oracle text using \n as delimiter
def count_abilities(string):
try:
abilities_count = len(string.split('\n'))
except:
abilities_count = 0
return abilities_count
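# e.g. count_abilities("Flying\nWhen CARDNAME enters the battlefield, draw a card.") returns 2,
# and a non-string oracle_text falls into the except branch and returns 0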
# Apply the function and create the "abilities_count" column
set_df_kw['abilities_count'] = set_df_kw.apply(lambda x: count_abilities(x['oracle_text']),axis=1)
# Cleave fix
# Cleave transformation
# If card has cleave, remove "[" and "]" and repeat the same oracle text, removing whatever is between them
try:
set_df_kw['oracle_text'] = np.where(set_df_kw['Cleave']==1,
set_df_kw['oracle_text'].str.replace("[","").str.replace("]","")+'\n'+set_df_kw['oracle_text'].str.replace(r"[\(\[].*?[\)\]] ", ""),
set_df_kw['oracle_text'])
except:
pass
# Monocolored, Multicolored and others
# If color column has just 1 character, it's monocolored (eg. "B" or "W")
set_df_kw['monocolored'] = np.where(set_df_kw['colors'].str.len() == 1,1,0)
# If it has more than 1 charater and it does not say Colorless, then it's multicolored
set_df_kw['multicolored'] = np.where((set_df_kw['colors'].str.len() > 1) & (set_df_kw['colors'] != "Colorless"),1,0)
# And these other variants
set_df_kw['two_colors'] = np.where(set_df_kw['colors'].str.len() == 2,1,0)
set_df_kw['three_colors'] = np.where(set_df_kw['colors'].str.len() == 3,1,0)
set_df_kw['four_colors'] = np.where(set_df_kw['colors'].str.len() == 4,1,0)
set_df_kw['five_colors'] = np.where(set_df_kw['colors'].str.len() == 5,1,0)
set_df_kw['colorless'] = np.where(set_df_kw['colors'] == "Colorless",1,0)
# Devotion
# We count how many mana symbols we find in a card CMC
set_df_kw['mana_symbols_cost'] = set_df_kw['mana_cost'].str.count('W|U|B|R|G').fillna(0)
# We also count how many specific mana symbols
set_df_kw['devotion_W'] = set_df_kw['mana_cost'].str.count('W').fillna(0)
set_df_kw['devotion_U'] = set_df_kw['mana_cost'].str.count('U').fillna(0)
set_df_kw['devotion_B'] = set_df_kw['mana_cost'].str.count('B').fillna(0)
set_df_kw['devotion_R'] = set_df_kw['mana_cost'].str.count('R').fillna(0)
set_df_kw['devotion_G'] = set_df_kw['mana_cost'].str.count('G').fillna(0)
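# e.g. a mana_cost of "{1}{W}{W}" gives mana_symbols_cost == 2 and devotion_W == 2, with the other devotion columns at 0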
# Prices
# We create some columns to detect if we have missing prices
set_df_kw['missing_usd'] = np.where(set_df_kw['usd'].isna(), 1, 0)
set_df_kw['missing_usd_foil'] = np.where(set_df_kw['usd_foil'].isna(), 1, 0)
set_df_kw['missing_eur'] = np.where(set_df_kw['eur'].isna(), 1, 0)
set_df_kw['missing_eur_foil'] = np.where(set_df_kw['eur_foil'].isna(), 1, 0)
set_df_kw['missing_tix'] = np.where(set_df_kw['tix'].isna(), 1, 0)
# If there are missings, we fill them with 0
set_df_kw['usd'] = set_df_kw['usd'].fillna(0)
set_df_kw['eur'] = set_df_kw['eur'].fillna(0)
set_df_kw['usd_foil'] = set_df_kw['usd_foil'].fillna(0)
set_df_kw['eur_foil'] = set_df_kw['eur_foil'].fillna(0)
set_df_kw['tix'] = set_df_kw['tix'].fillna(0)
# Power & Toughness
# We just want to fill NaNs with "None" to fix any card that is not a creature
set_df_kw['power'] = set_df_kw['power'].fillna("None")
# Loyalty
# We just want to fill NaNs with "None" to fix any card that is not a planeswalker
set_df_kw['loyalty'] = set_df_kw['loyalty'].fillna('None')
# X spells
# Create a column that is 1 if it's a card with X in it's mana cost
set_df_kw['X_spell'] = np.where(set_df_kw['mana_cost'].str.contains('{X}'),1,0)
# Text `(to be removed)`
# Remove text between brackets in oracle_text
set_df_kw['oracle_text'] = set_df_kw['oracle_text'].str.replace(r"\(.*\)","")
# Mana symbols in oracle text
# We create a column tha that is 1 if there are mana symbols inside the oracle text
set_df_kw['mana_symbols_oracle'] = np.where(set_df_kw['oracle_text'].str.contains('{W}|{U}|{B}|{R}|{G}'),1,0)
# We count how many different mana symbols are in the oracle text
set_df_kw['mana_symbols_oracle_nbr'] = set_df_kw['oracle_text'].str.count('{W}|{U}|{B}|{R}|{G}')
# Includes tapping ability
# We create a column that is 1 if the card has {T} in the oracle_text
set_df_kw['tapping_ability'] = np.where(set_df_kw['oracle_text'].str.contains('{T}'),1,0)
# Includes multiple choice
# We create a column that is 1 if the card has '• ' in the oracle_text
set_df_kw['multiple_choice'] = np.where(set_df_kw['oracle_text'].str.contains('• '),1,0)
# Replace card name
#EXACT MATCH
for i in range(len(set_df_kw)):
set_df_kw.at[i,"oracle_text"] = set_df_kw.at[i,'oracle_text'].replace(set_df_kw.at[i,'name'].split(" // ")[0], 'CARDNAME')
#this is to also replace cardnames from back cards
try:
set_df_kw.at[i,"oracle_text"] = set_df_kw.at[i,'oracle_text'].replace(set_df_kw.at[i,'name'].split(" // ")[1], 'CARDNAME')
except:
pass
#FIRST NAME MATCH
for i in range(len(set_df_kw)):
set_df_kw.at[i,"oracle_text"] = set_df_kw.at[i,'oracle_text'].replace(set_df_kw.at[i,'name'].replace(",","").split(" // ")[0].split(" ")[0], 'CARDNAME')
#this is to also replace cardnames from back cards
try:
set_df_kw.at[i,"oracle_text"] = set_df_kw.at[i,'oracle_text'].replace(set_df_kw.at[i,'name'].replace(",","").split(" // ")[1].split(" ")[0], 'CARDNAME')
except:
pass
# Tokenize Oracle Text
# Define a function that takes the oracle text, removes undesired characters, stopwords and tokenizes it
def process_oracle(oracle):
"""Process oracle function.
Input:
oracle: a string containing an oracle
Output:
oracle_clean: a list of words containing the processed oracle
"""
import string
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
oracle = re.sub(r'\$\w*', '', oracle)
oracle = re.sub(r'^RT[\s]+', '', oracle)
oracle = re.sub(r'#', '', oracle)
oracle = re.sub("\d+", '', oracle)
# tokenize tweets
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
oracle_tokens = tokenizer.tokenize(oracle)
oracle_clean = []
for word in oracle_tokens:
if (word not in stopwords_english and # remove stopwords
word not in string.punctuation): # remove punctuation
# oracle_clean.append(word)
stem_word = stemmer.stem(word) # stemming word
oracle_clean.append(stem_word)
return oracle_clean
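# Illustrative output (a sketch; exact tokens depend on the NLTK stemmer and stopword list):
# >>> process_oracle("Destroy target creature. Its controller loses 2 life.")
# ['destroy', 'target', 'creatur', 'control', 'lose', 'life']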
# Apply the function and create a new column
set_df_kw['oracle_tokens'] = set_df_kw['oracle_text'].apply(lambda x: process_oracle(x))
# Create columns for each token
# Separate the tokens into columns
tokens_df = set_df_kw['oracle_tokens'].apply(pd.Series).fillna("None")
# Create a list with all the different tokens
tokens_set_list = []
remove_tokens = ['iii','None','•','x','c','r','−','g','iv','}:',
'eight','nine','ten','—','ii','u','b','w','p']
for i in tokens_df.columns:
tokens_set_list = list(set(tokens_set_list+list(tokens_df[i].unique())))
tokens_set_list = [x for x in tokens_set_list if x not in remove_tokens]
print(f"Number of tokens: {len(tokens_set_list)}")
# Create a new df with as many columns as tokens and 1s or 0s if the card has that token or not
empty_df = pd.DataFrame(columns=tokens_set_list)
k = 1
for i in empty_df.columns:
print(f"Progress: {round(k/len(empty_df.columns),2)*100}%")
for j in range(len(set_df_kw)):
if i in set_df_kw.at[j,'oracle_tokens']:
empty_df.at[j,i] = 1
else:
empty_df.at[j,i] = 0
k = k + 1
# Change the name of the columns with the token name and the "_tkn" string added
empty_df.columns = empty_df.columns + "_tkn"
print(f"Dataframe shape before merge: {set_df_kw.shape}")
# Merge with main dataframe
set_df_kw = pd.concat([set_df_kw, empty_df], axis=1)
print(f"Dataframe shape after merge: {set_df_kw.shape}")
# Create columns for each card type and subtype
# Get a list of the card types and subtypes columns
type_cols = face_type_cols + face_subtype_cols + back_type_cols + back_subtype_cols
print(type_cols)
# Create a sub-dataframe only with this columns
types_df = set_df_kw[type_cols]
# Create a list with all the different types
type_set_list = []
remove_types = []
for i in types_df.columns:
type_set_list = list(set(type_set_list+list(types_df[i].unique())))
type_set_list = [x for x in type_set_list if x not in remove_types]
# Create a new df with as many columns as types/subtypes and 1s or 0s if the card has that type/subtype or not
empty_df = pd.DataFrame(columns=type_set_list)
k = 1
for i in empty_df.columns:
print(f"Progress: {round(k/len(empty_df.columns),2)*100}%")
for j in range(len(set_df_kw)):
if i in set_df_kw.at[j,'type_line']:
empty_df.at[j,i] = 1
else:
empty_df.at[j,i] = 0
k= k + 1
# Change the name of the columns with the type name and the "_type" string added
empty_df.columns = empty_df.columns + "_type"
print(f"Dataframe shape before merge: {set_df_kw.shape}")
# Concatenate it to our main df
set_df_kw = pd.concat([set_df_kw, empty_df], axis=1).drop(face_type_cols+face_subtype_cols+back_type_cols+back_subtype_cols,axis=1)
print(f"Dataframe shape after merge: {set_df_kw.shape}")
# Flavor text
# Create a function that splits text into tokens and counts how many tokens are
def count_elements_in_list(string):
count = len(string.split())
return count
# Apply it to the flavor text
set_df_kw['flavor_text_len'] = set_df_kw['flavor_text'].apply(lambda x: count_elements_in_list(x))
# Create a column that is 1s if the card HAS flavor text and 0 if it doesn't
set_df_kw['flavor_text'] = np.where(set_df_kw['flavor_text']=="no_flavor_text",0,1)
# If the card has NO flavor text, change the flavor_text_len to 0
set_df_kw['flavor_text_len'] = np.where(set_df_kw['flavor_text']==0,0,set_df_kw['flavor_text_len'])
# Remove the `\n` from oracle_text
# Just replacing "\n" with " "
set_df_kw["oracle_text"] = set_df_kw["oracle_text"].apply(lambda x: x.replace("\n"," "))
# Card Super Types!!!
try:
set_df_kw['counterspell'] = np.where((set_df_kw['counter_tkn']==1) &
((set_df_kw['oracle_text'].str.lower().str.contains("counter target")) |
(set_df_kw['oracle_text'].str.lower().str.contains("counter all")) |
(set_df_kw['oracle_text'].str.lower().str.contains("counter it")))
,1,0)
except:
set_df_kw['counterspell'] = 0
set_df_kw['manarock'] = np.where(
((set_df_kw['tapping_ability']==1) |
(set_df_kw['oracle_text']).str.lower().str.contains("tap")) &
(set_df_kw['type_line']).str.lower().str.contains("artifact") &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"{t}: add.*?(mana of any color|mana of that color|{(.*?)})",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"{t}, tap an untapped.*?(mana of any color|mana of that color|{(.*?)})",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"{t}: choose a color",regex=True)==True)
)
,1,0)
set_df_kw['manadork'] = np.where(
(set_df_kw['tapping_ability']==1)&
(set_df_kw['manarock']!=1) &
(set_df_kw['back_type']!="Land") &
(set_df_kw['type_line']).str.lower().str.contains("creature") &
(
(set_df_kw['oracle_text_1'].str.lower().str.contains(r"{t}: add",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"{t}:.*?add one mana",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"{t}: add",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"add (one|two|three|four|five) mana",regex=True)==True)
)
,1,0)
#Regex for a word or a word with "-"
una_palabra = "\w+"
una_palabra_con_rayita = "\w+-\w+"
regex_1 = f"({una_palabra}|{una_palabra_con_rayita})"
set_df_kw['removal'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(f"(destroy|exile) target ({regex_1}|({regex_1}, {regex_1})|({regex_1}, {regex_1}, {regex_1})|({regex_1}, {regex_1}, {regex_1}, {regex_1})) (creature|permanent)(?! from (a|your) graveyard| card from (a|your) graveyard)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) another target (creature|permanent)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"destroy any number of target (creature|creatures|permanent|permanents)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) target (attacking|blocking|attacking or blocking) creature",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"destroy up to (one|two|three) target (\w+) (creature|permanent|creatures|permanents)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile up to (one|two|three) target (creature|permanent|creatures|permanents)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile up to (one|two|three) target (nonland|nonartifact) (creature|permanent|creatures|permanents)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile up to (one|two|three) target (\w+) (\w+) (creature|permanent|creatures|permanents)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) target (\w+) or creature",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) a (creature|permanent) with the (greatest|highest|lowest) (power|toughness|converted mana cost|mana value)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) target (creature|permanent)(?! from a graveyard| card from a graveyard)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) up to (\w+) target (attacking or blocking|attacking|blocking) (creature|creatures)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target (player|opponent) sacrifices a (creature|permanent)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"each (player|opponent) sacrifices (a|one|two|three|four) (creature|creatures|permanent|permanents)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted (creature|permanent) is a treasure",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted creature doesn't untap",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(annihilator)")==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals damage equal to its power to target creature",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(fights|fight) target creature")==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(those creatures|the chosen creatures) fight each other",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(fights|fight) up to (\w+) target (creature|creatures)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(fights|fight) another target creature",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"choose target creature you don't control.*?each creature.*?deals damage equal.*?to that creature",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may have (cardname|it) fight (that creature|target creature|another target creature)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target creature deals damage to itself equal to (its power)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target creature gets -[0-9]/-[2-9]", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target creature gets \+[0-9]/-[2-9]", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target creature an opponent controls gets \-[0-9]/\-[2-9]", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted creature (gets|has).*?loses (all|all other) abilities", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted creature gets \-[0-9]/\-[2-9]", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted creature gets \-[0-9]/\-[2-9]", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted creature gets \+[0-9]/\-[2-9]", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(enchanted|target) creature gets \-[0-9][0-9]/\-[0-9][0-9]", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains("target creature gets \-x/\-x")==True) |
(set_df_kw['oracle_text'].str.lower().str.contains("target creature gets \+x/\-x")==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target creature an opponent controls gets \-x/\-x", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted creature gets \-x/\-x", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted (creature|permanent) can't attack or block",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains("enchanted creature has defender")==True) |
(set_df_kw['oracle_text'].str.lower().str.contains("enchanted creature can't block.*?its activated abilities can't be activated")==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted creature.*?loses all abilities",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted (creature|permanent) can't attack.*?block.*?and its activated abilities can't be activated", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals ([2-9|x]) damage.*?(creature|any target|divided as you choose|to each of them)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals ([2-9|x]) damage.*?to each of up to (one|two|three|four) (target|targets)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals damage equal to.*?to (any target|target creature|target attacking creature|target blocking creature|target attacking or blocking creature)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target creature deals (.*?) damage to itself", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals damage to (any target|target creature|target attacking creature|target blocking creature|target attacking or blocking creature).*?equal to", regex=True)==True)) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"(cardname|it) deals [a-zA-Z0-9] damage to that player.",regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains("(cardname|it) deals [a-zA-Z0-9] damage to target (player|opponent) or planeswalker")==False) &
(set_df_kw['oracle_text'].str.lower().str.contains("(cardname|it) deals [a-zA-Z0-9] damage to that creature's controller")==False) &
(set_df_kw['oracle_text'].str.lower().str.contains("that was dealt damage this turn")==False) &
(set_df_kw['oracle_text'].str.lower().str.contains("^(?!damage|creature)\w* random")==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"search.*?(creature|artifact|enchantment) card",regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) target land",regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains("return it to the battlefield")==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"return that (card|creature|permanent) to the battlefield",regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"if you control a.*?^(?!liliana)\w* planeswalker",regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"^(?!additional cost|additional cost)\w* exile (target|a|one|two|three|all).*?from (your|a|target opponent's) graveyard",regex=True)==False)
,1,0)
set_df_kw['wrath'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) (all|all other|each|each other|all attacking) (creature|creatures|(.*?) creatures|permanent|permanents|(.*?) permanents|(nonland|multicolored) permanent|(nonland|multicolored) permanents)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"each (creature|other creature) gets -(x|[0-9])/-(x|[2-9])", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"each creature deals damage to itself equal to", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) all artifacts, creatures, and enchantments", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"sacrifices (all|that many) (creatures|(.*?) creatures|permanents|(.*?) permanents)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"chooses.*?then sacrifices the rest", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"creatures.*?get -(x|[0-9])/-(x|[2-9])", regex=True)==True) | #Crippling Fear
(set_df_kw['oracle_text'].str.lower().str.contains(f"deals ([3-9]|x|[1-9][0-9]) damage to each (creature|{regex_1} creature)", regex=True)==True)
)
,1,0)
regex_2 = "(land|lands|basic land|basic lands|plains|island|swamp|mountain|forest|plains|islands|swamps|mountains|forests|basic plains|basic island|basic swamp|basic mountain|basic forest|basic plains|basic islands|basic swamps|basic mountains|basic forests)"
regex_3 = "(a|one|one|two|three|up to one|up to two|up to three|up to ten|up to x|x)"
set_df_kw['ramp'] = np.where(
(set_df_kw['face_type']!="Land") &
(set_df_kw['manadork']!=1) &
(set_df_kw['manarock']!=1) &
(set_df_kw['face_type']!="Snow Land") &
(set_df_kw['face_type']!="Artifact Land") &
(set_df_kw['type_line'].str.lower().str.contains(r"(\w+) // land", regex=True)==False) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"{t}: add", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"^(?!{[1-9]}: )\w* add (one|two) mana", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"{[1]}, {t}: add ({(c|w|u|b|r|g)}{(c|w|u|b|r|g)}|two)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever enchanted land is tapped for mana.*?adds", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(f"search (your|their) library for {regex_3} {regex_2}.*?put.*?onto the battlefield", regex=True)==True)
) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"{[1-9]}, {t}: add one mana", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted land.*?{t}: add {(c|1|w|u|b|r|g)}", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"destroy target (land|nonbasic land)", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"spend this mana only to", regex=True)==False)
)
,1,0)
set_df_kw['tutor'] = np.where(
(set_df_kw['ramp']!=1) &
(set_df_kw['face_type']!="Land") &
(set_df_kw['face_type']!="Snow Land") &
(set_df_kw['face_type']!="Artifact Land") &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"search your (library|library and graveyard) for (a|an|any|any one|one|up to one|two|up to two|three|up to three|four|up to four|a(white|blue|black|red|green|colorless)) (card|cards|permanent|permanents|equipment|aura|aura or equipment|legendary|enchantment|enchantments|artifact|artifacts|creature|(.*?) creature cards|creature cards|creatures|sorcery|sorceries|instant|instants|planeswalker)", regex=True)==True)
) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"put (it|them|those cards|that card) into your graveyard", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"named", regex=True)==False)
)
,1,0)
set_df_kw['cardraw'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"draw (a|one|two|three|four|five|six|seven|x|(.*?) x) (card|cards)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"draw (cards equal to|that many cards)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target player draws (.*?) (card|cards)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(look at|reveal) the.*?put.*?(into|in) your hand", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(exile|look at the).*?(card|cards).*?you may (cast|play)", regex=True)==True)
) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever you draw a card", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"if you would draw a card", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"draw (a|one|two|three|four) (card|cards), then discard (a|one|two|three|four) (card|cards)", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"discard (a|one|two|three|four) (card|cards), then draw (a|one|two|three|four) (card|cards)", regex=True)==False)
)
,1,0)
set_df_kw['burn'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals ([1-9|x]) damage.*?(any target|player|opponent|to them|to each of them)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals (x|two|three|four|five) times (damage|x damage).*?(any target|player|opponent|to them|to each of up to)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals damage equal to.*?to (any target|target player|target opponent|to them|each player|each opponent)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals damage to (any target|target player|target opponent|to them|each player|each opponent).*?equal to", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals that much damage to (any target|target player|target opponent|each player|each opponent|that source's controller)", regex=True)==True)
)
,1,0)
set_df_kw['discard'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(that|target|each) (player|opponent) discards (a|one|two|three|that|all|all the) (card|cards)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"unless that player.*?discards a card", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"target (player|opponent) reveals their hand.*?you choose.*?exile (that|it)", regex=True)==True)
)
,1,0)
set_df_kw['enters_bf'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(enter|enters) the battlefield", regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(enter|enters) the battlefield tapped", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"land (enter|enters) the battlefield", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"it becomes day", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchant creature", regex=True)==False)
)
,1,0)
set_df_kw['die_trigger'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"when (cardname|equipped creature) dies", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever.*?dies", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever.*?you (control|don't control) dies", regex=True)==True)
)
,1,0)
set_df_kw['attack_trigger'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(when|whenever) (cardname|equipped creature|it) attacks", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(when|whenever) (cardname|equipped creature|it) and.*?(other|another) (creature|creatures) attack", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(battalion|exert|raid)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(when|whenever) (cardname|equipped creature|it) enters the battlefield or attacks", regex=True)==True)
)
,1,0)
set_df_kw['pseudo_ramp'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may put a (land|basic land).*?onto the battlefield", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(you|each player) may (play|put) an additional land", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"if it's a land card, you may put it onto the battlefield", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"sacrifice.*?add.*?({(.*?)}|to your mana pool|mana of (any|any one) color)", regex=True)==True)
)
,1,0)
set_df_kw['static_ramp'] = np.where(
((set_df_kw['type_line'].str.lower().str.contains("enchantment")) |
(set_df_kw['type_line'].str.lower().str.contains("creature")) |
(set_df_kw['type_line'].str.lower().str.contains("artifact"))) &
(set_df_kw['back'].str.lower().str.contains("land")==False) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"at the beginning of.*?add.*?(mana|{(.*?)})", regex=True)==True)
)
,1,0)
regex_4 = "(a|one|up to one|two|up to two|three|up to three|four|up to four|five|up to five|six|up to six|x|up to x)"
set_df_kw['creature_tokens'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(f"(create|put) {regex_4}.*?creature (token|tokens)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(f"(living weapon|amass|fabricate|afterlife|populate)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(creature tokens|creature tokens with.*?) are created instead", regex=True)==True)
)
,1,0)
set_df_kw['extra_turn'] = np.where(set_df_kw['oracle_text'].str.lower().str.contains(r"(take|takes) (an|one|two) extra (turn|turns)", regex=True)==True
,1,0)
set_df_kw['plus1_counters'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"\+1/\+1 (counter|counters)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(evolve|mentor|adapt|bolster|bloodthirst|devour|monstrosity|reinforce|training)", regex=True)==True)
)
,1,0)
set_df_kw['graveyard_hate'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile.*?from (graveyards|a graveyard|his or her graveyard|target player's graveyard|each opponent's graveyard)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"remove all graveyards from the game", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile.*?all (cards|creatures) from all (graveyards|opponents' hands and graveyards)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile each opponent's graveyard", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"if a.*?(card|creature|permanent) would (be put into.*?graveyard|die).*?(instead exile|exile it instead)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"choose target card in (target opponent's|a) graveyard.*?exile (it|them)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(target|each) player puts all the cards from their graveyard", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(creature cards|permanents|creatures|permanent cards) in (graveyards|graveyards and libraries) can't enter the battlefield", regex=True)==True)
)
,1,0)
set_df_kw['free_spells'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(rather than pay|without paying) (its|it's|their|this spell's|the) mana cost", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"cascade", regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may pay {", regex=True)==False)
)
,1,0)
set_df_kw['bounce_spell'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"return.*?to (it's|its|their) (owner's|owners') (hand|hands)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"owner.*?puts it.*?(top|bottom).*?library", regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"^(?!islands)\w* you control", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"(when|whenever).*?dies.*?return.*?to its owner's hand", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"return (cardname|the exiled card) to its owner's hand", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever cardname.*?return it to its owner's hand", regex=True)==False)
)
,1,0)
set_df_kw['sac_outlet'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"sacrifice (a|another) (creature|permanent)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(exploit)", regex=True)==True)
)
,1,0)
set_df_kw['sac_payoff'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever (you|a player) (sacrifice|sacrifices) a (creature|permanent)", regex=True)==True)
)
,1,0)
set_df_kw['cant_counter'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"can't be countered", regex=True)==True)
)
,1,0)
set_df_kw['costx_more'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(cost|costs) (.*?) more to cast", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"ward", regex=True)==True)
)
,1,0)
set_df_kw['costx_moreactivate'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(cost|costs) (.*?) more to activate", regex=True)==True)
)
,1,0)
set_df_kw['costx_less'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(cost|costs) (.*?) less to cast", regex=True)==True)
)
,1,0)
set_df_kw['costx_lessacitivate'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(cost|costs) (.*?) less to activate", regex=True)==True)
)
,1,0)
set_df_kw['whenever_opp'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever (an opponent|a player)", regex=True)==True)
)
,1,0)
regex_5 = "(all|each|another|another target|x|x target|a|target|any number of|one|up to one|up to one target|two|up to two|up to two target|three|up to three|up to three target|four|up to four|up to four target)"
regex_6 = "(card|cards|creature|creatures|nonlegendary creature|creature card|creature cards|permanent|permanents|permanent card|permanent cards|land|lands|land card|land cards|instant or sorcery card|equipment card|aura card|aura or equipment card|artifact or enchantment)"
set_df_kw['returnfrom_gy'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(fr"return {regex_5} {regex_6}.*?from your graveyard to your hand", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"return cardname from your graveyard to your hand", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"choose.*?graveyard.*?return.*?to your hand", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"return.*?up to.*?from your graveyard to your hand", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"return (target|another target).*?card from your graveyard to your hand", regex=True)==True)
)
,1,0)
set_df_kw['reanimation'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(fr"(return|put) {regex_5} {regex_6}.*?from (your|a) graveyard (to|onto) the battlefield", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"return cardname from your graveyard to the battlefield", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"choose.*?graveyard.*?return.*?to the battlefield", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"return.*?up to.*?from your graveyard to the battlefield", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"enchant creature card in (a|your) graveyard.*?return enchanted creature card to the battlefield under your control", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"(return|returns|put) (all|any number of) (creature|permanent|enchantment|artifact|legendary permanent|legendary creature|nonlegendary creature|nonlegendary permanents|(.*?), (.*?) and (.*?)) cards.*?from (their|your|all) (graveyard|graveyards) (to|onto) the battlefield", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(fr"(return|put) (target|another target).*?card from your graveyard to the battlefield", regex=True)==True)
)
,1,0)
set_df_kw['castfrom_gy'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may cast cardname from your graveyard", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"flashback {", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"jump-start", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"escape—{", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"disturb {", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"unearth {", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"retrace", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"embalm", regex=True)==True)
)
,1,0)
set_df_kw['lord'] = np.where(
(
(set_df_kw['type_line'].str.lower().str.contains("creature")) |
(set_df_kw['type_line'].str.lower().str.contains("artifact")) |
(set_df_kw['type_line'].str.lower().str.contains("enchantment"))
) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"get \+[1-9]/\+[0-9]", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(battle cry)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"each (creature|other creature).*?gets \+[1-9]/\+[0-9]", regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"until end of turn", regex=True)==False)
)
,1,0)
set_df_kw['upkeep_trigger'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"beginning of (your|enchanted player's|each|each player's) upkeep", regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"beginning of your upkeep, sacrifice cardname", regex=True)==False)
)
,1,0)
set_df_kw['endstep_trigger'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"beginning of (your|enchanted player's|each) end step", regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"sacrifice.*? at the beginning of your end step", regex=True)==False)
)
,1,0)
set_df_kw['landfall'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever a land enters the battlefield under your control", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"landfall", regex=True)==True)
)
,1,0)
set_df_kw['combat_trigger'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"beginning of (combat|each combat)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"deals combat damage", regex=True)==True)
)
,1,0)
set_df_kw['life_gain'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"gain (.*?) life", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"gains (.*?) x life", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"gain life equal", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(lifelink|extort)", regex=True)==True)
)
,1,0)
set_df_kw['treasure_tokens'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(f"(create|put) {regex_4}.*?treasure (token|tokens)", regex=True)==True)
)
,1,0)
set_df_kw['protection'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(f"(hexproof|ward|indestructible|shroud)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(f"can't (be|become) (the|target)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(f"protection from", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(f"becomes the target of a spell", regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"becomes the target of.*?sacrifice (it|cardname)", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"becomes the target of.*?shuffle.*?into its owner's library", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"becomes.*?with hexproof.*?until end of turn", regex=True)==False)
)
,1,0)
set_df_kw['cost_reduction'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(delve|convoke|affinity|foretell|madness|miracle|spectacle)", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"evoke", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may pay.*?to cast this spell", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may pay (.*?) rather than pay", regex=True)==True)
)
,1,0)
set_df_kw['mana_multipliers'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(whenever|if).*?tap (a|an) (land|permanent|nonland permanent|plains|island|swamp|mountain|forest|creature) for mana.*?add (one mana|an additional|{(.*?)})", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(whenever|if).*?tap (a|an) (land|permanent|nonland permanent|plains|island|swamp|mountain|forest|creature) for mana.*?it produces.*?instead", regex=True)==True)
)
,1,0)
set_df_kw['card_selection'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"scry", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"look at the top.*?bottom of your library.*?on top", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"look at the top.*?on top.*?bottom of your library", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"look at the top.*?graveyard.*?on top", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"look at the top.*?on top.*?graveyard", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"look at the top.*?you may put.*?into your graveyard", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"surveil", regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(explore|explores)", regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever a creature you control explores", regex=True)==False)
)
,1,0)
set_df_kw['whenever_cast'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(whenever you cast|prowess)",regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may transform", regex=True)==False)
)
,1,0)
set_df_kw['gain_control'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"gain control of",regex=True)==True)
)
,1,0)
set_df_kw['unblockeable'] = np.where(
(set_df_kw['type_line'].str.lower().str.contains("creature")) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(cardname|you control) can't be blocked",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(unblockable|shadow)",regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"cardname can't be blocked by", regex=True)==False)
)
,1,0)
set_df_kw['difficult_block'] = np.where(
(set_df_kw['type_line'].str.lower().str.contains("creature")) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"cardname can't be blocked by",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(menace|first strike|flying|deathtouch|double strike|fear|intimidate)",regex=True)==True)
)
,1,0)
set_df_kw['create_copy'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"create a copy of",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(that's|as) a copy of",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"copy (target|it|them|that spell|that ability)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may copy",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(storm)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"becomes a copy",regex=True)==True)
)
,1,0)
set_df_kw['milling'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(mill|mills)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"puts the top.*?of (their|his or her|your) library into (their|his or her|your) graveyard",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile the top (.*?) cards of (target|each) (player|opponent|players|opponents)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(target|each) opponent exiles cards from the top of their library",regex=True)==True)
)
,1,0)
set_df_kw['trigger_multiplier'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"triggers (one more|an additional) time",regex=True)==True)
)
,1,0)
set_df_kw['untapper'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"untap (target|that|another|the chosen|them|all)",regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"gain control", regex=True)==False)
)
,1,0)
set_df_kw['static_effects'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(artifacts and creatures|creatures|permanents) (your opponents|enchanted player|you) (control|controls) (enter|lose|have|with|can't)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"activated abilities of (artifacts|creatures).*?can't be activated",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"can't cause their controller to search their library",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"don't cause abilities to trigger",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"can't draw more than",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"only any time they could cast a sorcery",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"enchanted player",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"at the beginning of (your|each).*?(you|that player)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(players|counters) can't",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"if (you|target opponent|a player|another player) would.*?instead",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"each (card|(.*?) card) in your (hand|graveyard).*?has",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(each player|players|your opponents) can't cast (spells|more than)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"is determined by their (power|toughness) rather than their (power|toughness)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"each creature.*?assigns combat damage.*?toughness rather than its power",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"they put half that many",regex=True)==True)
)
,1,0)
set_df_kw['damage_multipliers'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"it deals that much damage plus",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"it deals (double|triple) that damage",regex=True)==True)
)
,1,0)
set_df_kw['variable_pt'] = np.where(
(set_df_kw['power'].str.lower().str.contains("\\*")) |
(set_df_kw['toughness'].str.lower().str.contains("\\*"))
,1,0)
set_df_kw['agressive'] = np.where(
(set_df_kw['type_line'].str.lower().str.contains("creature")) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(haste|riot|dash)",regex=True)==True)
)
,1,0)
set_df_kw['doublers'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(put|it creates|it puts|create) twice that many",regex=True)==True)
)
,1,0)
set_df_kw['blinker'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile (up to (one|two) target|up to (one|two) other target|target|another target|any number of target) (creature|creatures|(.*?) creature|permanent|permanents|(.*?) permanent|(.*?) or creature).*?return.*?to the battlefield",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile (target|another target) (permanent|creature).*?return (that card|that permanent|it) to the battlefield under its owner's control",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"exile (two|three|four|five|all|each).*?you (control|own).*?then return (them|those).*?to the battlefield",regex=True)==True)
)
,1,0)
set_df_kw['graveyard_tutor'] = np.where(
(set_df_kw['ramp']!=1) &
(set_df_kw['tutor']!=1) &
(set_df_kw['face_type']!="Land") &
(set_df_kw['face_type']!="Snow Land") &
(set_df_kw['face_type']!="Artifact Land") &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"search your library for.*?put.*?into your graveyard", regex=True)==True)
)
,1,0)
set_df_kw['play_toplibrary'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"play with the top of your library",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may (play|cast).*?(from the|the) top of your library",regex=True)==True)
)
,1,0)
set_df_kw['life_lose'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(each opponent|each player|target opponent|target player).*?loses (.*?) life",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(afflict|extort)",regex=True)==True)
)
,1,0)
set_df_kw['play_from_graveyard'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may (play|cast).*?(land|permanent|creature|artifact).*?from your graveyard",regex=True)==True)
)
,1,0)
set_df_kw['infect'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"infect",regex=True)==True)
)
,1,0)
set_df_kw['disenchant'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(destroy|exile) (target|each|every) (artifact or enchantment|artifact|enchantment)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"destroy (x) target (artifacts or enchantments|artifacts|enchantments)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"destroy all (artifacts or enchantments|artifacts|enchantments)",regex=True)==True)
)
,1,0)
set_df_kw['venture'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"venture into the dungeon",regex=True)==True)
)
,1,0)
set_df_kw['animator'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"(target|another target).*?becomes a.*?creature",regex=True)==True)
)
&
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"copy", regex=True)==False) &
(set_df_kw['oracle_text'].str.lower().str.contains(r"class", regex=True)==False))
,1,0)
set_df_kw['wish'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may.*?from outside the game",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"learn",regex=True)==True)
)
,1,0)
set_df_kw['gy_synergies'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"gets.*?for each.*?in your graveyard",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"(dredge)",regex=True)==True)
)
,1,0)
set_df_kw['looting_similar'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"draw (a|one|two|three|four) (card|cards), then discard (a|one|two|three|four) (card|cards)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"discard (a|one|two|three|four) (card|cards)(,|:) (draw|then draw) (a|one|two|three|four) (card|cards)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"create (.*?) (blood|clue) token",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"cycling",regex=True)==True)
)
,1,0)
set_df_kw['cheatinto_play'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"creature.*?put (it|them) onto the battlefield",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"look at the.*?put.*?creature.*?onto the battlefield",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"you may put.*?(creature|permanent).*?onto the battlefield",regex=True)==True)
)
,1,0)
set_df_kw['pumped_foreach'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"gets \+[0-9]/\+[0-9] for each",regex=True)==True)
)
,1,0)
set_df_kw['ritual'] = np.where(
(
(set_df_kw['type_line'].str.lower().str.contains("instant")) |
(set_df_kw['type_line'].str.lower().str.contains("sorcery"))
) &
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"add {(.*?)}",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"add (.*?) {(.*?)}",regex=True)==True)
)
,1,0)
set_df_kw['no_maximum'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you have no maximum hand size",regex=True)==True)
)
,1,0)
set_df_kw['wheel'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"each player.*?(discards|shuffles (his or her|their) hand and graveyard into (his or her|their) library).*?then draws seven cards",regex=True)==True)
)
,1,0)
set_df_kw['extra_combat'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"additional combat phase",regex=True)==True)
)
,1,0)
set_df_kw['ghostly_prison'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"creatures can't attack (you|you or planeswalkers you control) unless",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"whenever an opponent attacks (you|with creatures)",regex=True)==True)
)
,1,0)
set_df_kw['land_destruction'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"destroy target (land|nonbasic land)",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"destroy all lands",regex=True)==True)
)
,1,0)
set_df_kw['win_game'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you win the game",regex=True)==True)
)
,1,0)
set_df_kw['lose_game'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you lose the game",regex=True)==True)
)
,1,0)
set_df_kw['cant_lose'] = np.where(
(
(set_df_kw['oracle_text'].str.lower().str.contains(r"you can't lose the game",regex=True)==True) |
(set_df_kw['oracle_text'].str.lower().str.contains(r"your opponents can't win the game",regex=True)==True)
)
,1,0)
# Create Bins for Price categories
# Convert column types of prices to float
set_df_kw['usd'] = set_df_kw['usd'].astype(float)
set_df_kw['eur'] = set_df_kw['eur'].astype(float)
set_df_kw['tix'] = set_df_kw['tix'].astype(float)
# Create 5 categories
price_labels = ['bronze', 'silver', 'gold', 'platinum','diamond']
# Define the cuts of each category
usd_bins = [-1.00, 0.25, 1.00, 5.00, 10.00, 1000.00]
eur_bins = [-1.00, 0.25, 1.00, 5.00, 10.00, 1000.00]
tix_bins = [-1.00, 0.02, 0.05, 0.5, 1.00, 1000.00]
# Apply them to the price columns
set_df_kw['binusd'] = pd.cut(set_df_kw['usd'], bins=usd_bins, labels=price_labels)
set_df_kw['bineur'] = pd.cut(set_df_kw['eur'], bins=eur_bins, labels=price_labels)
set_df_kw['bintix'] = pd.cut(set_df_kw['tix'], bins=tix_bins, labels=price_labels)
# Convert the categorical columns to string
set_df_kw['binusd'] = set_df_kw['binusd'].astype(str)
set_df_kw['bineur'] = set_df_kw['bineur'].astype(str)
set_df_kw['bintix'] = set_df_kw['bintix'].astype(str)
# Column that groups abilities
# Define a list with all the super types we created
abilities_columns = ['counterspell', 'manarock', 'manadork', 'removal', 'wrath', 'ramp', 'tutor', 'cardraw', 'burn',
'discard', 'enters_bf', 'die_trigger', 'attack_trigger', 'pseudo_ramp', 'static_ramp',
'creature_tokens', 'extra_turn', 'plus1_counters', 'graveyard_hate', 'free_spells', 'bounce_spell',
'sac_outlet', 'sac_payoff', 'cant_counter', 'costx_more', 'costx_moreactivate', 'costx_less',
'costx_lessacitivate', 'whenever_opp', 'returnfrom_gy', 'reanimation', 'castfrom_gy', 'lord',
'upkeep_trigger', 'endstep_trigger', 'landfall', 'combat_trigger', 'life_gain', 'treasure_tokens', 'protection',
'cost_reduction', 'mana_multipliers', 'card_selection', 'whenever_cast', 'gain_control',
'unblockeable', 'difficult_block', 'create_copy', 'milling', 'trigger_multiplier', 'untapper',
'static_effects', 'damage_multipliers', 'variable_pt', 'agressive', 'doublers', 'blinker',
'graveyard_tutor', 'play_toplibrary', 'life_lose', 'play_from_graveyard', 'infect', 'disenchant',
'venture', 'animator', 'wish', 'gy_synergies', 'looting_similar', 'cheatinto_play', 'pumped_foreach',
'ritual', 'no_maximum', 'wheel', 'extra_combat', 'ghostly_prison', 'land_destruction', 'win_game',
'lose_game', 'cant_lose']
print(f"Total super abilities created: {len(abilities_columns)}")
# Create a column that sums them for each card
set_df_kw['total_abilites'] = set_df_kw[abilities_columns].sum(axis=1)
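# Illustrative sanity check (not part of the original notebook): peek at which engineered
# ability flags fire most often across the set.
print(set_df_kw[abilities_columns].sum(axis=0).sort_values(ascending=False).head(10))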
# Release date columns
# Convert the column to datetime
set_df_kw['released_at'] = | pd.to_datetime(set_df_kw['released_at']) | pandas.to_datetime |
import os
from multiprocessing import Pool
from tempfile import gettempdir
from typing import Dict, Union, Type, Any, Tuple, List
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from ray import tune
from tqdm import trange
from colosseum.agents.bayes_tools.conjugate_rewards import RewardsConjugateModel
from colosseum.agents.bayes_tools.conjugate_transitions import TransitionsConjugateModel
from colosseum.agents.continuous.ucrl2 import UCRL2Continuous
from colosseum.agents.episodic.psrl import PSRLEpisodic
from colosseum.experiments.experiment import MDPLoop
from colosseum.mdps import EpisodicMDP, ContinuousMDP
from colosseum.utils.acme.in_memory_logger import InMemoryLogger
from colosseum.utils.acme.specs import make_environment_spec
from colosseum.utils.miscellanea import ensure_folder
def run(
config: Dict[str, Any],
n_seeds: int,
mdp_kwargs: Dict[str, Any],
mdp_class : Union[Type["EpisodicMDP"], Type["ContinuousMDP"]],
max_time: float,
T: int,
verbose: Union[bool, str, None],
folder: Union[None, str],
i: int,
) -> Tuple[Dict[str, Any], List[float]]:
"""
Parameters
----------
config: Dict[str, Any].
the hyperparameters of the agent.
n_seeds: int.
the number of seeds over which the agent/MDP interaction is averaged.
mdp_kwargs: Dict[str, Any].
the parameters of the MDP.
mdp_class : Union[Type["EpisodicMDP"], Type["ContinuousMDP"]].
the class of the MDP.
max_time: float.
the maximum training time for the agent.
T: int.
the total number of time steps for the agent/MDP interaction.
verbose: Union[bool, str, None].
        if set to a bool, it prints directly to the console; if set to a string, it saves the output to
        a file with that name.
folder: Union[None, str].
if given, the logs of the agent/MDP interaction will be stored in this folder.
i: int.
an integer id to assign to the logging file.
Returns
-------
a tuple containing the hyperparameters of the agent and the cumulative regrets for the different seeds.
"""
regrets = []
for seed in range(n_seeds):
mdp_kwargs["seed"] = seed
mdp_kwargs["force_single_thread"] = True
mdp = mdp_class(**mdp_kwargs)
if issubclass(mdp_class, EpisodicMDP):
agent = PSRLEpisodic(
environment_spec=make_environment_spec(mdp),
seed=seed,
H=mdp.H,
r_max=mdp.r_max,
T=T,
reward_prior_model=RewardsConjugateModel.N_NIG,
transitions_prior_model=TransitionsConjugateModel.M_DIR,
rewards_prior_prms=[config["a"], 1, 1, 1],
transitions_prior_prms=[config["b"]],
)
else:
agent = UCRL2Continuous(
environment_spec=make_environment_spec(mdp),
seed=seed,
r_max=mdp.r_max,
T=T,
alpha_p=config["a"],
alpha_r=config["b"],
bound_type_p="bernstein",
)
loop = MDPLoop(mdp, agent, logger=InMemoryLogger())
loop.run(T=T, verbose=verbose, max_time=max_time)
df_e = | pd.DataFrame(loop.logger.data) | pandas.DataFrame |
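# Hypothetical usage sketch (an assumption, not part of the original script): sweep a small
# grid of prior parameters by calling `run` directly, relying on its documented return value
# (agent hyperparameters, cumulative regrets per seed). The MDP class/kwargs are placeholders
# for a real colosseum MDP.
def example_sweep(mdp_class, mdp_kwargs):
    configs = [{"a": a, "b": b} for a in (0.1, 1.0) for b in (0.1, 1.0)]
    results = []
    for i, config in enumerate(configs):
        # copy the kwargs because `run` writes the seed into them
        prms, regrets = run(
            config=config,
            n_seeds=2,
            mdp_kwargs=dict(mdp_kwargs),
            mdp_class=mdp_class,
            max_time=60.0,
            T=10_000,
            verbose=False,
            folder=None,
            i=i,
        )
        # rank configurations by mean cumulative regret across seeds
        results.append((prms, float(np.mean(regrets))))
    return sorted(results, key=lambda r: r[1])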
'''
This is code used to look at how the gas prices in Texas are changing
as a function of time. A moving average is plotted underneath to help
guide the eye when comparing to the gasbuddy.com charts.
It's important to note that while these gas values are mostly coming
from the Bryan/College Station area, there are times when I purchased
gas on my drive to Austin or Dallas.
---> these locations are logged in "gasforcar" table
The original code for the moving average can be found here:
https://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy/54628145
'''
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
#import matplotlib.colorbar as colorbar
import matplotlib.patheffects as PathEffects
import matplotlib.dates as md
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# reading in data
df = | pd.read_csv('gasforcar') | pandas.read_csv |
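# Hedged sketch (an assumption, not the original implementation): the moving average that the
# docstring says is underlaid beneath the gas prices can be computed with the cumulative-sum
# trick from the linked StackOverflow answer.
def moving_average(values, window=5):
    # running mean over a fixed window in O(n)
    values = np.asarray(values, dtype=float)
    cumsum = np.cumsum(np.insert(values, 0, 0.0))
    return (cumsum[window:] - cumsum[:-window]) / window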
import re
import time
import logging
import functools
from datetime import timedelta
import numpy as np
import pandas as pd
from sqlalchemy import func
from db import Cluster, Event, ClusterType
log = logging.getLogger("dac-parser")
class EventParser:
events = [
# Custom starting event
"INIT",
# Indicates that the cluster is being created.
"CREATING",
# Indicates that a disk is low on space, but adding disks would
# put it over the max capacity.
"DID_NOT_EXPAND_DISK",
# Indicates that a disk was low on space and the disks were expanded.
"EXPANDED_DISK",
# Indicates that a disk was low on space and disk space
# could not be expanded.
"FAILED_TO_EXPAND_DISK",
# Indicates that the cluster scoped init script is starting.
"INIT_SCRIPTS_STARTING",
# Indicates that the cluster scoped init script has started.
"INIT_SCRIPTS_STARTED",
# Indicates that the cluster scoped init script has finished.
"INIT_SCRIPTS_FINISHED",
# Indicates that the cluster is being started.
"STARTING",
# Indicates that the cluster is being started.
"RESTARTING",
# Indicates that the cluster is being terminated.
"TERMINATING",
# Indicates that the cluster has been edited.
"EDITED",
# Indicates the cluster has finished being created.
# Includes the number of nodes in the cluster and a failure reason
# if some nodes could not be acquired.
"RUNNING",
# Indicates a change in the target size of the cluster
# (upsize or downsize).
"RESIZING",
# Indicates that nodes finished being added to the cluster.
# Includes the number of nodes in the cluster and a failure reason
# if some nodes could not be acquired.
"UPSIZE_COMPLETED",
# Indicates that some nodes were lost from the cluster.
"NODES_LOST",
# Indicates that the driver is healthy and
# the cluster is ready for use.
"DRIVER_HEALTHY",
# Indicates that the driver is unavailable.
"DRIVER_UNAVAILABLE",
# Indicates that a Spark exception was thrown from the driver.
"SPARK_EXCEPTION",
# Indicates that the driver is up but is not responsive,
# likely due to GC.
"DRIVER_NOT_RESPONDING",
# Indicates that the driver is up but DBFS is down.
"DBFS_DOWN",
# Indicates that the driver is up but the metastore is down.
"METASTORE_DOWN",
# Usage report containing the total and unused instance minutes
# of the autoscaling cluster over the last hour.
"AUTOSCALING_STATS_REPORT",
# Indicates that a node has been blacklisted by Spark.
"NODE_BLACKLISTED",
# Indicates that the cluster was pinned.
"PINNED",
# Indicates that the cluster was unpinned.
"UNPINNED",
]
states = [
'UNKNOWN',
'RUNNING',
'STOPPED'
]
transitions = {
# TODO: decide what is the correct state here
"PENDING": 'RUNNING',
"CREATING": 'RUNNING',
"STARTING": 'RUNNING',
"RESTARTING": 'RUNNING',
"TERMINATING": 'STOPPED',
"TERMINATED": 'STOPPED',
"RUNNING": 'RUNNING',
"UNKONWN": 'UNKNOWN',
}
instance_type_regex = re.compile(r'(([a-z]\d[a-z]?.[\d]*[x]?large)|'
r'((Standard_|Premium_)'
r'[a-zA-Z]{1,2}\d+[a-zA-Z]?(_v\d*)?))')
def __init__(self, instance_types: pd.DataFrame) -> None:
self.instance_type_map = instance_types
def parse(self, events: list, clusters: dict) -> pd.DataFrame:
timeline = self.process_events(events)
result = self.process_timelines(timeline, clusters)
return result
def process_events(self, events: list) -> list:
timeline = []
for event in events:
timeline.append(self.process_event(event))
return timeline
def process_event(self, event: dict) -> dict:
etype = event.get('type')
if etype not in self.events:
            log.warning(f'Unknown event: {event}\n'
f'Recognized events are: {self.events}')
details = event.get('details', {})
user = details.get('user')
num_workers = details.get('current_num_workers')
# CREATED / EDITED event only
attributes = details.get('attributes', {})
# cluster_name = attributes.get('cluster_name')
driver_type = attributes.get('driver_node_type_id')
worker_type = attributes.get('node_type_id')
return {'timestamp': event.get('timestamp', 0),
'cluster_id': event.get('cluster_id'),
'user_id': user,
'event': etype,
'driver_type': driver_type,
'worker_type': worker_type,
'num_workers': num_workers}
def process_timelines(self,
raw_timeline: list,
clusters: dict) -> pd.DataFrame:
timelines = {}
for event in raw_timeline:
cluster = event['cluster_id']
if cluster not in timelines:
timelines[cluster] = []
timelines[cluster].append(event)
dfs = []
for cluster_id, timeline in timelines.items():
cluster_name = clusters.get(cluster_id, 'UNKNOWN')
dfs.append(self.process_timeline(timeline, cluster_name))
return pd.concat(dfs, sort=False)
def process_timeline(self,
timeline: list,
cluster_name: str) -> pd.DataFrame:
# Empty timeline
if not len(timeline):
return pd.DataFrame()
sorted_timeline = sorted(timeline, key=lambda x: x['timestamp'])
# initial event
first = sorted_timeline[0]
init = {key: None for key in first.keys()}
init['timestamp'] = first['timestamp']
init['event'] = 'INIT'
timeline = [init] + sorted_timeline
frm = timeline[:-1]
to = timeline[1:]
rows = []
status = {
'timestamp': first['timestamp'],
'cluster_id': first['cluster_id'],
'state': 'UNKNOWN',
'user_id': 'UNKNOWN',
'driver_type': first['driver_type'],
'worker_type': first['worker_type'],
'num_workers': 0,
'interval': 0
}
for frm_event, to_event in zip(frm, to):
delta = to_event['timestamp'] - frm_event['timestamp']
delta = timedelta(milliseconds=delta)
            delta = delta.total_seconds() / 3600  # total_seconds avoids truncating intervals longer than a day
row = status.copy()
row['interval'] = delta
rows.append(row)
status = self.get_new_status(status, to_event)
cluster_type = self.determine_cluster_type(cluster_name)
# exclude starting status
df = | pd.DataFrame(rows[1:]) | pandas.DataFrame |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation utilities for uniprot predictions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
from typing import Dict, FrozenSet, List, Optional, Sequence, Set, Text, Tuple, Union
import numpy as np
import pandas as pd
from pandas.core.groupby.generic import DataFrameGroupBy as pd_DataFrameGroupBy
import inference
import parenthood_lib
import sklearn.metrics
import tqdm
FALSE_NEGATIVES_KEY = 'false_negatives'
FALSE_POSITIVES_KEY = 'false_positives'
TRUE_POSITIVES_KEY = 'true_positives'
PrecisionRecallF1 = Tuple[float, float, float]
def normalize_confidences(
predictions, label_vocab,
applicable_label_dict):
"""Set confidences of parent labels to the max of their children.
Args:
predictions: [num_sequences, num_labels] ndarray.
label_vocab: list of vocab strings in an order that corresponds to
`predictions`.
applicable_label_dict: Mapping from labels to their parents (including
indirect parents).
Returns:
A numpy array [num_sequences, num_labels] with confidences where:
if label_vocab[k] in applicable_label_dict[label_vocab[j]],
then arr[i, j] >= arr[i, k] for all i.
"""
vocab_indices = {v: i for i, v in enumerate(label_vocab)}
children = parenthood_lib.reverse_map(applicable_label_dict,
set(vocab_indices.keys()))
# Only vectorize this along the sequences dimension as the number of children
# varies between labels.
label_confidences = []
for label in label_vocab:
child_indices = np.array(
[vocab_indices[child] for child in children[label]])
if child_indices.size > 1:
confidences = np.max(predictions[:, child_indices], axis=1)
label_confidences.append(confidences)
else:
label_confidences.append(predictions[:, vocab_indices[label]])
return np.stack(label_confidences, axis=1)
def get_ground_truth_multihots(label_sets,
label_vocab):
"""Get a multihot matrix from label sets and a vocab."""
vocab_indices = {v: i for i, v in enumerate(label_vocab)}
ground_truths = []
for s in label_sets:
indices = np.array([vocab_indices[v] for v in s], dtype=np.int32)
multihots = np.zeros([len(label_vocab)])
multihots[indices] = 1
ground_truths.append(multihots)
return np.vstack(ground_truths)
def get_pr_f1_df_from_arrays(
ground_truths,
normalized_predictions,
prediction_precision_limit = None,
):
"""Convenience method for making a PR curve dataframe.
Args:
ground_truths: multihot array of shape (num_examples, num_labels).
normalized_predictions: array of shape (num_samples, num_labels).
prediction_precision_limit: Used to truncate the predictions to a fixed
level of precision. Predictions are truncated to
`prediction_precision_limit` number of decimal places. This argument is
useful to increase the speed of computation, and also to decrease the size
of the dataframe returned. If None, no truncation is performed.
Returns:
pd.DataFrame with columns precision (float); recall (float);
threshold (float); f1 (float).
"""
if prediction_precision_limit:
normalized_predictions = np.around(normalized_predictions,
prediction_precision_limit)
precisions, recalls, thresholds = sklearn.metrics.precision_recall_curve(
ground_truths.flatten(), normalized_predictions.flatten())
  # Throw away the last precision and recall as they are always 1 and 0
  # respectively, and have no associated threshold.
precisions = precisions[:-1]
recalls = recalls[:-1]
f1s = 2 * (precisions * recalls) / (precisions + recalls)
return pd.DataFrame(
data={
'precision': precisions,
'recall': recalls,
'threshold': thresholds,
'f1': f1s
})
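# Small self-contained sketch (synthetic inputs, not from the source) showing the
# micro-averaged precision/recall/F1 dataframe produced by the function above.
def _example_pr_f1_df():
  truth = np.array([[1, 0], [0, 1]])
  preds = np.array([[0.9, 0.2], [0.4, 0.7]])
  return get_pr_f1_df_from_arrays(truth, preds)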
def get_pr_f1_df(
prediction_df,
label_vocab,
label_normalizer,
eval_vocab = None,
prediction_precision_limit = 18,
):
"""Make a dataframe with each possible threshold and it's corresponding values.
Args:
prediction_df: A dataframe with columns `predictions` and `true_label`.
label_vocab: A list of labels.
label_normalizer: A mapping from labels to their children.
eval_vocab: An optional subset of `label_vocab` on which to restrict
analysis.
prediction_precision_limit: Used to truncate the predictions to a fixed
level of precision. Predictions are truncated to
`prediction_precision_limit` number of decimal places. This argument is
useful to increase the speed of computation, and also to decrease
the size of the dataframe returned. If None, no truncation is performed.
Returns:
    A dataframe with 4 columns: precision, recall, f1, and threshold. At each
threshold precision, recall, and f1 are calculated relative to the
normalized confidences and true labels given in `prediction_df`.
"""
if not eval_vocab:
eval_vocab = set(label_vocab)
label_vocab = np.array(label_vocab)
prediction_array = np.vstack(prediction_df.predictions)
normalized_predictions = normalize_confidences(prediction_array, label_vocab,
label_normalizer)
true_label_sets = prediction_df.true_label.apply(
eval_vocab.intersection).values
eval_indices = np.array(
[i for i, v in enumerate(label_vocab) if v in eval_vocab])
ground_truths = get_ground_truth_multihots(true_label_sets,
label_vocab[eval_indices])
return get_pr_f1_df_from_arrays(ground_truths,
normalized_predictions[:, eval_indices],
prediction_precision_limit)
def true_false_positive_negative_df(df):
"""Computes df of all example/label pairs, and whether they were correct.
Args:
    df: pd.Dataframe that has columns:
      true_label. Contains a set of true labels.
      predicted_label. Contains a set of predicted labels.
      sequence_name. string. Accession.
Returns:
pd.DataFrame that has columns:
sequence_name. string. Name of sequence (accession).
class. string. Class name. Either predicted or true.
predicted. np.bool. Whether the class was predicted for the sequence.
true. np.bool. Whether the class label is true for the sequence.
true_positive. Whether the prediction is a true positive.
false_positive. Whether the prediction is a false positive.
false_negative. Whether the prediction is a false negative.
"""
dict_prep_for_df = {
'sequence_name': [],
'class': [],
'predicted': [],
'true': []
}
for _, row in tqdm.tqdm(df.iterrows(), position=0, total=len(df)):
all_classes = row.predicted_label.union(row.true_label)
for cls in all_classes:
dict_prep_for_df['sequence_name'].append(row.sequence_name)
dict_prep_for_df['class'].append(cls)
dict_prep_for_df['predicted'].append(cls in row.predicted_label)
dict_prep_for_df['true'].append(cls in row.true_label)
working_df = | pd.DataFrame(dict_prep_for_df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 14:37:03 2019
@author: ppradeep
"""
import os
clear = lambda: os.system('cls')
clear()
## Import packages
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import pickle
# Classifiers
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm, preprocessing
path = 'C:/Users/Administrator/OneDrive/Profile/Desktop/HTTK/'
#path = 'Z:/Projects/HTTK/'
#%%
# Normalize descriptors: Transform variables to mean=0, variance=1
def normalizeDescriptors(X):
scaler = preprocessing.StandardScaler().fit(X)
transformed = scaler.transform(X)
x_norm = pd.DataFrame(transformed, index = X.index)
x_norm.columns = X.columns
return(scaler, x_norm)
#%%
###########################################################################
###########################################################################
## Build the final models
###########################################################################
###########################################################################
####-----------------------------------------------------------------------------------------------------------------
## Read training data
####-----------------------------------------------------------------------------------------------------------------
data1 = pd.read_csv(path+'data/Prachi-112117.txt', index_col = 'CAS').loc[:,['All.Compound.Names', 'Human.Funbound.plasma', 'Human.Clint']]
data1.rename(columns={'All.Compound.Names' : 'Name'}, inplace = True)
data2 = pd.read_excel(path+'data/AFFINITY_Model_Results-2018-02-27.xlsx', index_col = 'CAS').loc[:,['Name','Fup.Med']]
data2.rename(columns={'Fup.Med':'Human.Funbound.plasma'}, inplace = True)
data3 = pd.read_excel(path+'data/CLint-2018-03-01-Results.xlsx', index_col = 'CAS').loc[:,['Name','CLint.1uM.Median']]
data3.rename(columns={'CLint.1uM.Median':'Human.Clint'}, inplace = True)
#%%
####-----------------------------------------------------------------------------------------------------------------
## Read training fingerprints
####-----------------------------------------------------------------------------------------------------------------
## Chemotyper FPs: 779 Toxprints
df_chemotypes = pd.read_csv(path+'data/toxprint.txt', sep = ';', index_col='M_NAME') #Rename 'M_NAME' to 'CAS' in data file
## PubChem FPs: 881 bits
df_pubchem = pd.read_csv(path+'data/pubchem.txt', index_col='row ID')
####-----------------------------------------------------------------------------------------------------------------
## Read continuous descriptors
####-----------------------------------------------------------------------------------------------------------------
### OPERA descriptors
df_opera = pd.read_csv(path+'data/OPERA2.5_Pred.csv', index_col='MoleculeID')[['LogP_pred','pKa_a_pred', 'pKa_b_pred']] #In MOE: Right click on mol -> Name -> Extract -> new field 'CAS'
df_opera['pKa_pred']=df_opera[['pKa_a_pred','pKa_b_pred']].min(axis=1)
opera_scaler, opera = normalizeDescriptors(df_opera)#[['pKa_pred','LogP_pred']]
opera = opera[['pKa_pred','LogP_pred']]
## PADEL descriptors
df_padel = pd.read_csv(path+'data/padel.txt', index_col='Name').dropna()
padel_scaler, padel = normalizeDescriptors(df_padel)
## CDK descriptors
df_cdk = pd.read_csv(path+'data/cdk.txt', index_col='row ID').dropna() #Add CAS column to file
cdk_scaler, cdk = normalizeDescriptors(df_cdk)
#%%
####-----------------------------------------------------------------------------------------------------------------
## Save the normalization vector
####-----------------------------------------------------------------------------------------------------------------
pickle.dump(opera_scaler, open(path+'output/opera_scaler.sav', 'wb'))
pickle.dump(padel_scaler, open(path+'output/padel_scaler.sav', 'wb'))
pickle.dump(cdk_scaler, open(path+'output/cdk_scaler.sav', 'wb'))
#%%
####-----------------------------------------------------------------------------------------------------------------
## Features from the 5-fold CV model
####-----------------------------------------------------------------------------------------------------------------
fub_features = pd.read_csv(path+'output/Human.Funbound.plasma_Features.csv')
clint_features_clas = pd.read_csv(path+'output/Clint_Features_Classification.csv')
clint_features_reg = pd.read_csv(path+'output/Clint_Features_Regression.csv')
#%%
####-----------------------------------------------------------------------------------------------------------------
## Model for Fraction Unbound in Plasma
####-----------------------------------------------------------------------------------------------------------------
y_var = 'Human.Funbound.plasma'
# Create a new dataframe with chemical names and y variable value based on raw data
casList = list(set(data1.index.tolist()+data2.index.tolist()+data3.index.tolist()))
data = pd.DataFrame(index = casList, columns = ['Name',y_var])
# Update the training data. If y value is available from later data (data 2 or 3) use that, if not use from old data (data1)
for cas in data.index:
try:
if cas in data1.index:
data.loc[cas,'Name'] = data1.loc[cas,'Name']
data.loc[cas,y_var] = data1.loc[cas,y_var]
if cas in data2.index:
data.loc[cas,'Name'] = data2.loc[cas,'Name']
data.loc[cas,y_var] = data2.loc[cas,y_var]
except:
pass
data.dropna(inplace = True) #Retain data with y variable values
#%%
####-----------------------------------------------------------------------------------------------------------------
## Extract y data
####-----------------------------------------------------------------------------------------------------------------
Y = data[y_var]
## Set data for modeling
## Transform Y
Y[Y==1.0] = 0.99
Y[Y==0] = 0.005
Y_model = (1-Y)/Y
Y_model = Y_model.apply(lambda x: np.log10(x))
Y_index = Y_model.index
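# Added note for clarity (not in the original script): the target above is the
# log10 bound/unbound ratio, y = log10((1 - Fub)/Fub). The helper below is an
# illustrative algebraic inverse for mapping model output back to a fraction
# unbound; the function name is ours, not the authors'.
def _fub_from_transformed(y_pred):
    return 1.0 / (1.0 + 10.0 ** y_pred)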
#%%
####-----------------------------------------------------------------------------------------------------------------
## Combine fingerprints
####-----------------------------------------------------------------------------------------------------------------
fingerprints = pd.concat([df_pubchem, df_chemotypes], axis=1).dropna()
fingerprints = fingerprints.loc[Y_index,:].dropna()
# Select fingerprints from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in fub_features.loc[0,'Fingerprints'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("c]",'c') ##manually check the last entry and correct it
fingerprints = fingerprints.loc[:,retain]
####-----------------------------------------------------------------------------------------------------------------
## Combine descriptors
####-----------------------------------------------------------------------------------------------------------------
descriptors = pd.concat([padel, cdk], axis=1).dropna()
descriptors = descriptors.loc[Y_index,:].dropna()
# Select descriptors from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in fub_features.loc[0,'Padel+CDK'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("]",'')
descriptors = descriptors.loc[:,retain]
####-----------------------------------------------------------------------------------------------------------------
## Combine all the descriptors and set the X and Y for training the model
####-----------------------------------------------------------------------------------------------------------------
data = pd.concat([Y_model, fingerprints, opera], axis=1).dropna(axis=0, how='any')
X_fub_model = data.loc[:, data.columns != y_var]
Y_fub_model = data[y_var]
meanY = np.mean(Y_fub_model)
stdY = np.std(Y_fub_model)
#%%
## Histogram of the final training set
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 6), dpi = 300)
Y_fub_model.hist(alpha = 0.75, color = 'r', grid = False)
plt.annotate('N = %d' %len(Y_fub_model), [-2.5,200], size = 20)
plt.annotate(r'$\mu = %0.2f$' %(meanY), [-2.5,185], size = 20)
plt.annotate(r'$\sigma = %0.2f$' %(stdY), [-2.5,170], size = 20)
plt.xlabel('Fub$_{tr}$', size = 24, labelpad = 10)
plt.ylabel('Frequency', size = 24, labelpad = 10)
plt.xticks(fontsize = 24)#, rotation = 90)
plt.yticks(fontsize = 24)
plt.savefig(path+'/output/%s_TrainingData.png' %y_var, bbox_inches='tight')
plt.show()
data.to_csv(path+'output/fub_trainingdata.csv', index_label='CASRN')
#%%
####-----------------------------------------------------------------------------------------------------------------
## Develop model
clf_fub1 = svm.SVR(epsilon = 0.1, C = 10, gamma = 0.01, kernel = "rbf")
clf_fub1 = clf_fub1.fit(X = X_fub_model, y = Y_fub_model)
clf_fub2 = RandomForestRegressor(max_features = 'auto', n_estimators = 1000, random_state = 5)
clf_fub2 = clf_fub2.fit(X = X_fub_model, y = Y_fub_model)
#
## Save the models to disk
pickle.dump(clf_fub1, open(path+'output/fub_svr.sav', 'wb'))
pickle.dump(clf_fub2, open(path+'output/fub_rf.sav', 'wb'))
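# Hedged prediction sketch (not part of the original script): one plausible way
# to reload the pickled estimators above and score new chemicals. It assumes
# `X_new` already has exactly the same columns, ordering and normalization as
# X_fub_model; averaging the two learners is an illustrative choice, not the
# authors' documented procedure.
def _predict_fub(X_new):
    svr = pickle.load(open(path + 'output/fub_svr.sav', 'rb'))
    rf = pickle.load(open(path + 'output/fub_rf.sav', 'rb'))
    y_transformed = (svr.predict(X_new) + rf.predict(X_new)) / 2.0
    # invert y = log10((1 - Fub)/Fub) back to a fraction unbound in (0, 1)
    return 1.0 / (1.0 + 10.0 ** y_transformed)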
#%%
###########################################################################
## Models for Intrinsic Clearance
###########################################################################
###########################################################################
## Read and analyze input data
###########################################################################
data1 = pd.read_csv(path+'data/Prachi-112117.txt', index_col = 'CAS').loc[:,['All.Compound.Names', 'Human.Funbound.plasma', 'Human.Clint']]
data1.rename(columns={'All.Compound.Names' : 'Name'}, inplace = True)
data2 = pd.read_excel(path+'data/AFFINITY_Model_Results-2018-02-27.xlsx', index_col = 'CAS').loc[:,['Name','Fup.Med']]
data2.rename(columns={'Fup.Med':'Human.Funbound.plasma'}, inplace = True)
data3 = pd.read_excel(path+'data/CLint-2018-03-01-Results.xlsx', index_col = 'CAS').loc[:,['Name','CLint.1uM.Median']]
data3.rename(columns={'CLint.1uM.Median':'Human.Clint'}, inplace = True)
#%%
## HTTK package data
# Set y variable
y_var = 'Human.Clint'
# Create a new dataframe with chemical names and y variable value based on raw data
casList = list(set(data1.index.tolist()+data2.index.tolist()+data3.index.tolist()))
#%%
data = pd.DataFrame(index = casList, columns = ['Name',y_var])
#%%
# Update the training data. If y value is available from later data (data 2 or 3) use that, if not use from old data (data1)
for cas in data.index:
try:
if cas in data1.index:
data.loc[cas,'Name'] = data1.loc[cas,'Name']
data.loc[cas,y_var] = data1.loc[cas,y_var]
        if cas in data3.index:
            data.loc[cas,'Name'] = data3.loc[cas,'Name']
            data.loc[cas,y_var] = data3.loc[cas,y_var]
except:
pass
data.dropna(inplace = True) #Retain data with y variable values
#%%
## Transform the data: Bin the clearance variable for classification
Y = data[y_var]
Y_clas = Y.copy()
Y_clas[Y <= 0.9] = -3
Y_clas[(Y > 0.9) & (Y <= 50)] = -2
Y_clas[Y > 50] = -1
Y_clas = pd.Series(Y_clas, index = Y.index)
low_median = Y[Y_clas[Y_clas==-3].index].median()
high_median = Y[Y_clas[Y_clas==-1].index].median()
###########################################################################
## Classification:
## Combine fingerprints and perform feature selection
fingerprints = pd.concat([df_pubchem, df_chemotypes], axis=1).dropna()
fingerprints = fingerprints.loc[Y_clas.index,:].dropna()
# Select fingerprints from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in clint_features_clas.loc[0,'Fingerprints'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("]",'')
fingerprints = fingerprints.loc[:,retain]
#%%
## Classification: Combine all the descriptors and set the X and Y for training the classification model
data = pd.concat([Y_clas, fingerprints, opera], axis=1).dropna(axis=0, how='any')
X_ClintClas_model = data.loc[:, data.columns != y_var]
Y_ClintClas_model = data[y_var]
#%%
data.to_csv(path+'output/clintclas_trainingdata.csv', index_label='CASRN')
#%%
## Histogram of the final training set
import matplotlib.pyplot as plt
plt.gcf().subplots_adjust(bottom=0.5)
plt.figure(figsize=[8,6], dpi = 300)
plt.hist(Y_ClintClas_model.values.tolist(), color = 'r', align = 'left', rwidth = 1)
plt.annotate('N = %d' %len(Y_ClintClas_model.values.tolist()), [-3.15,260], size = 24)
labels = ['Low', 'Medium', 'High']
plt.xticks([-3, -2, -1], labels, size = 18)
plt.xlabel('Transformed Clearance \n(for classification)', size = 28, labelpad = 5)
plt.ylabel('Frequency', size = 28, labelpad = 5)
plt.xticks(fontsize = 20)#, rotation = 90)
plt.yticks(fontsize = 20)
plt.savefig(path+'output/%sClas_TrainingData.png'%y_var, bbox_inches='tight')
#%%
###########################################################################
## Develop classification model
## Classification model
clf_clintClas = svm.SVC(C=10, decision_function_shape='ovo', gamma=0.01, kernel='rbf')
clf_clintClas = clf_clintClas.fit(X = X_ClintClas_model, y = Y_ClintClas_model.values.tolist())
#%%
###########################################################################
## Intrinsic Clearance Regression
###########################################################################
## Regression
## Extract y data for regression
Y_reg = Y[(Y > 0.9) & (Y <= 50)]
## Transform Y
Y_reg = Y_reg.apply(lambda x: np.log10(x))
## Combine fingerprints and perform feature selection
fingerprints = pd.concat([df_pubchem, df_chemotypes], axis=1).dropna()
fingerprints = fingerprints.loc[Y_reg.index,:].dropna()
#%%
# Select fingerprints from the feature file
retain = [str(val.replace("'", "").replace(" ", "")) for val in clint_features_reg.loc[0,'Fingerprints'].split(',')]
retain[0] = retain[0].replace("[", "")
retain[len(retain)-1] = retain[len(retain)-1].replace("]",'')
fingerprints = fingerprints.loc[:,retain]
descriptors = pd.concat([padel, cdk], axis=1)
import unittest
import pandas as pd
import skfda
class TestPandas(unittest.TestCase):
def setUp(self) -> None:
self.fd = skfda.FDataGrid(
[[1, 2, 3, 4, 5, 6, 7], [2, 3, 4, 5, 6, 7, 9]])
self.fd_basis = self.fd.to_basis(skfda.representation.basis.BSpline(
n_basis=5))
def test_fdatagrid_series(self) -> None:
        series = pd.Series(self.fd)
import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
    def is_datelike(self):
        """ return True if I am a datelike (datetime or timedelta) """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
            # a valid type for astyping
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
                        # we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
                # pseudo broadcast (it's a 2d vs 1d say and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
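

# Illustrative aside, not part of pandas itself: a hedged sketch of how these
# Block classes surface from user code in pandas of this vintage. `_data` is the
# internal BlockManager attribute of that era (renamed `_mgr` in later versions),
# so treat this strictly as an exploration aid, not a supported API.
def _block_usage_sketch():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2], 'b': [1.5, 2.5], 'c': ['x', 'y']})
    # Each homogeneous dtype is stored in its own block: here an IntBlock,
    # a FloatBlock and an ObjectBlock, each carrying values plus mgr_locs.
    return df._data.blocks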
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
                not isinstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
value = np.timedelta64(int(value * 1e9))
elif isinstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = isnull(v)
v = v.astype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
copy=True, by_item=True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = com._possibly_convert_objects(
values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = com._possibly_convert_objects(
self.values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(self.values.shape)
blocks.append(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(datetime=True,
numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replace):
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replace:
blk[0], = blk[0]._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
result = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex)
if not isinstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null if it's null it gets returned
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return [self.copy() if copy else self]
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.fillna(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
# but are passed the axis depending on the calling routing
# if its REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1,len(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = | tslib.cast_to_nanoseconds(values) | pandas.tslib.cast_to_nanoseconds |
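# --- Illustrative aside (not part of the pandas source above): a minimal,
# self-contained sketch of the regex-replacement pattern ObjectBlock.replace
# uses, i.e. guarding the substitution against non-string cells and applying
# it with np.vectorize. Names below are made up for the example.
import re
import numpy as np

def regex_replace_object_array(values, pattern, repl):
    rx = re.compile(pattern)
    def re_replacer(s):
        try:
            return rx.sub(repl, s)
        except TypeError:
            # non-string cells (NaN, numbers, ...) pass through unchanged
            return s
    return np.vectorize(re_replacer, otypes=[object])(values)

arr = np.array(["fooBAD", np.nan, 3, "BAD_bar"], dtype=object)
print(regex_replace_object_array(arr, r"BAD[_]*", ""))  # ['foo' nan 3 'bar']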
# -*- coding: utf-8 -*-
# Name: pypandas
# Version: 0.1a3
# Owner: <NAME>
import pandas as pd
def employees_num(engine):
"""Get number of employees"""
df = pd.read_sql_table('employees', con=engine)
num = df.employee_id.count()
return num
def sales_num_per_category(engine):
"""Get sales per category"""
df1 = pd.read_sql_table('order_details', con=engine)
df1 = df1[['order_id', 'product_id']]
df2 = pd.read_sql_table('products', con=engine)
df2 = df2[['product_id', 'category_id']]
df3 = pd.merge(df1, df2, on='product_id', how='outer')
df4 = pd.read_sql_table('categories', con=engine)
df4 = df4[['category_id', 'category_name']]
df5 = pd.merge(df3, df4, on='category_id', how='outer')
df5 = df5[['order_id', 'category_name']].drop_duplicates()
tmp = df5.groupby(['category_name']).count()['order_id']
tmp = tmp.to_frame(name='orders').sort_values(['orders'], ascending=False)
result = tmp.to_dict()['orders']
return result
def sales_num_per_region(engine):
"""Get total sales per region"""
df = pd.read_sql_table('orders', con=engine)
tmp = df.groupby(['ship_region']).count()['order_id']
tmp = tmp.to_frame(name='orders').sort_values(['orders'], ascending=False)
result = tmp.to_dict()['orders']
return result
def statistics_per_year(engine):
"""Get sales_overview for each year.
Total sales, total products sold, best customer country,
best selling category.
"""
data = {}
data['year'] = []
data['sales_num'] = []
data['products_num_sold'] = []
data['top_country'] = []
data['top_category'] = []
df1 = pd.read_sql_table('order_details', con=engine)
df2 = pd.read_sql_table('orders', con=engine)
df3 = pd.merge(df1, df2, on='order_id', how='outer')
df4 = pd.read_sql_table('products', con=engine)
df5 = pd.merge(df3, df4, on='product_id', how='outer')
df6 = pd.read_sql_table('categories', con=engine)
df7 = pd.merge(df5, df6, on='category_id', how='outer')
df7 = df7[['order_id', 'order_date', 'quantity', 'unit_price_x',
'ship_country', 'category_name']]
df7['year'] = df7.order_date.dt.year
years = sorted(df7.year.unique())
for year in years:
data['year'].append(year)
num = len(df7[df7.year == year]['order_id'].unique())
data['sales_num'].append(num)
num = df7[df7.year == year].quantity.sum()
data['products_num_sold'].append(num)
value = df7[df7.year == year].groupby(['ship_country']) \
.count()['order_id'].to_frame(name='orders') \
.sort_values(['orders'], ascending=False).head(1).index[0]
data['top_country'].append(value)
value = df7[df7.year == year].groupby(['category_name']) \
.count()['order_id'].to_frame(name='orders') \
.sort_values(['orders'], ascending=False).head(1).index[0]
data['top_category'].append(value)
return data
def suppliers_num(engine):
"""Get number of suppliers"""
df = pd.read_sql_table('suppliers', con=engine)
num = df.supplier_id.count()
return num
def top_countries(engine, limit=5):
"""Get top selling countries, sales per country"""
df = | pd.read_sql_table('orders', con=engine) | pandas.read_sql_table |
from calendar import monthrange
from datetime import datetime
import pandas as pd
from flask import Blueprint, jsonify, abort, g
from gatekeeping.api.budget import get_budget
from gatekeeping.api.position import get_positions
from gatekeeping.api.function import get_functions, get_function
from gatekeeping.api.user import get_user_function
def get_line_chart(function=None):
positions = get_positions(check_submitter=False)
budget = get_budget()
columns = [row.keys() for row in positions]
positions = pd.DataFrame(positions, columns=columns[0])
budget = pd.DataFrame(budget, columns=columns[0])
if function:
if function != 'All':
positions = positions.loc[positions['function'] == function]
budget = budget.loc[budget['function'] == function]
if g.user['type'] != 'ADMIN' and function == 'All':
functions = get_user_function(g.user['id'])
function_names = [get_function(function['function_id'])['name'] for function in functions]
positions = positions.loc[positions['function'].isin(function_names)]
budget = budget.loc[budget['function'].isin(function_names)]
positions['FTE'] = pd.to_numeric(positions['hours'], errors='coerce') / 40
budget['FTE'] = pd.to_numeric(budget['hours'], errors='coerce') / 40
positions['salary'] = pd.to_numeric(positions['salary'], errors='coerce')
positions['fringe_benefit'] = pd.to_numeric(positions['fringe_benefit'], errors='coerce')
positions['social_security_contribution'] = pd.to_numeric(positions['social_security_contribution'], errors='coerce')
budget['salary'] = pd.to_numeric(budget['salary'], errors='coerce')
budget['fringe_benefit'] = pd.to_numeric(budget['fringe_benefit'], errors='coerce')
budget['social_security_contribution'] = pd.to_numeric(budget['social_security_contribution'], errors='coerce')
positions['total_cost'] = positions['salary'].add(positions['fringe_benefit'], fill_value=0).add(positions['social_security_contribution'], fill_value=0)
budget['total_cost'] = budget['salary'].add(budget['fringe_benefit'], fill_value=0).add(budget['social_security_contribution'], fill_value=0)
positions['start_date'] = pd.to_datetime(positions['start_date'], errors='coerce')
positions['end_date'] = pd.to_datetime(positions['end_date'], errors='coerce')
budget['start_date'] = pd.to_datetime(budget['start_date'], errors='coerce')
budget['end_date'] = | pd.to_datetime(budget['end_date'], errors='coerce') | pandas.to_datetime |
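# --- Illustrative aside (not from the module above): the coercion + fill_value
# pattern get_line_chart relies on, with made-up position rows. Strings that are
# not numeric become NaN, and .add(..., fill_value=0) keeps partially-filled rows.
import pandas as pd

positions = pd.DataFrame({"hours": ["40", "20", "n/a"],
                          "salary": ["50000", None, "60000"],
                          "fringe_benefit": ["5000", "1000", None]})
positions["FTE"] = pd.to_numeric(positions["hours"], errors="coerce") / 40
positions["total_cost"] = (pd.to_numeric(positions["salary"], errors="coerce")
                           .add(pd.to_numeric(positions["fringe_benefit"], errors="coerce"),
                                fill_value=0))
print(positions[["FTE", "total_cost"]])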
import pandas as pd
import requests
import hmac
import hashlib
import json
import base64
import time
import math
from .base import BaseAPIWrapper, private_api
from .exception import ExchangeAPIException
def truncate(f, n):
return math.floor(round(f * 10 ** n, n)) / 10 ** n
class CoinoneAPIWrapper(BaseAPIWrapper):
host = 'https://api.coinone.co.kr/'
error_messages = {
'4': 'Blocked user access',
'11': 'Access token is missing',
'12': 'Invalid access token',
'40': 'Invalid API permission',
'50': 'Authenticate error',
'51': 'Invalid API',
'52': 'Deprecated API',
'53': 'Two Factor Auth Fail',
'100': 'Session expired',
'101': 'Invalid format',
'102': 'ID is not exist',
'103': 'Lack of Balance',
'104': 'Order id is not exist',
'105': 'Price is not correct',
'106': 'Locking error',
'107': 'Parameter error',
'111': 'Order id is not exist',
'112': 'Cancel failed',
'113': 'Quantity is too low(ETH, ETC > 0.01)',
'120': 'V2 API payload is missing',
'121': 'V2 API signature is missing',
'122': 'V2 API nonce is missing',
'123': 'V2 API signature is not correct',
'130': 'V2 API Nonce value must be a positive integer',
'131': 'V2 API Nonce is must be bigger then last nonce',
'132': 'V2 API body is corrupted',
'141': 'Too many limit orders',
'150': "It's V1 API. V2 Access token is not acceptable",
'151': "It's V2 API. V1 Access token is not acceptable",
'200': 'Wallet Error',
'202': 'Limitation error',
'210': 'Limitation error',
'220': 'Limitation error',
'221': 'Limitation error',
'310': 'Mobile auth error',
'311': 'Need mobile auth',
'312': 'Name is not correct',
'330': 'Phone number error',
'404': 'Page not found error',
'405': 'Server error',
'429': 'Too Many Requests',
'444': 'Locking error',
'500': 'Email error',
'501': 'Email error',
'777': 'Mobile auth error',
'778': 'Phone number error',
'779': 'Address error',
'1202': 'App not found',
'1203': 'Already registered',
'1204': 'Invalid access',
'1205': 'API Key error',
'1206': 'User not found',
'1207': 'User not found',
'1208': 'User not found',
'1209': 'User not found'
}
def __init__(self, api_key=None, secret_key=None):
super().__init__('coinone', api_key, secret_key)
self.ticker_keys = ['high', 'low', 'last', 'first', 'volume',
                    'yesterday_high', 'yesterday_low', 'yesterday_last', 'yesterday_first', 'yesterday_volume']
def get_base_payload(self):
return {
'access_token': self.api_key
}
@staticmethod
def get_encoded_payload(payload):
payload['nonce'] = int(time.time()*1000)
dumped_json = json.dumps(payload)
encoded_json = base64.b64encode(dumped_json.encode())
return encoded_json
def get_signature(self, encoded_payload):
signature = hmac.new(self.secret_key.upper().encode(), encoded_payload, hashlib.sha512)
return signature.hexdigest()
def prepare_request(self, method, endpoint, **kwargs):
params = dict(**kwargs.get('additional_params', {}))
payload = dict(**kwargs.get('additional_payload', {}))
headers = {
'Content-Type': 'application/json'
}
if kwargs.get('private', True):
payload = self.get_encoded_payload(dict(payload, **self.get_base_payload()))
signature = self.get_signature(payload)
headers = dict(headers, **{
'X-COINONE-PAYLOAD': payload,
'X-COINONE-SIGNATURE': signature
})
return requests.Request(
method=method,
url=(self.host + endpoint),
headers=headers,
params=params,
data=payload
).prepare()
def is_req_succeed(self, resp):
try:
data = resp.json()
return int(data.get('errorCode')) == 0
except Exception as e:
return json.loads(resp.content.decode('utf-8').replace('“','"').replace('”','"')).get("errorCode") == 0
def get_error_message(self, resp):
try:
data = resp.json()
return self.error_messages.get(str(data.get('errorCode')))
except Exception as e:
return self.error_messages.get(json.loads(resp.content.decode('utf-8').replace('“','"').replace('”','"')).get("errorCode"))
def get_ticker(self, currency, *args, **kwargs):
endpoint = 'ticker/'
resp = self.request('GET', endpoint, additional_params={
'currency': currency
}, private=False)
if not self.is_req_succeed(resp):
return self.get_error_message(resp)
else:
resp = resp.json()
return {k: resp[k] for k in resp.keys() & self.ticker_keys}
def get_transactions(self, currency, *args, **kwargs):
endpoint = 'trades/'
resp = self.request('GET', endpoint, additional_params={
'currency': currency,
'period': kwargs.get('period', 'hour')
}, private=False)
if not self.is_req_succeed(resp):
return self.get_error_message(resp)
else:
resp = resp.json()
return resp.get('completeOrders')
def get_orderbook(self, currency, fiat, limit, *args, **kwargs):
endpoint = 'orderbook/'
resp = self.request('GET', endpoint, additional_params={
'currency': currency
}, private=False)
if not self.is_req_succeed(resp):
return self.get_error_message(resp)
else:
resp = resp.json()
orderbook_dict = {'ask_price': list(), 'ask_quantity': list(), 'bid_price': list(), 'bid_quantity': list()}
for i in resp['ask'][:limit]:
for k, v in i.items():
orderbook_dict['ask_{}'.format('quantity' if k == 'qty' else k)].append(float(v))
for i in resp['bid'][:limit]:
for k, v in i.items():
orderbook_dict['bid_{}'.format('quantity' if k =='qty' else k)].append(float(v))
return {'orderbook': | pd.DataFrame(orderbook_dict) | pandas.DataFrame |
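# --- Illustrative aside (standalone, with placeholder credentials): the signing
# scheme CoinoneAPIWrapper implements above -- JSON payload, base64 encoding,
# then HMAC-SHA512 over the encoded bytes.
import base64, hashlib, hmac, json, time

secret_key = "FAKE-SECRET"            # placeholder, not a real credential
payload = {"access_token": "FAKE-TOKEN", "nonce": int(time.time() * 1000)}
encoded = base64.b64encode(json.dumps(payload).encode())
signature = hmac.new(secret_key.upper().encode(), encoded, hashlib.sha512).hexdigest()
headers = {"X-COINONE-PAYLOAD": encoded, "X-COINONE-SIGNATURE": signature}
print(signature[:16], "...")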
#!/usr/bin/env python
"""
Functions used to generate results tables.
Used for results in 'Dynamic nested sampling: an improved algorithm for nested
sampling parameter estimation and evidence calculation' (Higson et al., 2019).
"""
import copy
import pandas as pd
import numpy as np
import nestcheck.io_utils as iou
import nestcheck.ns_run_utils
import nestcheck.error_analysis
import nestcheck.parallel_utils as pu
import nestcheck.pandas_functions as pf
import perfectns.nested_sampling as ns
import perfectns.priors as priors
import perfectns.estimators as e
@iou.timing_decorator
def get_dynamic_results(n_run, dynamic_goals_in, estimator_list_in,
settings_in, **kwargs):
"""
Generate data frame showing the standard deviations of the results of
repeated calculations and efficiency gains (ratios of variances of results
calculations) from different dynamic goals. To make the comparison fair,
for dynamic nested sampling settings.n_samples_max is set to slightly below
the mean number of samples used by standard nested sampling.
This function was used for Tables 1, 2, 3 and 4, as well as to generate the
results shown in figures 6 and 7 of 'Dynamic nested sampling: an improved
algorithm for nested sampling parameter estimation and evidence
calculation' (Higson et al., 2019). See the paper for a more detailed
description.
Parameters
----------
n_run: int
how many runs to use
dynamic_goals_in: list of floats
which dynamic goals to test
estimator_list_in: list of estimator objects
settings_in: PerfectNSSettings object
load: bool, optional
should run data and results be loaded if available?
save: bool, optional
should run data and results be saved?
overwrite_existing: bool, optional
if a file exists already but we generate new run data, should we
overwrite the existing file when saved?
run_random_seeds: list, optional
list of random seeds to use for generating runs.
parallel: bool, optional
cache_dir: str, optional
Directory to use for caching.
tuned_dynamic_ps: list of bools, same length as dynamic_goals_in, optional
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node and therefore there will be no speedup from multiple
nodes (must specify manually in this case).
Returns
-------
results: pandas data frame
results data frame.
Contains rows:
mean [dynamic goal]: mean calculation result for standard nested
sampling and dynamic nested sampling with each input dynamic
goal.
std [dynamic goal]: standard deviation of results for standard
nested sampling and dynamic nested sampling with each input
dynamic goal.
gain [dynamic goal]: the efficiency gain (computational speedup)
from dynamic nested sampling compared to standard nested
sampling. This equals (variance of standard results) /
(variance of dynamic results); see the dynamic nested
sampling paper for more details.
"""
load = kwargs.pop('load', False)
save = kwargs.pop('save', False)
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
cache_dir = kwargs.pop('cache_dir', 'cache')
overwrite_existing = kwargs.pop('overwrite_existing', True)
run_random_seeds = kwargs.pop('run_random_seeds', list(range(n_run)))
tuned_dynamic_ps = kwargs.pop('tuned_dynamic_ps',
[False] * len(dynamic_goals_in))
assert len(tuned_dynamic_ps) == len(dynamic_goals_in)
for goal in dynamic_goals_in:
assert goal is not None, \
'Goals should be dynamic - standard NS already included'
# Add a standard nested sampling run for comparison:
dynamic_goals = [None] + dynamic_goals_in
tuned_dynamic_ps = [False] + tuned_dynamic_ps
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
# Make a copy of the input settings to stop us editing them
settings = copy.deepcopy(settings_in)
# make save_name
save_root = 'dynamic_test'
for dg in dynamic_goals_in:
save_root += '_' + str(dg).replace('.', '_')
save_root += '_' + settings.save_name(include_dg=False)
save_root += '_' + str(n_run) + 'reps'
save_file = cache_dir + '/' + save_root + '.pkl'
# try loading results
if load:
try:
return pd.read_pickle(save_file)
except OSError:
print('Could not load file: ' + save_file)
# start function
# --------------
# get info on the number of samples taken in each run as well
estimator_list = [e.CountSamples()] + estimator_list_in
est_names = [est.latex_name for est in estimator_list]
method_names = []
method_values = []
assert dynamic_goals[0] is None, (
'Need to start with standard ns to calculate efficiency gains')
for i, dynamic_goal in enumerate(dynamic_goals):
# set up settings
settings.dynamic_goal = dynamic_goal
settings.tuned_dynamic_p = tuned_dynamic_ps[i]
# if we have already done the standard calculation, set n_samples_max
# for dynamic calculations so it is slightly smaller than the number
# of samples the standard calculation used to ensure a fair comparison
# of performance. Otherwise dynamic nested sampling will end up using
# more samples than standard nested sampling as it does not terminate
# until after the number of samples is greater than n_samples_max.
if i != 0 and settings.dynamic_goal is not None:
assert dynamic_goals[0] is None
assert isinstance(estimator_list[0], e.CountSamples)
n_samples_max = np.mean(np.asarray([val[0] for val in
method_values[0]]))
# This factor is a function of the dynamic goal as typically
# evidence calculations have longer additional threads than
# parameter estimation calculations.
reduce_factor = 1 - ((1.5 - 0.5 * settings.dynamic_goal) *
(settings.nbatch / settings.nlive_const))
settings.n_samples_max = int(n_samples_max * reduce_factor)
print('dynamic_goal=' + str(settings.dynamic_goal),
'n_samples_max=' + str(settings.n_samples_max))
# get a name for this calculation method
if dynamic_goal is None:
method_names.append('standard')
else:
method_names.append('dynamic $G=' +
str(settings.dynamic_goal) + '$')
if settings.tuned_dynamic_p is True:
method_names[-1] += ' tuned'
# generate runs and get results
run_list = ns.get_run_data(settings, n_run, parallel=parallel,
random_seeds=run_random_seeds,
load=load, save=save,
max_workers=max_workers,
cache_dir=cache_dir,
overwrite_existing=overwrite_existing)
method_values.append(pu.parallel_apply(
nestcheck.ns_run_utils.run_estimators, run_list,
func_args=(estimator_list,), max_workers=max_workers,
parallel=parallel))
results = pf.efficiency_gain_df(method_names, method_values, est_names)
if save:
# save the results data frame
print('get_dynamic_results: saving results to\n' + save_file)
results.to_pickle(save_file)
return results
@iou.timing_decorator
def merged_dynamic_results(dim_scale_list, likelihood_list, settings,
estimator_list, **kwargs):
"""
Wrapper for running get_dynamic_results for many different likelihood,
dimension and prior scales, and merging the output into a single
data frame.
See get_dynamic_results documentation for more details.
Parameters
----------
dim_scale_list: list of tuples
(dim, prior_scale) pairs to run
likelihood_list: list of likelihood objects
settings_in: PerfectNSSettings object
estimator_list: list of estimator objects
n_run: int, optional
number of runs for use with each setting.
dynamic_goals_in: list of floats, optional
which dynamic goals to test
(remaining kwargs passed to get_dynamic_results)
Returns
-------
results: pandas data frame
"""
dynamic_goals = kwargs.pop('dynamic_goals', [0, 1])
load = kwargs.pop('load', True) # ensure default True for merged results
save = kwargs.pop('save', True) # ensure default True for merged results
n_run = kwargs.pop('n_run', 1000)
results_list = []
for likelihood in likelihood_list:
for n_dim, prior_scale in dim_scale_list:
settings.n_dim = n_dim
settings.likelihood = likelihood
if n_dim >= 50:
settings.prior = priors.GaussianCached(prior_scale=prior_scale)
else:
settings.prior = priors.Gaussian(prior_scale=prior_scale)
like_lab = (type(settings.likelihood).__name__
.replace('ExpPower', 'Exp Power'))
if type(settings.likelihood).__name__ == 'ExpPower':
like_lab += (', $b=' + str(settings.likelihood.power)
.replace('0.75', r'\frac{3}{4}') + '$')
print(like_lab, 'd=' + str(n_dim),
'prior_scale=' + str(prior_scale))
df_temp = get_dynamic_results(
n_run, dynamic_goals, estimator_list, settings, save=save,
load=load, **kwargs)
new_inds = ['likelihood', 'dimension $d$', r'$\sigma_\pi$']
df_temp[new_inds[0]] = like_lab
df_temp[new_inds[1]] = settings.n_dim
df_temp[new_inds[2]] = settings.prior.prior_scale
order = new_inds + list(df_temp.index.names)
df_temp.set_index(new_inds, drop=True, append=True,
inplace=True)
df_temp = df_temp.reorder_levels(order)
results_list.append(df_temp)
results = | pd.concat(results_list) | pandas.concat |
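# --- Illustrative aside (synthetic numbers, not real nested-sampling output):
# the "efficiency gain" described in the get_dynamic_results docstring is just a
# ratio of result variances between standard and dynamic runs.
import numpy as np

rng = np.random.default_rng(0)
standard_results = rng.normal(0.0, 1.0, size=1000)   # stand-in for repeated standard-NS results
dynamic_results = rng.normal(0.0, 0.5, size=1000)    # stand-in for repeated dynamic-NS results
gain = np.var(standard_results, ddof=1) / np.var(dynamic_results, ddof=1)
print(round(gain, 2))  # ~4, i.e. dynamic runs would need roughly 4x fewer samples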
#!/usr/bin/env python
"""Plot data for deepmod methylation calling"""
########################################################################
# File: plot_deepmod_methylation_calling.py
# executable: plot_deepmod_methylation_calling.py
#
# Author: <NAME>
# History: Created 04/02/19
########################################################################
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from py3helpers.classification import ClassificationMetrics
from py3helpers.utils import list_dir, time_it
from py3helpers.seq_tools import ReverseComplement
class CustomAmbiguityPositions(object):
def __init__(self, ambig_filepath):
"""Deal with ambiguous positions from a tsv ambiguity position file with the format of
contig position strand change_from change_to
'name' 0 indexed position +/- C E
:param ambig_filepath: path to ambiguity position file"""
self.ambig_df = self.parseAmbiguityFile(ambig_filepath)
@staticmethod
def parseAmbiguityFile(ambig_filepath):
"""Parses a 'ambiguity position file' that should have the format:
contig position strand change_from change_to
:param ambig_filepath: path to ambiguity position file
"""
return pd.read_csv(ambig_filepath, sep='\t',
usecols=(0, 1, 2, 3, 4),
names=["contig", "position", "strand", "change_from", "change_to"],
dtype={"contig": np.str,
"position": np.int,
"strand": np.str,
"change_from": np.str,
"change_to": np.str})
def getForwardSequence(self, contig, raw_sequence):
"""Edit 'raw_sequence' given a ambiguity positions file. Assumes raw_sequence is forward direction( 5'-3')
:param contig: which contig the sequence belongs (aka header)
:param raw_sequence: raw nucleotide sequence
:return: edited nucleotide sequence
"""
return self._get_substituted_sequence(contig, raw_sequence, "+")
def getBackwardSequence(self, contig, raw_sequence):
"""Edit 'raw_sequence' given a ambiguity positions file, Assumes raw_sequence is forward direction( 5'-3')
:param contig: which contig the sequence belongs (aka header)
:param raw_sequence: raw nucleotide sequence
:return: edited nucleotide sequence
"""
rc = ReverseComplement()
raw_sequence = rc.complement(raw_sequence)
return self._get_substituted_sequence(contig, raw_sequence, "-")
def _get_substituted_sequence(self, contig, raw_sequence, strand):
"""Change the given raw nucleotide sequence using the edits defined in the positions file
:param contig: name of contig to find
:param raw_sequence: nucleotide sequence (note: this is note edited in this function)
:param strand: '+' or '-' to indicate strand
"""
contif_df = self._get_contig_positions(contig, strand)
raw_sequence = list(raw_sequence)
for _, row in contif_df.iterrows():
if raw_sequence[row["position"]] != row["change_from"]:
raise RuntimeError(
"[CustomAmbiguityPositions._get_substituted_sequence]Illegal substitution requesting "
"change from %s to %s, row: %s" % (raw_sequence[row["position"]], row["change_to"], row))
raw_sequence[row["position"]] = row["change_to"]
return "".join(raw_sequence)
def _get_contig_positions(self, contig, strand):
"""Get all unique locations within the positions file
:param contig: name of contig to find
:param strand: '+' or '-' to indicate strand
"""
df = self.ambig_df.loc[
(self.ambig_df["contig"] == contig) & (self.ambig_df["strand"] == strand)].drop_duplicates()
assert len(df['position']) == len(set(df['position'])), "Multiple different changes for a single position. {}" \
.format(df['position'])
return df
def parse_deepmod_bed(deepmod_bed_path):
"""Parse the summary chromosome bed file output from deepmod
eg: Chromosome 2 3 C 8 + 2 3 0,0,0 8 0 0
"""
return pd.read_csv(deepmod_bed_path, sep=" ", header=None,
usecols=(0, 1, 3, 5, 9, 10, 11),
names=["contig", "start_position", "base", "strand", "n_reads", "modification_percentage",
"n_mod_calls"],
dtype={"contig": np.str,
"start_position": np.int,
"base": np.str,
"strand": np.str,
"n_reads": np.int,
"modification_percentage": np.int,
"n_mod_calls": np.int})
def aggregate_deepmod_data(deepmod_output_dir):
deepmod_beds = list_dir(deepmod_output_dir, ext="bed")
deepmod_bed_data = []
for bed in deepmod_beds:
data = parse_deepmod_bed(bed)
data["E"] = (data["modification_percentage"] / 100)
data["C"] = 1 - (data["modification_percentage"] / 100)
deepmod_bed_data.append(data)
return pd.concat(deepmod_bed_data)
def print_confusion_matrix(tp, fp, fn, tn):
precision = tp / (tp + fp)
false_discovery_rate = fp / (tp + fp)
false_omission_rate = fn / (tn + fn)
negative_predictive_value = tn / (tn + fn)
true_positive_rate_recall = tp / (tp + fn)
false_negative_rate = fn / (tp + fn)
false_positive_rate = fp / (fp + tn)
true_negative_rate_specificity = tn / (tn + fp)
positive_likelihood_ratio = true_positive_rate_recall / false_positive_rate
negative_likelihood_ratio = false_negative_rate / true_negative_rate_specificity
diagnostic_odds_ratio = positive_likelihood_ratio / negative_likelihood_ratio
f1_score = 2 * ((precision * true_positive_rate_recall) / (precision + true_positive_rate_recall))
return (np.asarray(
[[tp, fp, precision, false_discovery_rate],
[fn, tn, false_omission_rate, negative_predictive_value],
[true_positive_rate_recall, false_positive_rate, positive_likelihood_ratio, diagnostic_odds_ratio],
[false_negative_rate, true_negative_rate_specificity, negative_likelihood_ratio, f1_score]]))
def plot_confusion_matrix(tp, fp, fn, tn, classes=("E", "C"), title="Confusion Matrix",
output_path=None, normalize=False):
"""Plot the confusion matrix with the information of each box explained
:param classes: classes to label axis
:param normalize: option to normalize output of confusion matrix
:param tp: true positives
:param fp: false positives
:param fn: false negatives
:param tn: true negatives
:param title: title of the plot
:param output_path: place to save plot
"""
data = np.asarray([[tp, fp], [fn, tn]])
# total = sum([tp, fp, fn, tn])
# precision = tp / (tp + fp)
# false_discovery_rate = fp / (tp + fp)
#
# false_omission_rate = fn / (tn + fn)
# negative_predictive_value = tn / (tn + fn)
#
# true_positive_rate_recall = tp / (tp + fn)
# false_negative_rate = fn / (tp + fn)
#
# false_positive_rate = fp / (fp + tn)
# true_negative_rate_specificity = tn / (tn + fp)
#
# positive_likelihood_ratio = true_positive_rate_recall / false_positive_rate
# negative_likelihood_ratio = false_negative_rate / true_negative_rate_specificity
#
# diagnostic_odds_ratio = positive_likelihood_ratio / negative_likelihood_ratio
#
# f1_score = 2 * ((precision * true_positive_rate_recall) / (precision + true_positive_rate_recall))
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
n_data = data.astype('float') / data.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots()
im = ax.imshow(data, interpolation='nearest', cmap=plt.cm.Blues)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(data.shape[1]),
yticks=np.arange(data.shape[0]),
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = data.max() / 2.
for i in range(data.shape[0]):
for j in range(data.shape[1]):
ax.text(j, i, format(data[i, j], 'd') + format(n_data[i, j], '.2f'),
ha="center", va="center",
color="white" if data[i, j] > thresh else "black")
fig.tight_layout()
if output_path is not None:
plt.savefig(output_path)
else:
plt.show()
return True
def main():
cpg_positions_file = "/Users/andrewbailey/data/references/ecoli/CG_ecoli_k12_mg1655_C_E.positions"
modified_deepmod_output_dir = "/Users/andrewbailey/CLionProjects/DeepMod/ecoli_pcr_MSssI_R9"
canonical_deepmod_output_dir = "/Users/andrewbailey/CLionProjects/DeepMod/ecoli_pcr_MSssI_R9"
output_dir = "/Users/andrewbailey/CLionProjects/modification_detection_pipeline/output_dir/plotting_output/"
log_file_path = os.path.join(output_dir, "confusion_matrices_file.txt")
cpg_positions = CustomAmbiguityPositions(cpg_positions_file)
canonical_data = aggregate_deepmod_data(canonical_deepmod_output_dir)
canonical_data["E_label"] = 0
canonical_data["C_label"] = 1
modified_data = aggregate_deepmod_data(modified_deepmod_output_dir)
modified_data["E_label"] = 1
modified_data["C_label"] = 0
tps = 0
fps = 0
tns = 0
fns = 0
all_data = []
with open(log_file_path, "w") as log_file:
chromosomes = set(modified_data["contig"]) | set(canonical_data["contig"])
strands = set(modified_data["strand"]) | set(canonical_data["strand"])
for chromosome in chromosomes:
for strand in strands:
# get positions for strand and contig
sc_positions = cpg_positions.ambig_df.loc[(cpg_positions.ambig_df["strand"] == strand) &
(cpg_positions.ambig_df["contig"] == chromosome)]
# pare data sets for specific contig and strand to get positions that are cpgs
mod_sc_data = modified_data.loc[(modified_data["contig"] == chromosome) &
(modified_data["strand"] == strand)]
mod_methylation_calls = mod_sc_data.loc[mod_sc_data["start_position"].isin(sc_positions["position"])]
canon_sc_data = canonical_data.loc[(canonical_data["contig"] == chromosome) &
(canonical_data["strand"] == strand)]
canon_methylation_calls = canon_sc_data.loc[
canon_sc_data["start_position"].isin(sc_positions["position"])]
# per site
n_negative_calls = sum(canon_methylation_calls["n_reads"])
n_false_negatives = sum(canon_methylation_calls["n_mod_calls"])
n_true_negatives = n_negative_calls - n_false_negatives
n_positive_calls = sum(mod_methylation_calls["n_reads"])
n_true_positives = sum(mod_methylation_calls["n_mod_calls"])
n_false_positives = n_positive_calls - n_true_positives
tps += n_true_positives
fps += n_false_positives
tns += n_true_negatives
fns += n_false_negatives
print("Chromosome {} strand {}:".format(chromosome, strand), file=log_file)
print("Per-call confusion matrix", file=log_file)
print(print_confusion_matrix(n_true_positives, n_false_positives, n_false_negatives, n_true_negatives),
file=log_file)
plot_confusion_matrix(n_true_positives, n_false_positives, n_false_negatives, n_true_negatives,
normalize=True,
output_path=os.path.join(output_dir, "per_call_{}_{}_confusion_matrix.png".format(strand, chromosome)),
title="Per call CpG Normalized Confusion Matrix {}{}".format(strand, chromosome))
# per genomic position
chr_strand_data = | pd.concat([canon_methylation_calls, mod_methylation_calls]) | pandas.concat |
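# --- Illustrative aside (made-up counts): the per-call quantities that
# print_confusion_matrix above derives from raw tp/fp/fn/tn totals.
tp, fp, fn, tn = 80, 5, 20, 95
precision = tp / (tp + fp)
recall = tp / (tp + fn)
specificity = tn / (tn + fp)
f1 = 2 * precision * recall / (precision + recall)
print(f"precision={precision:.3f} recall={recall:.3f} "
      f"specificity={specificity:.3f} F1={f1:.3f}")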
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing is defined; values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = | Series(['foObaD__baRbaD', NA]) | pandas.Series |
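# --- Illustrative aside (assumes a reasonably recent pandas where str.replace
# takes an explicit regex= flag): the vectorised behaviour the tests above
# exercise -- an n= limit and a callable replacement.
import numpy as np
import pandas as pd

s = pd.Series(["fooBAD__barBAD", np.nan])
print(s.str.replace("BAD[_]*", "", n=1, regex=True).tolist())          # ['foobarBAD', nan]
print(s.str.replace("[a-z][A-Z]{2}", lambda m: m.group(0).swapcase(),
                    n=2, regex=True).tolist())                         # ['foObaD__baRbaD', nan]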
import shutil
from pathlib import Path
import itertools
import numpy as np
import pandas as pd
import json
input_e_dir = Path('./analysis/item_e/')
input_a_dir = Path('./analysis/item_a/')
output_ae_dir = Path('./analysis/item_ae')
shutil.rmtree(output_ae_dir, ignore_errors=True)
output_ae_dir.mkdir(parents=True, exist_ok=True)
input_c_dir = Path('./analysis/item_c/')
output_ce_dir = Path('./analysis/item_ce')
shutil.rmtree(output_ce_dir, ignore_errors=True)
output_ce_dir.mkdir(parents=True, exist_ok=True)
input_d_dir = Path('./analysis/item_d/')
output_de_dir = Path('./analysis/item_de')
shutil.rmtree(output_de_dir, ignore_errors=True)
output_de_dir.mkdir(parents=True, exist_ok=True)
for person in range(1,81):
c_csv = input_c_dir / ('S_c_' + str(person).zfill(2) + '.CSV')
e_csv = input_e_dir / ('S_e_' + str(person).zfill(2) + '.csv')
print(c_csv)
print(e_csv)
c_df = pd.read_csv(c_csv, na_values=['--undefined--', 'null'], skipinitialspace=True, sep=r"\s*[,]\s*", engine='python')
e_df = pd.read_csv(e_csv, na_values=['--undefined--', 'null'], skipinitialspace=True, sep=r"\s*[,]\s*", engine='python')
c_df.drop(e_df.filter(regex="Unname"), axis=1, inplace=True)
e_df.drop(e_df.filter(regex="Unname"), axis=1, inplace=True)
c_rows = []
for _, row in e_df.iterrows():
comps = row['Filename'].split('_')
if 'norm' in comps[0]:
c_row = row
c_row['Filename'] = c_row['Filename'].replace('_e_', '_c_')
c_rows.append(c_row)
for _, row in c_df.iterrows():
comps = row['Filename'].split('_')
if 'norm' not in comps[0]:
c_rows.append(row)
output_c_df = pd.DataFrame(c_rows)
output_c_df.to_csv(output_ce_dir / ('S_c_' + str(person).zfill(2) + '.csv'), index=False)
for person in range(1,81):
d_csv = input_d_dir / ('S_d_' + str(person).zfill(2) + '.csv')
e_csv = input_e_dir / ('S_e_' + str(person).zfill(2) + '.csv')
print(d_csv)
print(e_csv)
d_df = pd.read_csv(d_csv, na_values=['--undefined--', 'null'], skipinitialspace=True, sep=r"\s*[,]\s*", engine='python')
e_df = pd.read_csv(e_csv, na_values=['--undefined--', 'null'], skipinitialspace=True, sep=r"\s*[,]\s*", engine='python')
d_df.drop(d_df.filter(regex="Unname"), axis=1, inplace=True)
e_df.drop(e_df.filter(regex="Unname"), axis=1, inplace=True)
d_rows = []
for _, row in e_df.iterrows():
comps = row['Filename'].split('_')
if 'norm' in comps[0]:
d_row = row
d_row['Filename'] = d_row['Filename'].replace('_e_', '_d_')
d_rows.append(d_row)
for _, row in d_df.iterrows():
comps = row['Filename'].split('_')
if 'norm' not in comps[0]:
d_rows.append(row)
output_d_df = | pd.DataFrame(d_rows) | pandas.DataFrame |
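# --- Illustrative aside (inline toy data): the forgiving read_csv call used in
# the loops above -- a regex separator that swallows stray spaces around commas
# plus custom NA markers such as '--undefined--'.
import io
import pandas as pd

raw = "Filename , score\nS_norm_e_01 , --undefined--\nS_e_01 , 3.5\n"
df = pd.read_csv(io.StringIO(raw), na_values=["--undefined--", "null"],
                 skipinitialspace=True, sep=r"\s*[,]\s*", engine="python")
print(df)  # the '--undefined--' cell is read as NaN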
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import click
import mdtraj as md
import pyemma as pm
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from typing import Dict, List, Optional, Union, Tuple
from pathlib import Path
import pickle
from msmtools.estimation import transition_matrix as _transition_matrix
from msmtools.analysis import timescales as _timescales
# In[2]:
def featurizer(hp_dict: Dict, traj_paths: List[str], top_path: str) -> List[np.ndarray]:
if hp_dict['feature__value'] == 'dihedrals':
assert hp_dict['dihedrals__which'] == 'all'
def f(traj: md.Trajectory, **kwargs) -> np.ndarray:
_, phi = md.compute_phi(traj)
_, psi = md.compute_psi(traj)
_, chi1 = md.compute_chi1(traj)
_, chi2 = md.compute_chi2(traj)
_, chi3 = md.compute_chi3(traj)
_, chi4 = md.compute_chi4(traj)
_, chi5 = md.compute_chi5(traj)
ftraj = np.concatenate([phi, psi, chi1, chi2, chi3, chi4, chi5], axis=1)
ftraj = np.concatenate([np.cos(ftraj), np.sin(ftraj)], axis=1)
return ftraj
elif hp_dict['feature__value'] == 'distances':
def f(traj: md.Trajectory, **kwargs):
scheme = kwargs['distances__scheme']
transform = kwargs['distances__transform']
centre = kwargs['distances__centre']
steepness = kwargs['distances__steepness']
ftraj, _ = md.compute_contacts(traj, scheme=scheme)
if transform=='logistic':
ftraj = 1.0/(1+np.exp(-steepness*(ftraj - centre)))
return ftraj
else:
raise ValueError
ftrajs = []
for traj_path in traj_paths:
traj = md.load(traj_path, top=top_path)
ftrajs.append(f(traj, **hp_dict))
return ftrajs
def tica(hp_dict: Dict[str, Union[float, int, str]], ftrajs: List[np.ndarray]) -> List[np.ndarray]:
lag = hp_dict['tica__lag']
stride = hp_dict['tica__stride']
dim = hp_dict['tica__dim']
tica = pm.coordinates.tica(ftrajs, lag=lag, dim=dim, kinetic_map=True)
ttrajs = tica.get_output()
return ttrajs, tica
def kmeans(hp_dict: Dict, ttrajs: List[np.ndarray], seed: int) -> List[np.ndarray]:
k = hp_dict['cluster__k']
max_iter = hp_dict['cluster__max_iter']
stride = hp_dict['cluster__stride']
kmeans = pm.coordinates.cluster_kmeans(ttrajs, k=k, max_iter=max_iter, stride=stride, fixed_seed=seed, n_jobs=1)
dtrajs = kmeans.dtrajs
return dtrajs, kmeans
def its(dtrajs: List[np.ndarray], lags: List[int], nits: int) -> np.ndarray:
its_obj = pm.msm.timescales_msm(dtrajs=dtrajs, lags=lags, nits=nits)
return its_obj.timescales
def score(dtrajs: List[np.ndarray], lags: List[int], nits: int) -> np.ndarray:
all_vs = []
for lag in lags:
m = pm.msm.estimate_markov_model(dtrajs, lag=lag)
vs = np.array([m.score(dtrajs, score_k=k) for k in range(2, nits+2)])
vs = vs.reshape(1, -1)
all_vs.append(vs)
all_vs = np.concatenate(all_vs, axis=0)
return all_vs
def bootstrap(ftrajs: List[np.ndarray], rng: np.random.Generator) -> List[np.ndarray]:
probs = np.array([x.shape[0] for x in ftrajs])
probs = probs/np.sum(probs)
ix = np.arange(len(ftrajs))
new_ix = rng.choice(ix,size=len(ftrajs), p=probs, replace=True)
return [ftrajs[i] for i in new_ix]
def summarise(df):
df_summary = df.groupby(['hp_ix', 'lag', 'process']).agg(median=(0, lambda x: np.quantile(x, 0.5)),
lb=(0, lambda x: np.quantile(x, 0.025)),
ub=(0, lambda x: np.quantile(x, 0.975)),
count =(0, lambda x: x.shape[0]-x.isna().sum()))
return df_summary
def samples_to_summary(samples: np.ndarray, lags: List[int], hp_ix: int)-> pd.DataFrame:
"""
samples=np.ndarray[lagtime, process, bs_sample]
"""
df = pd.concat({(hp_ix, lags[i], j+2): pd.DataFrame(samples[i, j, :]) for i in range(samples.shape[0]) for j in range(samples.shape[1])})
df.index.rename(('hp_ix', 'lag', 'process', 'bs_ix'), inplace=True)
df_summary = summarise(df)
return df_summary
@click.command()
@click.argument("protein", type=str)
@click.argument("hp_ix", type=int)
def run(protein, hp_ix):
seed = 49587
n_bootstraps = 100
nits=20
rng = np.random.default_rng(seed)
lags = list(range(1, 102, 10))
hps = pd.read_hdf('../data/msms/hpsample.h5')
top_path = f'/home/rob/Data/DESRES/DESRES-Trajectory_{protein.upper()}-0-protein/{protein.upper()}-0-protein/protein.pdb'
traj_paths = list(Path('/home/rob/Data/DESRES/').rglob(f'*{protein.upper()}*/**/*.xtc'))
traj_paths = [str(x) for x in traj_paths]
traj_paths.sort()
assert traj_paths
source_ts = pd.DataFrame(pd.read_hdf(f'../analysis/{protein}/summary.h5', key='timescales'))
source_vs = pd.DataFrame(pd.read_hdf(f'../analysis/{protein}/summary.h5', key='vamps'))
ftrajs_all = featurizer(hps.loc[hp_ix, :].to_dict(), traj_paths, top_path)
# Bootstrap results
ts_samples = []
vs_samples = []
for i in range(n_bootstraps):
print(i, end=', ')
ftrajs = bootstrap(ftrajs_all, rng)
assert len(ftrajs) == len(ftrajs_all)
ttrajs, tica_mod = tica(hps.loc[hp_ix, :].to_dict(), ftrajs)
dtrajs, kmeans_mod = kmeans(hps.loc[hp_ix, :].to_dict(), ttrajs, seed)
ts = its(dtrajs, lags, nits=nits)
vs = score(dtrajs, lags, nits=nits)
ts_samples.append(ts[..., np.newaxis])
vs_samples.append(vs[..., np.newaxis])
# Summarise values
ts_samples = np.concatenate(ts_samples, axis=-1)
vs_samples = np.concatenate(vs_samples, axis=-1)
target_ts = samples_to_summary(ts_samples, lags, hp_ix)
target_vs = samples_to_summary(vs_samples, lags, hp_ix)
# Compare to msmsense values
comp_ts = | pd.merge(target_ts, source_ts, left_index=True, right_index=True, how='left') | pandas.merge |
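# --- Illustrative aside (toy arrays instead of feature trajectories): the
# length-weighted bootstrap implemented by bootstrap() above.
import numpy as np

ftrajs = [np.zeros((100, 2)), np.zeros((10, 2)), np.zeros((50, 2))]
rng = np.random.default_rng(42)
probs = np.array([x.shape[0] for x in ftrajs], dtype=float)
probs /= probs.sum()
new_ix = rng.choice(np.arange(len(ftrajs)), size=len(ftrajs), p=probs, replace=True)
resampled = [ftrajs[i] for i in new_ix]
print(new_ix, [x.shape[0] for x in resampled])  # long trajectories are drawn more often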
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
(Index([0, 2, 4]), Index([1, 3, 5])),
(Index([0.0, 1.0, 2.0]), Index([1.0, 2.0, 3.0])),
(timedelta_range("0 days", periods=3), timedelta_range("1 day", periods=3)),
(date_range("20170101", periods=3), date_range("20170102", periods=3)),
(
date_range("20170101", periods=3, tz="US/Eastern"),
| date_range("20170102", periods=3, tz="US/Eastern") | pandas.date_range |
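# --- Illustrative aside (not part of the fixture above): the left/right pairs
# are typically consumed like this, by building an IntervalIndex from matching
# endpoint arrays.
import pandas as pd

left, right = pd.Index([0, 2, 4]), pd.Index([1, 3, 5])
idx = pd.IntervalIndex.from_arrays(left, right, closed="right")
print(idx)               # IntervalIndex([(0, 1], (2, 3], (4, 5]], ...)
print(idx.get_loc(2.5))  # 1 -> 2.5 falls in the interval (2, 3]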
import os
from collections import Counter
from os import listdir
from os.path import isfile, join
from mpl_toolkits.mplot3d import axes3d, Axes3D
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import Orange
from sys import argv
from matplotlib.pyplot import figure
from matplotlib import style
from matplotlib import cm, ticker, colors as mcolors
from matplotlib.ticker import MaxNLocator
from matplotlib.collections import PolyCollection
style.use('ggplot')
from classifiers import classifiers_list
from datasetsDelaunay import dataset_list_bi, dataset_list_mult
from folders import output_dir, dir_pca_biclasse, metricas_biclasse, dir_pca_multiclasse, metricas_multiclasse
from parameters import order, alphas
order_dict = {'area': 1,
'volume': 2,
'area_volume_ratio': 3,
'edge_ratio': 4,
'radius_ratio': 5,
'aspect_ratio': 6,
'max_solid_angle': 7,
'min_solid_angle': 8,
'solid_angle': 9}
class Statistics:
def __init__(self):
pass
def compute_CD_customizado(self, avranks, n, alpha="0.05", test="nemenyi"):
"""
Returns critical difference for Nemenyi or Bonferroni-Dunn test
according to given alpha (either alpha="0.05" or alpha="0.1") for average
ranks and number of tested datasets N. Test can be either "nemenyi"
for the Nemenyi two-tailed test or "bonferroni-dunn" for the Bonferroni-Dunn test.
"""
k = len(avranks)
d = {("nemenyi", "0.05"): [1.960, 2.344, 2.569, 2.728, 2.850, 2.948, 3.031, 3.102, 3.164, 3.219, 3.268, 3.313,
3.354, 3.391, 3.426,
3.458, 3.489, 3.517, 3.544, 3.569, 3.593, 3.616, 3.637, 3.658, 3.678, 3.696, 3.714,
3.732, 3.749, 3.765,
3.780, 3.795, 3.810, 3.824, 3.837, 3.850, 3.863, 3.876, 3.888, 3.899, 3.911, 3.922,
3.933, 3.943, 3.954,
3.964, 3.973, 3.983, 3.992],
("nemenyi", "0.1"): [0, 0, 1.644854, 2.052293, 2.291341, 2.459516,
2.588521, 2.692732, 2.779884, 2.854606, 2.919889,
2.977768, 3.029694, 3.076733, 3.119693, 3.159199,
3.195743, 3.229723, 3.261461, 3.291224, 3.319233],
("bonferroni-dunn", "0.05"): [0, 0, 1.960, 2.241, 2.394, 2.498, 2.576,
2.638, 2.690, 2.724, 2.773],
("bonferroni-dunn", "0.1"): [0, 0, 1.645, 1.960, 2.128, 2.241, 2.326,
2.394, 2.450, 2.498, 2.539]}
q = d[(test, alpha)]
cd = q[k] * (k * (k + 1) / (6.0 * n)) ** 0.5
return cd
def calcula_media_folds_biclasse(self, df):
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(
columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER', 'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO',
'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
mode = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print(i)
dfr.to_csv(output_dir + 'resultado_media_biclasse_' + mode + '.csv', index=False)
def calcula_media_folds_multiclass(self, df):
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(
columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER', 'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO',
'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
mode = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print(i)
dfr.to_csv(output_dir + 'resultado_media_multiclass_' + mode + '.csv', index=False)
def separa_delaunay_biclass(self, filename):
df = pd.read_csv(filename)
list_base = []
for p in np.arange(0, len(preproc_type)):
list_base.append(df[(df['PREPROC'] == preproc_type[p])])
df_base = list_base.pop(0)
for i in np.arange(0, len(list_base)):
df_base = pd.concat([df_base, list_base[i]], ignore_index=True)
for o in order:
for a in alphas:
dfr = df[(df['ORDER'] == o)]
dfr1 = dfr[(dfr['ALPHA'] == str(a))]
df_file = pd.concat([df_base, dfr1], ignore_index=True)
df_file.to_csv('./../output_dir/result_biclass' + '_' + o + '_' + str(a) + '.csv', index=False)
def read_dir_files(self, dir_name):
f = [f for f in listdir(dir_name) if isfile(join(dir_name, f))]
return f
def find_best_rank(self, results_dir, tipo):
results = self.read_dir_files(results_dir)
df = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
i = 0
for f in results:
df_temp = pd.read_csv(results_dir + f)
df.at[i, 'ARQUIVO'] = f
df.at[i, 'WINER'] = df_temp.iloc[0, 0]
i += 1
df.to_csv(output_dir + tipo)
def find_best_delaunay(self, results_dir, tipo):
df = pd.read_csv(results_dir + tipo)
i = 0
j = 0
df_best = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
win = list(df['WINER'])
for w in win:
if w == 'DELAUNAY':
df_best.at[i, 'ARQUIVO'] = df.iloc[j, 1]
df_best.at[i, 'WINER'] = df.iloc[j, 2]
i += 1
j += 1
df_best.to_csv(output_dir + 'only_best_delaunay_pca_biclass_media_rank.csv')
def rank_by_algorithm(self, df, tipo, wd, reducao, order, alpha):
'''
Compute the per-dataset rank of every oversampling strategy for each metric.
:param df: dataframe with the averaged fold results
:param tipo: experiment label used in the output file names
:param wd: output directory for the rank files
:param reducao: dimensionality-reduction label used in the output file names
:param order: Delaunay geometry criterion
:param alpha: Delaunay alpha value
:return:
'''
df_tabela = pd.DataFrame(
columns=['DATASET', 'ALGORITHM', 'ORIGINAL', 'RANK_ORIGINAL', 'SMOTE', 'RANK_SMOTE', 'SMOTE_SVM',
'RANK_SMOTE_SVM', 'BORDERLINE1', 'RANK_BORDERLINE1', 'BORDERLINE2', 'RANK_BORDERLINE2',
'GEOMETRIC_SMOTE', 'RANK_GEOMETRIC_SMOTE',
'DELAUNAY', 'RANK_DELAUNAY', 'DELAUNAY_TYPE', 'ALPHA', 'unit'])
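# df_tabela collects one row per (dataset, metric): the raw score of every
# oversampling strategy side by side, with the RANK_* columns filled in further below.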
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df.to_csv(dir_pca_biclasse + reducao + '_' + tipo + '_' + order + '_' + str(alpha) + '.csv')
j = 0
for d in dataset_list_bi:
for m in metricas_biclasse:
aux = group[group['DATASET'] == d]
aux = aux.reset_index()
df_tabela.at[j, 'DATASET'] = d
df_tabela.at[j, 'ALGORITHM'] = name
indice = aux.PREPROC[aux.PREPROC == '_train'].index.tolist()[0]
df_tabela.at[j, 'ORIGINAL'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_SMOTE'].index.tolist()[0]
df_tabela.at[j, 'SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_smoteSVM'].index.tolist()[0]
df_tabela.at[j, 'SMOTE_SVM'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline1'].index.tolist()[0]
df_tabela.at[j, 'BORDERLINE1'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline2'].index.tolist()[0]
df_tabela.at[j, 'BORDERLINE2'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Geometric_SMOTE'].index.tolist()[0]
df_tabela.at[j, 'GEOMETRIC_SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.ORDER == order].index.tolist()[0]
df_tabela.at[j, 'DELAUNAY'] = aux.at[indice, m]
df_tabela.at[j, 'DELAUNAY_TYPE'] = order
df_tabela.at[j, 'ALPHA'] = alpha
df_tabela.at[j, 'unit'] = m
j += 1
df_pre = df_tabela[df_tabela['unit'] == 'PRE']
df_rec = df_tabela[df_tabela['unit'] == 'REC']
df_spe = df_tabela[df_tabela['unit'] == 'SPE']
df_f1 = df_tabela[df_tabela['unit'] == 'F1']
df_geo = df_tabela[df_tabela['unit'] == 'GEO']
df_iba = df_tabela[df_tabela['unit'] == 'IBA']
df_auc = df_tabela[df_tabela['unit'] == 'AUC']
pre = df_pre[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
rec = df_rec[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
spe = df_spe[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
f1 = df_f1[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
geo = df_geo[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
iba = df_iba[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
auc = df_auc[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
pre = pre.reset_index()
pre.drop('index', axis=1, inplace=True)
rec = rec.reset_index()
rec.drop('index', axis=1, inplace=True)
spe = spe.reset_index()
spe.drop('index', axis=1, inplace=True)
f1 = f1.reset_index()
f1.drop('index', axis=1, inplace=True)
geo = geo.reset_index()
geo.drop('index', axis=1, inplace=True)
iba = iba.reset_index()
iba.drop('index', axis=1, inplace=True)
auc = auc.reset_index()
auc.drop('index', axis=1, inplace=True)
# compute the rank row by row (one row per dataset)
pre_rank = pre.rank(axis=1, ascending=False)
rec_rank = rec.rank(axis=1, ascending=False)
spe_rank = spe.rank(axis=1, ascending=False)
f1_rank = f1.rank(axis=1, ascending=False)
geo_rank = geo.rank(axis=1, ascending=False)
iba_rank = iba.rank(axis=1, ascending=False)
auc_rank = auc.rank(axis=1, ascending=False)
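# With ascending=False the best (highest) value in each row gets rank 1; ties share the
# average rank, e.g. a row [0.90, 0.85, 0.95] becomes [2.0, 3.0, 1.0].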
df_pre = df_pre.reset_index()
df_pre.drop('index', axis=1, inplace=True)
df_pre['RANK_ORIGINAL'] = pre_rank['ORIGINAL']
df_pre['RANK_SMOTE'] = pre_rank['SMOTE']
df_pre['RANK_SMOTE_SVM'] = pre_rank['SMOTE_SVM']
df_pre['RANK_BORDERLINE1'] = pre_rank['BORDERLINE1']
df_pre['RANK_BORDERLINE2'] = pre_rank['BORDERLINE2']
df_pre['RANK_GEOMETRIC_SMOTE'] = pre_rank['GEOMETRIC_SMOTE']
df_pre['RANK_DELAUNAY'] = pre_rank['DELAUNAY']
df_rec = df_rec.reset_index()
df_rec.drop('index', axis=1, inplace=True)
df_rec['RANK_ORIGINAL'] = rec_rank['ORIGINAL']
df_rec['RANK_SMOTE'] = rec_rank['SMOTE']
df_rec['RANK_SMOTE_SVM'] = rec_rank['SMOTE_SVM']
df_rec['RANK_BORDERLINE1'] = rec_rank['BORDERLINE1']
df_rec['RANK_BORDERLINE2'] = rec_rank['BORDERLINE2']
df_rec['RANK_GEOMETRIC_SMOTE'] = rec_rank['GEOMETRIC_SMOTE']
df_rec['RANK_DELAUNAY'] = rec_rank['DELAUNAY']
df_spe = df_spe.reset_index()
df_spe.drop('index', axis=1, inplace=True)
df_spe['RANK_ORIGINAL'] = spe_rank['ORIGINAL']
df_spe['RANK_SMOTE'] = spe_rank['SMOTE']
df_spe['RANK_SMOTE_SVM'] = spe_rank['SMOTE_SVM']
df_spe['RANK_BORDERLINE1'] = spe_rank['BORDERLINE1']
df_spe['RANK_BORDERLINE2'] = spe_rank['BORDERLINE2']
df_spe['RANK_GEOMETRIC_SMOTE'] = spe_rank['GEOMETRIC_SMOTE']
df_spe['RANK_DELAUNAY'] = spe_rank['DELAUNAY']
df_f1 = df_f1.reset_index()
df_f1.drop('index', axis=1, inplace=True)
df_f1['RANK_ORIGINAL'] = f1_rank['ORIGINAL']
df_f1['RANK_SMOTE'] = f1_rank['SMOTE']
df_f1['RANK_SMOTE_SVM'] = f1_rank['SMOTE_SVM']
df_f1['RANK_BORDERLINE1'] = f1_rank['BORDERLINE1']
df_f1['RANK_BORDERLINE2'] = f1_rank['BORDERLINE2']
df_f1['RANK_GEOMETRIC_SMOTE'] = f1_rank['GEOMETRIC_SMOTE']
df_f1['RANK_DELAUNAY'] = f1_rank['DELAUNAY']
df_geo = df_geo.reset_index()
df_geo.drop('index', axis=1, inplace=True)
df_geo['RANK_ORIGINAL'] = geo_rank['ORIGINAL']
df_geo['RANK_SMOTE'] = geo_rank['SMOTE']
df_geo['RANK_SMOTE_SVM'] = geo_rank['SMOTE_SVM']
df_geo['RANK_BORDERLINE1'] = geo_rank['BORDERLINE1']
df_geo['RANK_BORDERLINE2'] = geo_rank['BORDERLINE2']
df_geo['RANK_GEOMETRIC_SMOTE'] = geo_rank['GEOMETRIC_SMOTE']
df_geo['RANK_DELAUNAY'] = geo_rank['DELAUNAY']
df_iba = df_iba.reset_index()
df_iba.drop('index', axis=1, inplace=True)
df_iba['RANK_ORIGINAL'] = iba_rank['ORIGINAL']
df_iba['RANK_SMOTE'] = iba_rank['SMOTE']
df_iba['RANK_SMOTE_SVM'] = iba_rank['SMOTE_SVM']
df_iba['RANK_BORDERLINE1'] = iba_rank['BORDERLINE1']
df_iba['RANK_BORDERLINE2'] = iba_rank['BORDERLINE2']
df_iba['RANK_GEOMETRIC_SMOTE'] = iba_rank['GEOMETRIC_SMOTE']
df_iba['RANK_DELAUNAY'] = iba_rank['DELAUNAY']
df_auc = df_auc.reset_index()
df_auc.drop('index', axis=1, inplace=True)
df_auc['RANK_ORIGINAL'] = auc_rank['ORIGINAL']
df_auc['RANK_SMOTE'] = auc_rank['SMOTE']
df_auc['RANK_SMOTE_SVM'] = auc_rank['SMOTE_SVM']
df_auc['RANK_BORDERLINE1'] = auc_rank['BORDERLINE1']
df_auc['RANK_BORDERLINE2'] = auc_rank['BORDERLINE2']
df_auc['RANK_GEOMETRIC_SMOTE'] = auc_rank['GEOMETRIC_SMOTE']
df_auc['RANK_DELAUNAY'] = auc_rank['DELAUNAY']
# average rank
media_pre_rank = pre_rank.mean(axis=0)
media_rec_rank = rec_rank.mean(axis=0)
media_spe_rank = spe_rank.mean(axis=0)
media_f1_rank = f1_rank.mean(axis=0)
media_geo_rank = geo_rank.mean(axis=0)
media_iba_rank = iba_rank.mean(axis=0)
media_auc_rank = auc_rank.mean(axis=0)
media_pre_rank_file = media_pre_rank.reset_index()
media_pre_rank_file = media_pre_rank_file.sort_values(by=0)
media_rec_rank_file = media_rec_rank.reset_index()
media_rec_rank_file = media_rec_rank_file.sort_values(by=0)
media_spe_rank_file = media_spe_rank.reset_index()
media_spe_rank_file = media_spe_rank_file.sort_values(by=0)
media_f1_rank_file = media_f1_rank.reset_index()
media_f1_rank_file = media_f1_rank_file.sort_values(by=0)
media_geo_rank_file = media_geo_rank.reset_index()
media_geo_rank_file = media_geo_rank_file.sort_values(by=0)
media_iba_rank_file = media_iba_rank.reset_index()
media_iba_rank_file = media_iba_rank_file.sort_values(by=0)
media_auc_rank_file = media_auc_rank.reset_index()
media_auc_rank_file = media_auc_rank_file.sort_values(by=0)
# Save the main result files
df_pre.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
df_rec.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
df_spe.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
df_f1.to_csv(wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
df_geo.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
df_iba.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
df_auc.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
media_pre_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
media_rec_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
media_spe_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
media_f1_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
media_geo_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
media_iba_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
media_auc_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
delaunay_type = order + '_' + str(alpha)
# CD (critical difference) diagrams
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
delaunay_type]
avranks = list(media_pre_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_pre.pdf')
plt.close()
avranks = list(media_rec_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_rec.pdf')
plt.close()
avranks = list(media_spe_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_spe.pdf')
plt.close()
avranks = list(media_f1_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_f1.pdf')
plt.close()
avranks = list(media_geo_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_geo.pdf')
plt.close()
avranks = list(media_iba_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_iba.pdf')
plt.close()
'''avranks = list(media_auc_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_auc.pdf')
plt.close()'''
print('Delaunay Type= ', delaunay_type)
print('Algorithm= ', name)
def rank_total_by_algorithm(self, tipo, wd, reducao, order, alpha):
delaunay_name = 'RANK_DTO_' + str(order) + '_' + str(alpha)
cols = ['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE', 'RANK_SMOTE_SVM', 'RANK_BORDERLINE1',
'RANK_BORDERLINE2', 'RANK_GEOMETRIC_SMOTE', 'RANK_DELAUNAY']
for name in classifiers_list:
print(os.path.abspath(os.getcwd()))
# Load the per-metric total rank files saved by rank_by_algorithm
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_pre.csv'
df_pre = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_rec.csv'
df_rec = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_spe.csv'
df_spe = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_f1.csv'
df_f1 = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_geo.csv'
df_geo = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_iba.csv'
df_iba = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_auc.csv'
df_auc = pd.read_csv(path_name)
# PRE
df_pre_col = df_pre[cols]
df_pre_col.loc[:, delaunay_name] = df_pre_col['RANK_DELAUNAY'].values
df_pre_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_pre = df_pre_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv'
ranking_pre['ALGORITHM'] = name
ranking_pre.to_csv(path_name, index=False)
# REC
df_rec_col = df_rec[cols]
df_rec_col.loc[:, delaunay_name] = df_rec_col['RANK_DELAUNAY'].values
df_rec_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_rec = df_rec_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv'
ranking_rec['ALGORITHM'] = name
ranking_rec.to_csv(path_name, index=False)
# SPE
df_spe_col = df_spe[cols]
df_spe_col.loc[:, delaunay_name] = df_spe_col['RANK_DELAUNAY'].values
df_spe_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_spe = df_spe_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv'
ranking_spe['ALGORITHM'] = name
ranking_spe.to_csv(path_name, index=False)
# F1
df_f1_col = df_f1[cols]
df_f1_col.loc[:, delaunay_name] = df_f1_col['RANK_DELAUNAY'].values
df_f1_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_f1 = df_f1_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv'
ranking_f1['ALGORITHM'] = name
ranking_f1.to_csv(path_name, index=False)
# GEO
df_geo_col = df_geo[cols]
df_geo_col.loc[:, delaunay_name] = df_geo_col['RANK_DELAUNAY'].values
df_geo_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_geo = df_geo_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv'
ranking_geo['ALGORITHM'] = name
ranking_geo.to_csv(path_name, index=False)
# IBA
df_iba_col = df_iba[cols]
df_iba_col.loc[:, delaunay_name] = df_iba_col['RANK_DELAUNAY'].values
df_iba_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_iba = df_iba_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv'
ranking_iba['ALGORITHM'] = name
ranking_iba.to_csv(path_name, index=False)
# AUC
df_auc_col = df_auc[cols]
df_auc_col.loc[:, delaunay_name] = df_auc_col['RANK_DELAUNAY'].values
df_auc_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_auc = df_auc_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv'
ranking_auc['ALGORITHM'] = name
ranking_auc.to_csv(path_name, index=False)
def rank_by_algorithm_dataset(self, filename):
df = pd.read_csv(filename)
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df_temp1 = group.groupby(by=['DATASET'])
for name1, group1 in df_temp1:
group1 = group1.reset_index()
group1.drop('index', axis=1, inplace=True)
group1['rank_f1'] = group1['F1'].rank(ascending=False)
group1['rank_geo'] = group1['GEO'].rank(ascending=False)
group1['rank_iba'] = group1['IBA'].rank(ascending=False)
group1['rank_auc'] = group1['AUC'].rank(ascending=False)
group1.to_csv('./../output_dir/rank/rank_algorithm_dataset_' + name + '_' + name1 + '.csv', index=False)
def rank_by_algorithm_dataset_only_dto(self, filename):
df = pd.read_csv(filename)
df = df[df['PREPROC'] != '_SMOTE']
df = df[df['PREPROC'] != '_Geometric_SMOTE']
df = df[df['PREPROC'] != '_Borderline1']
df = df[df['PREPROC'] != '_Borderline2']
df = df[df['PREPROC'] != '_smoteSVM']
df = df[df['PREPROC'] != '_train']
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df_temp1 = group.groupby(by=['DATASET'])
for name1, group1 in df_temp1:
group1 = group1.reset_index()
group1.drop('index', axis=1, inplace=True)
group1['rank_f1'] = group1['F1'].rank(ascending=False)
group1['rank_geo'] = group1['GEO'].rank(ascending=False)
group1['rank_iba'] = group1['IBA'].rank(ascending=False)
group1['rank_auc'] = group1['AUC'].rank(ascending=False)
group1.to_csv(
'./../output_dir/rank/only_dto/rank_algorithm_dataset_only_dto_' + name + '_' + name1 + '.csv',
index=False)
df_graph = group1.copy()
df_graph = df_graph.replace('area', 1)
df_graph = df_graph.replace('volume', 2)
df_graph = df_graph.replace('area_volume_ratio', 3)
df_graph = df_graph.replace('edge_ratio', 4)
df_graph = df_graph.replace('radius_ratio', 5)
df_graph = df_graph.replace('aspect_ratio', 6)
df_graph = df_graph.replace('max_solid_angle', 7)
df_graph = df_graph.replace('min_solid_angle', 8)
df_graph = df_graph.replace('solid_angle', 9)
legend = ['area', 'volume', 'area_volume_ratio', 'edge_ratio', 'radius_ratio', 'aspect_ratio',
'max_solid_angle', 'min_solid_angle', 'solid_angle']
x = df_graph['ORDER'].values
y = df_graph['ALPHA'].values.astype(float)
dz = df_graph['AUC'].values
N = x.shape[0]
z = np.zeros(N)
dx = 0.2 * np.ones(N)
dy = 0.2 * np.ones(N)
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(111, projection='3d')
cs = ['r', 'g', 'b'] * 9
ax1.bar3d(x, y, z, dx, dy, dz, color=cs)
ax1.set_ylabel('Alpha')
ax1.set_xlabel('\n\n\n\n\nGeometry')
ax1.set_zlabel('AUC')
ax1.set_title('Geometry x Alpha \n Algorithm = ' + name + '\n Dataset = ' + name1)
ax1.set_xticklabels(legend)
ax1.legend()
plt.show()
fig = plt.figure(figsize=(12, 8))
ax = Axes3D(fig)
surf = ax.plot_trisurf(x, y, dz, cmap=cm.jet, linewidth=0.5)
fig.colorbar(surf, shrink=0.5, aspect=7)
ax.set_xlabel('Alpha')
ax.set_ylabel('\n\n\n\n\nGeometry')
ax.set_zlabel('AUC')
ax.set_title('Geometry x Alpha \n Algorithm = ' + name + '\n Dataset = ' + name1)
ax.set_yticklabels(legend)
ax.legend()
plt.savefig('./../output_dir/rank/only_dto/only_dto_geometry_by_alpha_' + name + '_' + name1 + '.pdf')
plt.show()
def rank_by_measures_only_dto(self, filename):
best_geometry = pd.DataFrame(columns=['PREPROC', 'M', 'ALGORITHM', 'MEDIA_RANK'])
df = pd.read_csv(filename)
df = df[df['PREPROC'] != '_SMOTE']
df = df[df['PREPROC'] != '_Geometric_SMOTE']
df = df[df['PREPROC'] != '_Borderline1']
df = df[df['PREPROC'] != '_Borderline2']
df = df[df['PREPROC'] != '_smoteSVM']
df = df[df['PREPROC'] != '_train']
i = 0
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
group['rank_f1'] = group['F1'].rank(ascending=False)
group['rank_geo'] = group['GEO'].rank(ascending=False)
group['rank_iba'] = group['IBA'].rank(ascending=False)
group['rank_auc'] = group['AUC'].rank(ascending=False)
# AUC
group = group.sort_values(by=['rank_auc'])
media_rank_auc = group.groupby('PREPROC')['rank_auc'].mean()
df_media_rank_auc = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_AUC'])
df_media_rank_auc['PREPROC'] = media_rank_auc.index
df_media_rank_auc['MEDIA_RANK_AUC'] = media_rank_auc.values
df_media_rank_auc.sort_values(by=['MEDIA_RANK_AUC'], ascending=True, inplace=True)
df_media_rank_auc.reset_index(inplace=True)
df_media_rank_auc.drop('index', axis=1, inplace=True)
best_auc_geometry = df_media_rank_auc.loc[0]
# GEO
group = group.sort_values(by=['rank_geo'])
media_rank_geo = group.groupby('PREPROC')['rank_geo'].mean()
df_media_rank_geo = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_GEO'])
df_media_rank_geo['PREPROC'] = media_rank_geo.index
df_media_rank_geo['MEDIA_RANK_GEO'] = media_rank_geo.values
df_media_rank_geo.sort_values(by=['MEDIA_RANK_GEO'], ascending=True, inplace=True)
df_media_rank_geo.reset_index(inplace=True)
df_media_rank_geo.drop('index', axis=1, inplace=True)
best_geo_geometry = df_media_rank_geo.loc[0]
# IBA
group = group.sort_values(by=['rank_iba'])
media_rank_iba = group.groupby('PREPROC')['rank_iba'].mean()
df_media_rank_iba = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_IBA'])
df_media_rank_iba['PREPROC'] = media_rank_iba.index
df_media_rank_iba['MEDIA_RANK_IBA'] = media_rank_iba.values
df_media_rank_iba.sort_values(by=['MEDIA_RANK_IBA'], ascending=True, inplace=True)
df_media_rank_iba.reset_index(inplace=True)
df_media_rank_iba.drop('index', axis=1, inplace=True)
best_iba_geometry = df_media_rank_iba.loc[0]
# F1
group = group.sort_values(by=['rank_f1'])
media_rank_f1 = group.groupby('PREPROC')['rank_f1'].mean()
df_media_rank_f1 = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_F1'])
df_media_rank_f1['PREPROC'] = media_rank_f1.index
df_media_rank_f1['MEDIA_RANK_F1'] = media_rank_f1.values
df_media_rank_f1.sort_values(by=['MEDIA_RANK_F1'], ascending=True, inplace=True)
df_media_rank_f1.reset_index(inplace=True)
df_media_rank_f1.drop('index', axis=1, inplace=True)
best_f1_geometry = df_media_rank_f1.loc[0]
best_geometry.loc[i + 0, 'PREPROC'] = best_auc_geometry[0]
best_geometry.loc[i + 0, 'MEDIA_RANK'] = best_auc_geometry[1]
best_geometry.loc[i + 0, 'ALGORITHM'] = name
best_geometry.loc[i + 0, 'M'] = 'AUC'
best_geometry.loc[i + 1, 'PREPROC'] = best_geo_geometry[0]
best_geometry.loc[i + 1, 'MEDIA_RANK'] = best_geo_geometry[1]
best_geometry.loc[i + 1, 'ALGORITHM'] = name
best_geometry.loc[i + 1, 'M'] = 'GEO'
best_geometry.loc[i + 2, 'PREPROC'] = best_iba_geometry[0]
best_geometry.loc[i + 2, 'MEDIA_RANK'] = best_iba_geometry[1]
best_geometry.loc[i + 2, 'ALGORITHM'] = name
best_geometry.loc[i + 2, 'M'] = 'IBA'
best_geometry.loc[i + 3, 'PREPROC'] = best_f1_geometry[0]
best_geometry.loc[i + 3, 'MEDIA_RANK'] = best_f1_geometry[1]
best_geometry.loc[i + 3, 'ALGORITHM'] = name
best_geometry.loc[i + 3, 'M'] = 'F1'
i += 4
group.to_csv('./../output_dir/rank/rank_by_measures' + '_' + name + '.csv', index=False)
best_geometry.to_csv('./../output_dir/rank/best_dto_geometry_rank.csv', index=False)
def find_best_dto(self):
'''
Find best DTO geometry and alpha parameter
:return:
'''
df = pd.read_csv('./../output_dir/rank/rank_by_measures.csv')
import pandas as pd
import numpy as np
class TransactionManager:
"""
An interface to store and
manage all transactions.
Transactions are the minimal unit to represent
the outcome of a market.
Attributes
-----------
name_col: list of str
Name of the columns to use in the dataframe
returned.
n_trans: int
Number of transactions currently in the Manager
trans: list of tuples
List of the actual transactions available
"""
name_col = ['bid', 'quantity', 'price', 'source', 'active']
def __init__(self):
"""
"""
self.n_trans = 0
self.trans = []
def add_transaction(self, bid, quantity, price, source, active):
"""Add a transaction to the transactions list
Parameters
----------
bid : int
Unique identifier of the bid
quantity : float
transacted quantity
price : float
transacted price
source : int
Identifier of the second party in the transaction,
-1 if there is no clear second party, such as
in a double auction.
active : bool
`True` if the bid is still active after the
transaction.
Returns
--------
trans_id: int
id of the added transaction, -1 if fails
Examples
---------
>>> tm = pm.TransactionManager()
>>> tm.add_transaction(1, 0.5, 2.1, -1, False)
0
>>> tm.trans
[(1, 0.5, 2.1, -1, False)]
>>> tm.n_trans
1
"""
new_trans = (bid, quantity, price, source, active)
self.trans.append(new_trans)
self.n_trans += 1
return self.n_trans - 1
def get_df(self):
"""Returns the transaction dataframe
Parameters
----------
Returns
-------
df: pd.DataFrame
A pandas dataframe representing all the transactions
stored.
Examples
---------
>>> tm = pm.TransactionManager()
>>> tm.add_transaction(1, 0.5, 2.1, -1, False)
0
>>> tm.add_transaction(5, 0, 0, 3, True)
1
>>> tm.get_df()
bid quantity price source active
0 1 0.5 2.1 -1 False
1 5 0.0 0.0 3 True
"""
df = pd.DataFrame(self.trans, columns=self.name_col)
return df
import pandas as pd
import numpy as np
import pycountry
from apscheduler.schedulers.blocking import BlockingScheduler
from github import Github
from github import InputGitTreeElement
import re
sched = BlockingScheduler()
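# The job below is registered on a cron trigger and runs once a day at 03:00.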
@sched.scheduled_job('cron', day_of_week='mon-sun', hour=3)
def scheduled_job():
def do_fuzzy_search(country):
try:
result = pycountry.countries.search_fuzzy(country)
except Exception:
return np.nan
else:
return result[0].alpha_2
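# Example (hypothetical input): do_fuzzy_search("South Korea") is expected to return "KR",
# while a name pycountry cannot resolve falls back to np.nan.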
##################################
############GitHUB################
death_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
confirmed_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
recovered_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
country_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/web-data/data/cases_country.csv')
confirmed_df.drop(columns=['Lat','Long'],axis=1,inplace=True)
death_df.drop(columns=['Lat','Long'],axis=1,inplace=True)
recovered_df.drop(columns=['Lat','Long'],axis=1,inplace=True)
country_df.drop(columns=['People_Tested','People_Hospitalized','Lat','Long_','Last_Update','UID'],axis=1,inplace=True)
confirmed_df=confirmed_df.groupby(['Country/Region']).sum()
death_df=death_df.groupby(['Country/Region']).sum()
recovered_df=recovered_df.groupby(['Country/Region']).sum()
confirmed_df=confirmed_df.reset_index()
death_df=death_df.reset_index()
recovered_df=recovered_df.reset_index()
dfg=country_df['Country_Region']
dfg=list(dfg)
dfg.remove('MS Zaandam')
dfg.remove('Diamond Princess')
olo={}
for i in dfg:
olo[i]=do_fuzzy_search(i)
olo['Burma']='MMR'
olo['Congo (Brazzaville)']='COG'
olo['Congo (Kinshasa)']='COD'
olo['Korea, South']='KOR'
olo['Laos']='LA'
olo['Taiwan*']='TWN'
olo['West Bank and Gaza']='PS'
country_df.sort_values(by=['Confirmed'],ascending=False,inplace=True)
country_df=country_df.reset_index()
country_df.drop(columns=['index'],inplace=True)
#country_df=country_df.head(10)
l=country_df['Country_Region']
l=list(l)
l.remove('MS Zaandam')
l.remove('Diamond Princess')
"""
confirmed_df=confirmed_df.loc[confirmed_df['Country/Region'].isin(l) ]
death_df=death_df.loc[death_df['Country/Region'].isin(l) ]
recovered_df=recovered_df.loc[recovered_df['Country/Region'].isin(l) ]
"""
confirmed_df=pd.melt(confirmed_df,id_vars='Country/Region',var_name='Date',value_name='No. Of Confirmed Cases')
death_df=pd.melt(death_df,id_vars='Country/Region',var_name='Date',value_name='No. Of Death Cases')
recovered_df=pd.melt(recovered_df,id_vars='Country/Region',var_name='Date',value_name='No. Of Recovered Cases')
confirmed_df=confirmed_df.pivot(index='Date', columns='Country/Region', values='No. Of Confirmed Cases')
death_df=death_df.pivot(index='Date', columns='Country/Region', values='No. Of Death Cases')
recovered_df=recovered_df.pivot(index='Date', columns='Country/Region', values='No. Of Recovered Cases')
confirmed_df=confirmed_df.reset_index()
death_df=death_df.reset_index()
recovered_df=recovered_df.reset_index()
death_df['Date']=pd.to_datetime(death_df['Date'])
confirmed_df['Date']=pd.to_datetime(confirmed_df['Date'])
recovered_df['Date']=pd.to_datetime(recovered_df['Date'])
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
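# Applies the given operator to a Modin frame and to the equivalent pandas frame with
# several operand types (frame, scalar, list, series, multi-index level) and checks
# that both libraries either agree on the result or raise the same exception type.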
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused, but it is kept so there won't be confusing list comprehension
# logic in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
        # Test when axis is None. This gets repeated for every parameter combination,
        # but that is simpler than adding None to the parametrize decorator.
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
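            # Exercise `level` both by integer position and, when axis names are set, by name.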
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
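        # apply() with a dict of functions along axis=1 should raise a TypeError.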
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this case when we can pull the error messages from backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
            # We have to do this because we may pick the most frequent value ("top")
            # differently than pandas, and there is no true guarantee which one will be
            # first. If the results don't match, make sure that the `freq` is the same
            # at least.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
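        # Dropping rows by MultiIndex level is expected to emit a UserWarning.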
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
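        # droplevel on either axis is expected to emit a UserWarning.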
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
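        # Also exercise the inplace code path; drop_duplicates with inplace=True returns None.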
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is unused so that there are no confusing list comprehensions
        # in the pytest.mark.parametrize decorator.
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
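        # DataFrame-by-DataFrame dot is expected to emit a UserWarning.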
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
        # We are not testing non-positive limits until pandas-27042 gets fixed.
        # We are not testing axis=1 ("columns") until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
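        # modin_df was created from `df` before the inplace fill, so it should not
        # reflect the change yet; df_equals is expected to raise here.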
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
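        # fillna with inplace=True returns None.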
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
            # Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
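        # The next two cases insert into empty frames: one that defines only
        # columns, then one that defines only an index.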
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
            # Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
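        # Also exercise loc on a frame indexed by a two-level MultiIndex.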
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
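        # Merging an unnamed Series raises; a named Series behaves like a one-column frame.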
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
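        # Helpers for the pipe chain: h drops one column, g doubles the frame
        # `arg1` times by appending it to itself, and f drops the rows labeled arg2 and arg3.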
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
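            # Compare the x/y data arrays of each plotted line rather than the Line2D objects.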
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
                    assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
        pandas_df = pandas.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
        # gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
        # without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
modin_renamed = modin_df.rename(columns={"C": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["C"] == 1).all()
def test_rename_inplace(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
df_equals(
modin_df.rename(columns={"C": "foo"}),
test_data.rename(columns={"C": "foo"}),
)
frame = test_data.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"C": "foo"}, inplace=True)
modin_frame.rename(columns={"C": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug(self):
# rename set ref_locs, and set_index was not resetting
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# df = df.set_index(['a', 'b'])
# df.columns = ['2001-01-01']
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# modin_df = modin_df.set_index(['a', 'b'])
# modin_df.columns = ['2001-01-01']
df_equals(modin_df, df)
def test_rename_axis(self):
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(
columns=[str.upper(o) for o in modin_df.columns.names]
),
pandas_df.rename_axis(
columns=[str.upper(o) for o in pandas_df.columns.names]
),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace(self):
test_frame = TestData().frame
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.reorder_levels(["Letter", "Color", "Number"])
def test_replace(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).replace()
def test_resample(self):
d = dict(
{
"price": [10, 11, 9, 13, 14, 18, 17, 19],
"volume": [50, 60, 40, 100, 50, 100, 40, 50],
}
)
df = pd.DataFrame(d)
df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
with pytest.warns(UserWarning):
df.resample("M", on="week_starting")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
def test_rolling(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.rolling(2, win_type="triang")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.round(), pandas_df.round())
df_equals(modin_df.round(1), pandas_df.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)
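        # Normalize a string axis label to its integer position for indexing `axes` below.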
if isinstance(axis, str):
num_axis = pandas.DataFrame()._get_axis_number(axis)
else:
num_axis = axis
# weights that sum to 1
sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))
weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
# weights that don't sum to 1
weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=0, axis=axis)
pandas_result = pandas_df.sample(n=0, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)
pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=2, random_state=42, axis=axis)
pandas_result = pandas_df.sample(n=2, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
def test_select_dtypes(self):
frame_data = {
"test1": list("abc"),
"test2": np.arange(3, 6).astype("u1"),
"test3": np.arange(8.0, 11.0, dtype="float64"),
"test4": [True, False, True],
"test5": pandas.date_range("now", periods=3).values,
"test6": list(range(5, 8)),
}
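        # Columns span object, unsigned int, float, bool, datetime64, and int dtypes.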
df = pandas.DataFrame(frame_data)
rd = pd.DataFrame(frame_data)
include = np.float, "integer"
exclude = (np.bool_,)
r = rd.select_dtypes(include=include, exclude=exclude)
e = df[["test2", "test3", "test6"]]
df_equals(r, e)
r = rd.select_dtypes(include=np.bool_)
e = df[["test4"]]
df_equals(r, e)
r = rd.select_dtypes(exclude=np.bool_)
e = df[["test1", "test2", "test3", "test5", "test6"]]
df_equals(r, e)
        with pytest.raises(ValueError):
            pd.DataFrame().select_dtypes()
def test_sem(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).sem()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
x = pandas.DataFrame()._get_axis_number(axis)
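        # Build replacement labels by suffixing each existing label along `axis`
        # with its position.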
index = modin_df.columns if x else modin_df.index
labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]
modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
df_equals(modin_result, pandas_result)
with pytest.warns(FutureWarning):
modin_df.set_axis(axis, labels, inplace=False)
modin_df_copy = modin_df.copy()
modin_df.set_axis(labels, axis=axis, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_axis(labels, axis=axis, inplace=True)
df_equals(modin_df, pandas_df)
with pytest.warns(FutureWarning):
modin_df.set_axis(labels, axis=axis, inplace=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys)
)
@pytest.mark.parametrize(
"append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(self, request, data, drop, append):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.set_index(
key, drop=drop, append=append, inplace=False
)
pandas_result = pandas_df.set_index(
key, drop=drop, append=append, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_copy = modin_df.copy()
modin_df.set_index(key, drop=drop, append=append, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_index(key, drop=drop, append=append, inplace=True)
df_equals(modin_df, pandas_df)
def test_set_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).set_value(0, 0, 0)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.shape == pandas_df.shape
def test_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.size == pandas_df.size
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_skew(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
def test_slice_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).slice_shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
@pytest.mark.parametrize(
"sort_remaining", bool_arg_values, ids=arg_keys("sort_remaining", bool_arg_keys)
)
def test_sort_index(self, data, axis, ascending, na_position, sort_remaining):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# Change index value so sorting will actually make a difference
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [(i - length / 2) % length for i in range(length)]
pandas_df.index = [(i - length / 2) % length for i in range(length)]
# Add NaNs to sorted index
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [
np.nan if i % 2 == 0 else modin_df.index[i] for i in range(length)
]
pandas_df.index = [
np.nan if i % 2 == 0 else pandas_df.index[i] for i in range(length)
]
else:
length = len(modin_df.columns)
modin_df.columns = [
np.nan if i % 2 == 0 else modin_df.columns[i] for i in range(length)
]
pandas_df.columns = [
np.nan if i % 2 == 0 else pandas_df.columns[i] for i in range(length)
]
modin_result = modin_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
pandas_result = pandas_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
pandas_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
df_equals(modin_df_cp, pandas_df_cp)
# MultiIndex
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pd.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(modin_df))]
)
pandas_df.index = pandas.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(pandas_df))]
)
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(level=0), pandas_df.sort_index(level=0))
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(axis=0), pandas_df.sort_index(axis=0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
def test_sort_values(self, request, data, axis, ascending, na_position):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name and (
            (axis == 0 or axis == "rows")
or name_contains(request.node.name, numeric_dfs)
):
index = (
modin_df.index if axis == 1 or axis == "columns" else modin_df.columns
)
key = index[0]
modin_result = modin_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
keys = [key, index[-1]]
modin_result = modin_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
def test_squeeze(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
frame_data_2 = {"col1": [0, 1, 2, 3]}
frame_data_3 = {
"col1": [0],
"col2": [4],
"col3": [8],
"col4": [12],
"col5": [0],
}
frame_data_4 = {"col1": [2]}
frame_data_5 = {"col1": ["string"]}
# Different data for different cases
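        # frame_data keeps its shape, frame_data_2 (one column) and frame_data_3
        # (one row) squeeze to Series, and frame_data_4/_5 (single cells) squeeze to scalars.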
pandas_df = pandas.DataFrame(frame_data).squeeze()
ray_df = pd.DataFrame(frame_data).squeeze()
df_equals(ray_df, pandas_df)
pandas_df_2 = pandas.DataFrame(frame_data_2).squeeze()
ray_df_2 = pd.DataFrame(frame_data_2).squeeze()
df_equals(ray_df_2, pandas_df_2)
pandas_df_3 = pandas.DataFrame(frame_data_3).squeeze()
ray_df_3 = pd.DataFrame(frame_data_3).squeeze()
df_equals(ray_df_3, pandas_df_3)
pandas_df_4 = pandas.DataFrame(frame_data_4).squeeze()
ray_df_4 = pd.DataFrame(frame_data_4).squeeze()
df_equals(ray_df_4, pandas_df_4)
pandas_df_5 = pandas.DataFrame(frame_data_5).squeeze()
ray_df_5 = pd.DataFrame(frame_data_5).squeeze()
df_equals(ray_df_5, pandas_df_5)
data = [
[
pd.Timestamp("2019-01-02"),
pd.Timestamp("2019-01-03"),
pd.Timestamp("2019-01-04"),
pd.Timestamp("2019-01-05"),
],
[1, 1, 1, 2],
]
df = pd.DataFrame(data, index=["date", "value"]).T
pf = pandas.DataFrame(data, index=["date", "value"]).T
df.set_index("date", inplace=True)
pf.set_index("date", inplace=True)
df_equals(df.iloc[0], pf.iloc[0])
def test_stack(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).stack()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_std(self, request, data, axis, skipna, numeric_only, ddof):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
def test_style(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).style
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_sum(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.sum(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.sum(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sum_single_column(self, data):
modin_df = pd.DataFrame(data).iloc[:, [0]]
pandas_df = pandas.DataFrame(data).iloc[:, [0]]
df_equals(modin_df.sum(), pandas_df.sum())
df_equals(modin_df.sum(axis=1), pandas_df.sum(axis=1))
def test_swapaxes(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).swapaxes(0, 1)
def test_swaplevel(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.swaplevel("Number", "Color")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_tail(self, data, n):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.tail(n), pandas_df.tail(n))
df_equals(modin_df.tail(len(modin_df)), pandas_df.tail(len(pandas_df)))
def test_take(self):
df = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
with pytest.warns(UserWarning):
df.take([0, 3])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_records(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# Skips nan because only difference is nan instead of NaN
if not name_contains(request.node.name, ["nan"]):
assert np.array_equal(modin_df.to_records(), pandas_df.to_records())
def test_to_sparse(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).to_sparse()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_string(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# Skips nan because only difference is nan instead of NaN
if not name_contains(request.node.name, ["nan"]):
assert modin_df.to_string() == to_pandas(modin_df).to_string()
def test_to_timestamp(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.to_period().to_timestamp()
def test_to_xarray(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).to_xarray()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_transform(self, request, data, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.transform(func)
except Exception as e:
with pytest.raises(type(e)):
modin_df.transform(func)
else:
modin_result = modin_df.transform(func)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_transform_numeric(self, request, data, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.transform(func)
except Exception as e:
with pytest.raises(type(e)):
modin_df.transform(func)
else:
modin_result = modin_df.transform(func)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.T, pandas_df.T)
df_equals(modin_df.transpose(), pandas_df.transpose())
# Uncomment below once #165 is merged
# Test for map across full axis for select indices
# df_equals(modin_df.T.dropna(), pandas_df.T.dropna())
# Test for map across full axis
# df_equals(modin_df.T.nunique(), pandas_df.T.nunique())
# Test for map across blocks
# df_equals(modin_df.T.notna(), pandas_df.T.notna())
def test_truncate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).truncate()
def test_tshift(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.to_period().tshift()
def test_tz_convert(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.tz_localize("America/Los_Angeles").tz_convert("America/Los_Angeles")
def test_tz_localize(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.tz_localize("America/Los_Angeles")
def test_unstack(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).unstack()
def test_update(self):
df = pd.DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = pd.DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = pd.DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
df_equals(df, expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_values(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
np.testing.assert_equal(modin_df.values, pandas_df.values)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_var(self, request, data, axis, skipna, numeric_only, ddof):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception:
with pytest.raises(TypeError):
modin_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
def test_where(self):
frame_data = random_state.randn(100, 10)
pandas_df = pandas.DataFrame(frame_data, columns=list("abcdefghij"))
modin_df = pd.DataFrame(frame_data, columns=list("abcdefghij"))
pandas_cond_df = pandas_df % 5 < 2
modin_cond_df = modin_df % 5 < 2
pandas_result = pandas_df.where(pandas_cond_df, -pandas_df)
modin_result = modin_df.where(modin_cond_df, -modin_df)
assert all((to_pandas(modin_result) == pandas_result).all())
other = pandas_df.loc[3]
pandas_result = pandas_df.where(pandas_cond_df, other, axis=1)
modin_result = modin_df.where(modin_cond_df, other, axis=1)
assert all((to_pandas(modin_result) == pandas_result).all())
other = pandas_df["e"]
pandas_result = pandas_df.where(pandas_cond_df, other, axis=0)
modin_result = modin_df.where(modin_cond_df, other, axis=0)
assert all((to_pandas(modin_result) == pandas_result).all())
pandas_result = pandas_df.where(pandas_df < 2, True)
modin_result = modin_df.where(modin_df < 2, True)
assert all((to_pandas(modin_result) == pandas_result).all())
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
df = pd.DataFrame(data=d)
df = df.set_index(["class", "animal", "locomotion"])
with pytest.warns(UserWarning):
df.xs("mammal")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getitem__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key = modin_df.columns[0]
modin_col = modin_df.__getitem__(key)
assert isinstance(modin_col, pd.Series)
pd_col = pandas_df[key]
df_equals(pd_col, modin_col)
slices = [
(None, -1),
(-1, None),
(1, 2),
(1, None),
(None, 1),
(1, -1),
(-3, -1),
(1, -1, 2),
]
# slice test
for slice_param in slices:
s = slice(*slice_param)
df_equals(modin_df[s], pandas_df[s])
# Test empty
df_equals(pd.DataFrame([])[:10], pandas.DataFrame([])[:10])
def test_getitem_empty_mask(self):
# modin-project/modin#517
modin_frames = []
pandas_frames = []
data1 = np.random.randint(0, 100, size=(100, 4))
mdf1 = pd.DataFrame(data1, columns=list("ABCD"))
pdf1 = pandas.DataFrame(data1, columns=list("ABCD"))
modin_frames.append(mdf1)
pandas_frames.append(pdf1)
data2 = np.random.randint(0, 100, size=(100, 4))
mdf2 = pd.DataFrame(data2, columns=list("ABCD"))
pdf2 = pandas.DataFrame(data2, columns=list("ABCD"))
modin_frames.append(mdf2)
pandas_frames.append(pdf2)
data3 = np.random.randint(0, 100, size=(100, 4))
mdf3 = pd.DataFrame(data3, columns=list("ABCD"))
pdf3 = pandas.DataFrame(data3, columns=list("ABCD"))
modin_frames.append(mdf3)
pandas_frames.append(pdf3)
modin_data = pd.concat(modin_frames)
pandas_data = pandas.concat(pandas_frames)
df_equals(
modin_data[[False for _ in modin_data.index]],
pandas_data[[False for _ in modin_data.index]],
)
def test_getitem_datetime_slice(self):
data = {"data": range(1000)}
index = pd.date_range("2017/1/4", periods=1000)
modin_df = pd.DataFrame(data=data, index=index)
pandas_df = pandas.DataFrame(data=data, index=index)
s = slice("2017-01-06", "2017-01-09")
df_equals(modin_df[s], pandas_df[s])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getattr__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
col = modin_df.__getattr__(key)
col = modin_df.__getattr__("col1")
assert isinstance(col, pd.Series)
col = getattr(modin_df, "col1")
assert isinstance(col, pd.Series)
# Check that lookup in column doesn't override other attributes
df2 = modin_df.rename(index=str, columns={key: "columns"})
assert isinstance(df2.columns, pandas.Index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___setitem__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.__setitem__(modin_df.columns[-1], 1)
pandas_df.__setitem__(pandas_df.columns[-1], 1)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df[modin_df.columns[-1]] = pd.DataFrame(modin_df[modin_df.columns[0]])
pandas_df[pandas_df.columns[-1]] = pandas.DataFrame(
pandas_df[pandas_df.columns[0]]
)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
rows = len(modin_df)
arr = np.arange(rows * 2).reshape(-1, 2)
modin_df[modin_df.columns[-1]] = arr
pandas_df[pandas_df.columns[-1]] = arr
df_equals(pandas_df, modin_df)
with pytest.raises(ValueError, match=r"Wrong number of items passed"):
modin_df["___NON EXISTENT COLUMN"] = arr
modin_df[modin_df.columns[0]] = np.arange(len(modin_df))
pandas_df[pandas_df.columns[0]] = np.arange(len(pandas_df))
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(columns=modin_df.columns)
pandas_df = pandas.DataFrame(columns=pandas_df.columns)
for col in modin_df.columns:
modin_df[col] = np.arange(1000)
for col in pandas_df.columns:
pandas_df[col] = np.arange(1000)
df_equals(modin_df, pandas_df)
# Test series assignment to column
modin_df = pd.DataFrame(columns=modin_df.columns)
pandas_df = pandas.DataFrame(columns=pandas_df.columns)
modin_df[modin_df.columns[-1]] = modin_df[modin_df.columns[0]]
pandas_df[pandas_df.columns[-1]] = pandas_df[pandas_df.columns[0]]
df_equals(modin_df, pandas_df)
# Transpose test
modin_df = pd.DataFrame(data).T
pandas_df = pandas.DataFrame(data).T
# We default to pandas on non-string column names
if not all(isinstance(c, str) for c in modin_df.columns):
with pytest.warns(UserWarning):
modin_df[modin_df.columns[0]] = 0
else:
modin_df[modin_df.columns[0]] = 0
pandas_df[pandas_df.columns[0]] = 0
df_equals(modin_df, pandas_df)
modin_df.columns = [str(i) for i in modin_df.columns]
pandas_df.columns = [str(i) for i in pandas_df.columns]
modin_df[modin_df.columns[0]] = 0
pandas_df[pandas_df.columns[0]] = 0
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___len__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert len(modin_df) == len(pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___neg__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.__neg__()
except Exception as e:
with pytest.raises(type(e)):
modin_df.__neg__()
else:
modin_result = modin_df.__neg__()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___invert__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = ~pandas_df
except Exception as e:
with pytest.raises(type(e)):
repr(~modin_df)
else:
modin_result = ~modin_df
df_equals(modin_result, pandas_result)
def test___hash__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
try:
pd.DataFrame(data).__hash__()
except TypeError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___iter__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterator = modin_df.__iter__()
# Check that modin_iterator implements the iterator interface
assert hasattr(modin_iterator, "__iter__")
assert hasattr(modin_iterator, "next") or hasattr(modin_iterator, "__next__")
pd_iterator = pandas_df.__iter__()
assert list(modin_iterator) == list(pd_iterator)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___contains__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
result = False
key = "Not Exist"
assert result == modin_df.__contains__(key)
assert result == (key in modin_df)
if "empty_data" not in request.node.name:
result = True
key = pandas_df.columns[0]
assert result == modin_df.__contains__(key)
assert result == (key in modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___nonzero__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
# Always raises ValueError
modin_df.__nonzero__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___abs__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = abs(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
abs(modin_df)
else:
modin_result = abs(modin_df)
df_equals(modin_result, pandas_result)
def test___round__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).__round__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___array__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert_array_equal(modin_df.__array__(), pandas_df.__array__())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___bool__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.__bool__()
except Exception as e:
with pytest.raises(type(e)):
modin_df.__bool__()
else:
modin_result = modin_df.__bool__()
df_equals(modin_result, pandas_result)
def test___getstate__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).__getstate__()
def test___setstate__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
try:
pd.DataFrame(data).__setstate__(None)
except TypeError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___delitem__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = pandas_df.columns[0]
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
modin_df.__delitem__(key)
pandas_df.__delitem__(key)
df_equals(modin_df, pandas_df)
# Issue 2027
last_label = pandas_df.iloc[:, -1].name
modin_df.__delitem__(last_label)
pandas_df.__delitem__(last_label)
df_equals(modin_df, pandas_df)
def test__options_display(self):
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 102))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
pandas.options.display.max_rows = 10
pandas.options.display.max_columns = 10
x = repr(pandas_df)
pd.options.display.max_rows = 5
pd.options.display.max_columns = 5
y = repr(modin_df)
assert x != y
pd.options.display.max_rows = 10
pd.options.display.max_columns = 10
y = repr(modin_df)
assert x == y
# test for old fixed max values
pandas.options.display.max_rows = 75
pandas.options.display.max_columns = 75
x = repr(pandas_df)
pd.options.display.max_rows = 75
pd.options.display.max_columns = 75
y = repr(modin_df)
assert x == y
def test___finalize__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).__finalize__(None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___copy__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy, pandas_df_copy = modin_df.__copy__(), pandas_df.__copy__()
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___deepcopy__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy, pandas_df_copy = (
modin_df.__deepcopy__(),
pandas_df.__deepcopy__(),
)
df_equals(modin_df_copy, pandas_df_copy)
def test___repr__(self):
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 100))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 99))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 101))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 102))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
        # The __repr__ method has a different code path depending on
        # whether the number of rows is >60, and a different code path
        # depending on whether the number of columns is >20.
# Previous test cases already check the case when cols>20
# and rows>60. The cases that follow exercise the other three
# combinations.
# rows <= 60, cols > 20
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(10, 100))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
# rows <= 60, cols <= 20
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(10, 10))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
# rows > 60, cols <= 20
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(100, 10))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
# Empty
pandas_df = pandas.DataFrame(columns=["col{}".format(i) for i in range(100)])
modin_df = pd.DataFrame(columns=["col{}".format(i) for i in range(100)])
assert repr(pandas_df) == repr(modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index_with_multi_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if len(modin_df.columns) > len(pandas_df.columns):
col0 = modin_df.columns[0]
col1 = modin_df.columns[1]
modin_cols = modin_df.groupby([col0, col1]).count().reset_index().columns
pandas_cols = pandas_df.groupby([col0, col1]).count().reset_index().columns
assert modin_cols.equals(pandas_cols)
def test_reset_index_with_named_index(self):
modin_df = pd.DataFrame(test_data_values[0])
pandas_df = pandas.DataFrame(test_data_values[0])
modin_df.index.name = pandas_df.index.name = "NAME_OF_INDEX"
df_equals(modin_df, pandas_df)
df_equals(modin_df.reset_index(drop=False), pandas_df.reset_index(drop=False))
modin_df.reset_index(drop=True, inplace=True)
pandas_df.reset_index(drop=True, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(test_data_values[0])
pandas_df = pandas.DataFrame(test_data_values[0])
modin_df.index.name = pandas_df.index.name = "NEW_NAME"
df_equals(modin_df.reset_index(drop=False), pandas_df.reset_index(drop=False))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_inplace_series_ops(self, data):
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
import pandas
from jardin import config as config
from jardin.query_builders import \
SelectQueryBuilder, \
InsertQueryBuilder, \
UpdateQueryBuilder, \
DeleteQueryBuilder, \
RawQueryBuilder
from jardin.cache_stores import cached
def set_defaults(func):
def wrapper(self, *args, **kwargs):
kwargs.update(
model_metadata=self.model_metadata,
scheme=self.db_client.db_config.scheme,
lexicon=self.db_client.lexicon
)
return func(self, *args, **kwargs)
return wrapper
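# Hypothetical usage sketch (the client and metadata objects below are placeholders,
# not part of jardin): a method decorated with @set_defaults is called with only the
# query-specific kwargs,
#
#   adapter = DatabaseAdapter(db_client, model_metadata)
#   users = adapter.select(where={'active': True})
#
# and the wrapper injects model_metadata, scheme and lexicon before the query builder runs.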
class DatabaseAdapter(object):
def __init__(self, db_client, model_metadata):
self.db_client = db_client
self.model_metadata = model_metadata
@set_defaults
def select(self, **kwargs):
query = SelectQueryBuilder(**kwargs).query
config.logger.debug(query)
results, columns = self.db_client.execute(*query, write=False)
if results is None and columns is None:
return None
return pandas.DataFrame.from_records(results, columns=columns, coerce_float=True)
@set_defaults
def write(self, query_builder, **kwargs):
query = query_builder(**kwargs).query
config.logger.debug(query)
returning_ids = self.db_client.execute(*query, write=True, **kwargs)
if len(returning_ids) > 0:
return self.select(where={kwargs['primary_key']: returning_ids})
return None
def insert(self, **kwargs):
return self.write(InsertQueryBuilder, **kwargs)
def update(self, **kwargs):
return self.write(UpdateQueryBuilder, **kwargs)
@set_defaults
def delete(self, **kwargs):
query = DeleteQueryBuilder(**kwargs).query
config.logger.debug(query)
self.db_client.execute(*query, write=False)
@set_defaults
@cached
def raw_query(self, **kwargs):
query = RawQueryBuilder(**kwargs).query
config.logger.debug(query)
results, columns = self.db_client.execute(*query, write=False)
if results is None and columns is None:
return None
return | pandas.DataFrame.from_records(results, columns=columns, coerce_float=True) | pandas.DataFrame.from_records |
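# Self-contained sketch of the from_records pattern used by select() and raw_query() above,
# with plain tuples standing in for DB driver rows (illustrative only):
#
#   import pandas
#   from decimal import Decimal
#   rows = [(1, 'alice', Decimal('3.50')), (2, 'bob', Decimal('4.25'))]
#   df = pandas.DataFrame.from_records(rows, columns=['id', 'name', 'score'], coerce_float=True)
#   # coerce_float=True turns the Decimal values many DB drivers return into float64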
import numpy as np
from pandas import DataFrame, MultiIndex
import pandas._testing as tm
from pandas.core.arrays import PandasArray
class TestToDictOfBlocks:
def test_copy_blocks(self, float_frame):
# GH#9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
blocks = df._to_dict_of_blocks(copy=True)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
def test_no_copy_blocks(self, float_frame):
# GH#9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
blocks = df._to_dict_of_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
def test_to_dict_of_blocks_item_cache():
# Calling to_dict_of_blocks should not poison item_cache
df = | DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) | pandas.DataFrame |
#!/usr/bin/python3
import argparse
import json
import logging
import os
import socket
import threading
import time
import bjoern
import falcon
import jinja2
import pandas as pd
from prometheus_client import start_http_server, Counter, Gauge
import requests
import schedule
from gamutrf.sigwindows import calc_db
from gamutrf.sigwindows import choose_record_signal
from gamutrf.sigwindows import choose_recorders
from gamutrf.sigwindows import find_sig_windows
from gamutrf.sigwindows import parse_freq_excluded
from gamutrf.sigwindows import get_center
SOCKET_TIMEOUT = 1.0
ROLLOVERHZ = 100e6
PEAK_DBS = {}
def falcon_response(resp, text, status):
resp.status = status
resp.text = text
resp.content_type = 'text/html'
def ok_response(resp, text='ok!'):
falcon_response(resp, text=text, status=falcon.HTTP_200)
def error_response(resp, text='error!'):
falcon_response(resp, text=text, status=falcon.HTTP_500)
def load_template(name):
path = os.path.join('templates', name)
with open(os.path.abspath(path), 'r') as fp:
return jinja2.Template(fp.read())
class ActiveRequests:
def on_get(self, req, resp):
all_jobs = schedule.get_jobs()
ok_response(resp, f'{all_jobs}')
class ScannerForm:
def on_get(self, req, resp):
template = load_template('scanner_form.html')
ok_response(resp, template.render(bins=PEAK_DBS))
class Result:
def on_post(self, req, resp):
# TODO validate input
try:
recorder = f'http://{req.media["worker"]}:8000/'
signal_hz = int(int(req.media['frequency']) * 1e6)
record_bps = int(int(req.media['bandwidth']) * (1024 * 1024))
record_samples = int(record_bps * int(req.media['duration']))
recorder_args = f'record/{signal_hz}/{record_samples}/{record_bps}'
timeout = int(req.media['duration'])
response = None
if int(req.media['repeat']) == -1:
schedule.every(timeout).seconds.do(run_threaded, record, recorder=recorder, recorder_args=recorder_args, timeout=timeout).tag(f'{recorder}{recorder_args}-{timeout}')
ok_response(resp)
else:
response = recorder_req(recorder, recorder_args, timeout)
time.sleep(timeout)
for _ in range(int(req.media['repeat'])):
response = recorder_req(recorder, recorder_args, timeout)
time.sleep(timeout)
if response:
ok_response(resp)
else:
ok_response(resp, f'Request {recorder} {recorder_args} failed.')
except Exception as e:
error_response(resp, f'{e}')
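# Worked example of the unit conversions in Result.on_post above, for a request with
# frequency=100 (MHz), bandwidth=20 (MiB/s) and duration=10 (s):
#   signal_hz      = int(100 * 1e6)    -> 100000000
#   record_bps     = 20 * 1024 * 1024  -> 20971520
#   record_samples = 20971520 * 10     -> 209715200
#   recorder_args  -> 'record/100000000/209715200/20971520'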
def record(recorder, recorder_args, timeout):
recorder_req(recorder, recorder_args, timeout)
def run_threaded(job_func, recorder, recorder_args, timeout):
job_thread = threading.Thread(target=job_func, args=(recorder, recorder_args, timeout,))
job_thread.start()
def init_prom_vars():
prom_vars = {
'last_bin_freq_time': Gauge('last_bin_freq_time', 'epoch time last signal in each bin', labelnames=('bin_mhz',)),
'worker_record_request': Gauge('worker_record_request', 'record requests made to workers', labelnames=('worker',)),
'freq_power': Gauge('freq_power', 'bin frequencies and db over time', labelnames=('bin_freq',)),
'new_bins': Counter('new_bins', 'frequencies of new bins', labelnames=('bin_freq',)),
'old_bins': Counter('old_bins', 'frequencies of old bins', labelnames=('bin_freq',)),
'bin_freq_count': Counter('bin_freq_count', 'count of signals in each bin', labelnames=('bin_mhz',)),
'frame_counter': Counter('frame_counter', 'number of frames processed'),
}
return prom_vars
def update_prom_vars(peak_dbs, new_bins, old_bins, prom_vars):
freq_power = prom_vars['freq_power']
new_bins_prom = prom_vars['new_bins']
old_bins_prom = prom_vars['old_bins']
for freq in peak_dbs:
freq_power.labels(bin_freq=freq).set(peak_dbs[freq])
for nbin in new_bins:
new_bins_prom.labels(bin_freq=nbin).inc()
for obin in old_bins:
old_bins_prom.labels(bin_freq=obin).inc()
def process_fft(args, prom_vars, ts, fftbuffer, lastbins):
global PEAK_DBS
tsc = time.ctime(ts)
logging.info(f'new frame at {tsc}')
df = | pd.DataFrame(fftbuffer, columns=['ts', 'freq', 'db']) | pandas.DataFrame |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from math import log
from functools import wraps
from operator import itemgetter
from bisect import bisect
import sys
import pandas as pd
from collections import OrderedDict, defaultdict
import unittest, itertools
# interval tests
# single pos intervals
#|#|#|#|#|#|#
# end to end
###|###|###
# first starts at start and last ends at last (3 intervals)
### ### ###
# first starts after start and last ends before last (3 intervals)
### ### ###
# Three intervals tiled
######
######
######
# Three intervals nested inside each other
#######
#####
###
# import glob
# import os
# chrom_sizes = dict()
# for p in glob.glob('/Users/kmt/Downloads/igv-master/genomes/sizes/*.sizes'):
# name = os.path.basename(p).replace('.chrom.sizes', '')
# with open(p) as f:
# d = dict(l.split()[:2] for l in f)
# chrom_sizes[name] = d
# print('chrom_sizes = {')
# for chrom, d in chrom_sizes.items():
# print("'{}': {}".format(chrom, repr(d)))
# print('}')
from chromwindow.chrom_sizes import chrom_sizes
def print_intervals(df):
starts, ends = list(df['start']), list(df['end'])
m = max(ends)
for s, e in zip(starts, ends):
print(" " * (s) + "#" * (e-s) + " " * (m - e))
class TestEvenWindows(unittest.TestCase):
def setUp(self):
self.df_single_end_to_end = pd.DataFrame({'start': list(range(30)), 'end': list(range(1, 31))})
self.df_three_end_to_end = pd.DataFrame({'start': [0, 10, 20], 'end': [10, 20, 30]})
self.df_three_nonoverl = pd.DataFrame({'start': [5, 15, 25], 'end': [10, 20, 30]})
self.df_three_tiled = pd.DataFrame({'start': [0, 3, 6], 'end': [21, 24, 27]})
# print_intervals(self.df_three_tiled)
########## ####### ####
####### ####### #######
#### ####### ##########
self.df_three_nested = pd.DataFrame({'start': [0, 3, 6], 'end': [27, 24, 21]})
# print_intervals(self.df_three_nested)
###########################
#####################
###############
########## ####### ##########
####### ####### #######
#### ####### ####
self.df_empty = pd.DataFrame()
def test_empty(self):
with self.assertRaises(KeyError):
even_windows(self.df_empty, 3)
def test_single_end_to_end_1(self):
# three perfect windows
self.assertEqual(even_windows(self.df_single_end_to_end, 10), [10, 10, 10])
def test_single_end_to_end_2(self):
# one perfect window
self.assertEqual(even_windows(self.df_single_end_to_end, 30), [30])
def test_single_end_to_end_3(self):
# one window too large
self.assertEqual(even_windows(self.df_single_end_to_end, 31), [30])
def test_single_end_to_end_4(self):
# three windows, last one not filled
self.assertEqual(even_windows(self.df_single_end_to_end, 12), [12, 12, 6])
def test_three_end_to_end_1(self):
# three perfect windows
self.assertEqual(even_windows(self.df_three_end_to_end, 10), [10, 10, 10])
def test_three_end_to_end_2(self):
# one perfect window
self.assertEqual(even_windows(self.df_three_end_to_end, 30), [30])
def test_three_end_to_end_3(self):
# one window too large
self.assertEqual(even_windows(self.df_three_end_to_end, 31), [30])
def test_three_end_to_end_4(self):
# last window one not filled
self.assertEqual(even_windows(self.df_three_end_to_end, 12), [12, 12, 6])
def test_three_nonoverl_1(self):
# three perfect windows
self.assertEqual(even_windows(self.df_three_nonoverl, 5), [10, 10, 10])
def test_three_nonoverl_2(self):
# one perfect window
self.assertEqual(even_windows(self.df_three_nonoverl, 15), [30])
def test_three_nonoverl_3(self):
# one window too large
self.assertEqual(even_windows(self.df_three_nonoverl, 16), [30])
def test_three_nonoverl_4(self):
# last window not filled
self.assertEqual(even_windows(self.df_three_nonoverl, 10), [20, 10])
def test_three_tiled_1(self):
# three perfect windows
self.assertEqual(even_windows(self.df_three_tiled, 21), [10, 7, 10])
def test_three_tiled_2(self):
# one perfect window
self.assertEqual(even_windows(self.df_three_tiled, 63), [27])
def test_three_tiled_3(self):
# one window too large
self.assertEqual(even_windows(self.df_three_tiled, 64), [27])
def test_three_tiled_4(self):
# last window not filled
self.assertEqual(even_windows(self.df_three_tiled, 60), [24, 3])
def test_three_nested_1(self):
# three perfect windows
self.assertEqual(even_windows(self.df_three_nested, 21), [10, 7, 10])
def test_three_nested_2(self):
# one perfect window
self.assertEqual(even_windows(self.df_three_nested, 63), [27])
def test_three_nested_3(self):
# one window too large
self.assertEqual(even_windows(self.df_three_nested, 64), [27])
def test_three_nested_4(self):
# last window not filled
self.assertEqual(even_windows(self.df_three_nested, 60), [24, 3])
class TestWindowCoordinates(unittest.TestCase):
def test_window_coord_1(self):
w = iter(WindowCoordinates(binsize=10, logbase=1, bins=None))
self.assertEqual(list(itertools.islice(w, 4)), [(0, 10), (10, 10), (20, 10), (30, 10)])
def test_window_coord_2(self):
w = iter(WindowCoordinates(binsize=2, logbase=2, bins=None))
self.assertEqual(list(itertools.islice(w, 4)), [(0, 2), (2, 4), (6, 8), (14, 16)])
def test_window_coord_3(self):
w = iter(WindowCoordinates(binsize=None, logbase=1, bins=[10, 20, 30]))
self.assertEqual(list(itertools.islice(w, 4)), [(0, 10), (10, 20), (30, 30), (60, float('inf'))])
class TestConcatDicts(unittest.TestCase):
pass
class TestStatsToDataFrame(unittest.TestCase):
pass
class TestGenomicWindows(unittest.TestCase):
pass
class TestStoreGroupbyApply(unittest.TestCase):
pass
# concat_dicts(l):
# stats_data_frame(list_of_stat_results):
# genomic_windows(full_df, func, bin_iter):
# window(size=None, logbase=1, even=None):
# store_groupby_apply(store_file_name, col_names, fun, df_name='df', group_keys=True):
class TestWindowDecorator(unittest.TestCase):
def setUp(self):
self.df_single_end_to_end = pd.DataFrame({'start': list(range(30)), 'end': list(range(1, 31))})
self.df_three_end_to_end = pd.DataFrame({'start': [0, 10, 20], 'end': [10, 20, 30]})
self.df_three_nonoverl = | pd.DataFrame({'start': [5, 15, 25], 'end': [10, 20, 30]}) | pandas.DataFrame |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import scipy as sp
from scipy import linalg, optimize
from scipy.optimize import minimize, minimize_scalar, rosen, rosen_der, brentq, fminbound, curve_fit
import math
from scipy.stats import norm
import pylab as P
import matplotlib.mlab as mlab
import beadpy
def segmentplotter(table,maxrate, ymin, ymax, legloc = 1, scale = 10):
table = table[abs(table['rate']) < maxrate]
x = table['x1']
y = table['displacement']
size = abs(table['rate'])/scale
fig, ax = plt.subplots(figsize=(10, 7.5))
ax.scatter(x, y, s = size, alpha=0.5, color='magenta', edgecolors='black')
bins = np.linspace(0, maxrate, 4)
if scale < 5:
firstbinsize = 1
elif scale >=5:
firstbinsize = 25
l1 = ax.scatter([],[], s=(firstbinsize)/scale, c = 'magenta')
l2 = ax.scatter([],[], s=bins[1] / scale, c = 'magenta')
l3 = ax.scatter([],[], s=bins[2] / scale, c = 'magenta')
l4 = ax.scatter([],[], s=bins[3] / scale,c = 'magenta')
labels = [firstbinsize, int(bins[1]), int(bins[2]), int(bins[3])]
ax.legend([l1, l2, l3, l4],
labels,
frameon = True,
fontsize = 16,
handlelength = 2,
loc = legloc,
borderpad = 0.5,
handletextpad = 1,
title ='Rate (nt/s)',
scatterpoints = 1)
ax.set_xlabel('Segment start time (s)', fontsize=16)
ax.set_ylabel('Segment length (nt)', fontsize=16)
ax.set_xlim((0, max(x)))
ax.set_ylim((ymin, ymax))
fig.tight_layout(pad=2);
ax.grid(True)
fig.savefig('segments.png', dpi = 300)
#plt.clf()
return ax;
def filterer(resultstable, segmentstable, minrate, maxrate, mindisplacement, starttime, endtime):
filtsegments = segmentstable[(abs(segmentstable['rate']) > minrate)
& (segmentstable['rate'] < maxrate)
& (segmentstable['displacement'] >= mindisplacement)
& (segmentstable['x1'] > starttime)
& (segmentstable['x1'] < endtime)]
filtresults = resultstable[resultstable.trajectory.isin(filtsegments.trajectory)]
filtresults.to_csv('filtresults.csv', index = False, float_format='%.4f')
filtsegments.to_csv('filtsegments.csv', index = False)
return filtresults, filtsegments;
def trajectory_plotter(resultstable, exampletraj, sigma = 500, method = ('global', 'table'), sigma_start = 10, sigma_end = 100, eventregion = (200,500), segmenttable = 0):
exampletraj = int(exampletraj)
fig, ax = plt.subplots(figsize = (10, 7.5))
ax.plot(resultstable['time'][resultstable['trajectory'] == exampletraj],
resultstable['nucleotides'][resultstable['trajectory'] == exampletraj]/1000,
lw = 3)
ax.set_xlabel("Time (s)", fontsize=16)
ax.set_ylabel("Nucleotides synthesised (kb)", fontsize=16)
ax.set_xlim((-50,resultstable['time'][resultstable['trajectory'] == exampletraj].max()+50))
ax.set_ylim((-0.5 + resultstable['nucleotides'][resultstable['trajectory'] == exampletraj].min()/1000,0.5 + resultstable['nucleotides'][resultstable['trajectory'] == exampletraj].max()/1000))
if not method == 'nofit':
if method == ('global', 'table'):
exampletrajseg = beadpy.ratefinder(resultstable[resultstable['trajectory']==exampletraj], segtable = segmenttable, sigmaval = sigma)
elif method == ('global', 'region'):
exampletrajseg = beadpy.segment_finder(resultstable[(resultstable['time']>eventregion[0]) & (resultstable['time'] < eventregion[1])], sigma = sigma, traj = exampletraj)
elif method == ('global', 'whole'):
exampletrajseg = beadpy.segment_finder(resultstable, sigma = sigma, traj = exampletraj)
elif method == ('auto', 'table'):
test, sigma = beadpy.segment_finder(resultstable, method = 'auto', traj = int(exampletraj), returnsigma = 'yes', sigma_start = sigma_start, sigma_end = sigma_end)
exampletrajseg = beadpy.ratefinder(resultstable[resultstable['trajectory']==exampletraj], segtable = segmenttable, sigmaval = sigma)
elif method == ('auto', 'region'):
test, sigma = beadpy.segment_finder(resultstable, method = 'auto', traj = int(exampletraj), returnsigma = 'yes', sigma_start = sigma_start, sigma_end = sigma_end)
exampletrajseg = beadpy.segment_finder(resultstable[(resultstable['time']>eventregion[0]) & (resultstable['time'] < eventregion[1])], sigma = sigma, traj = exampletraj)
elif method == ('auto', 'whole'):
exampletrajseg, sigma = beadpy.segment_finder(resultstable, method = 'auto', traj = int(exampletraj), returnsigma = 'yes', sigma_start = sigma_start, sigma_end = sigma_end)
fig.suptitle('Trajectory '+str(exampletraj)+', sigma '+str(int(sigma)), fontsize = 16)
for row_index, row in exampletrajseg[exampletrajseg.trajectory==exampletraj].iterrows():
ax.plot([row['x1'], row['x2']], [row['y1']/1000, row['y2']/1000],'k-', lw=2, color='Magenta', linestyle='-')
else:
fig.suptitle('Trajectory '+str(exampletraj), fontsize = 16)
ax.tick_params(axis='both', labelsize=14)
fig.tight_layout(pad=4)
if not method == 'nofit':
fig.savefig('traj_'+str(exampletraj)+'_sigma_'+str(sigma)+'.png', dpi = 300)
return exampletrajseg
else:
fig.savefig('traj_'+str(exampletraj)+'.png', dpi = 300)
def weighted_avg_and_std(values, weights):
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights) # Fast and numerically precise
return (average, math.sqrt(variance))
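# Worked example: weighted_avg_and_std([1, 2, 3], weights=[1, 1, 2])
#   average  = (1*1 + 2*1 + 3*2) / 4 = 2.25
#   variance = (1*(1 - 2.25)**2 + 1*(2 - 2.25)**2 + 2*(3 - 2.25)**2) / 4 = 0.6875
#   std      = sqrt(0.6875) ~ 0.829
# so the function returns (2.25, ~0.829).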
def ratefinder(restable, segtable, sigmaval = 300):
#Filter the results to only be between the first data point of the filtered segments, and the last for each trajectory.
restable = restable[restable.trajectory.isin(segtable.trajectory)]
segtable = segtable[segtable['trajectory'].isin(restable['trajectory'])]
groupedsegs = segtable.groupby(['trajectory'], as_index=False)
starttimes = groupedsegs['x1'].min()
endtimes = groupedsegs['x2'].max()
startendtimes = pd.merge(left=starttimes, right = endtimes, how='left', left_on='trajectory', right_on='trajectory')
mergedfiltresults = pd.merge(left=restable,right=startendtimes, how='left', left_on='trajectory', right_on='trajectory')
finefiltresults = mergedfiltresults[(mergedfiltresults['time'] >= mergedfiltresults['x1'])
& (mergedfiltresults['time'] <= mergedfiltresults['x2'])]
#Do change point analysis on these events:
segmentsfine = beadpy.segment_finder(finefiltresults, sigma = sigmaval)
return segmentsfine;
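# Note on the merge above: startendtimes has one row per trajectory holding x1 (start of its
# first filtered segment) and x2 (end of its last filtered segment); merging it back onto the
# raw results lets the final filter keep only the points inside each event window, so the
# change-point analysis is restricted to the event region of every trajectory.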
def sigmaval_finder(restable, sigma_start = 0, sigma_end = 150):
restable = restable.reset_index(drop=True)
sigmaregion = restable[(restable.time > sigma_start) & (restable.time < sigma_end)]
sigmavals = sigmaregion.groupby('trajectory')['nucleotides'].apply(lambda x:x.rolling(center=False,window=20).std().mean())
sigmavals = sigmavals[np.logical_not(np.isnan(sigmavals))]
trajectories = sigmavals.index.tolist()
sigmavals = sigmavals.tolist()
return sigmavals, trajectories;
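# Note: for each trajectory the sigma estimate is the mean of a 20-point rolling standard
# deviation of 'nucleotides' over the chosen quiet window (sigma_start..sigma_end seconds).
# Trajectories with too few points in that window give NaN and are dropped from both lists.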
def ratefinder_autosigma(restable, segtable, sigma_start, sigma_end):
segtable = segtable[segtable['trajectory'].isin(restable['trajectory'])]
restable = restable[restable.trajectory.isin(segtable.trajectory)]
sigmavals, trajectories = sigmaval_finder(restable, sigma_start, sigma_end)
restable = restable[restable.trajectory.isin(trajectories)]
segtable = segtable[segtable.trajectory.isin(trajectories)]
groupedsegs = segtable.groupby(['trajectory'], as_index=False)
starttimes = groupedsegs['x1'].min()
endtimes = groupedsegs['x2'].max()
startendtimes = pd.merge(left=starttimes, right = endtimes, how='left', left_on='trajectory', right_on='trajectory')
mergedfiltresults = pd.merge(left=restable,right=startendtimes, how='left', left_on='trajectory', right_on='trajectory')
finefiltresults = mergedfiltresults[(mergedfiltresults['time'] >= mergedfiltresults['x1'])
& (mergedfiltresults['time'] <= mergedfiltresults['x2'])]
segmentsfine = beadpy.segment_finder(finefiltresults, sigma = sigmavals)
return segmentsfine;
def ratehist(segtable, minimumrate = 5, maximumrate = 1000, numbins = 50, weighting = 'displacement'):
fig = plt.figure()
ax = fig.add_subplot(111)
x = segtable[(segtable['rate'] > minimumrate) & (segtable['rate'] < maximumrate)]
if weighting == 'none':
n, bins, patches = P.hist(x.rate, bins = numbins, edgecolor = 'black')
P.text(0.95, 0.95, str(len(x)) + ' segments \n' + 'from ' + str(x.trajectory.nunique()) + ' events',
verticalalignment='top', horizontalalignment='right',
color='magenta', fontsize=15, transform=ax.transAxes)
elif weighting == 'fastest':
xgrouped = x.groupby(['trajectory'])
maxrates = xgrouped['rate'].max()
n, bins, patches = P.hist(maxrates, bins = numbins, edgecolor = 'black')
P.text(0.95, 0.95, str(x.trajectory.nunique()) + ' events',
verticalalignment='top', horizontalalignment='right',
color='magenta', fontsize=15, transform=ax.transAxes)
elif weighting == 'longest':
xgrouped = x.groupby(['trajectory'])
x = xgrouped.apply(lambda g: g[g['displacement'] == g['displacement'].max()])
n, bins, patches = P.hist(x.rate, bins = numbins, edgecolor = 'black')
P.text(0.95, 0.95, str(x.trajectory.nunique()) + ' events',
verticalalignment='top', horizontalalignment='right',
color='magenta', fontsize=15, transform=ax.transAxes)
else:
n, bins, patches = P.hist(x.rate, bins = numbins, weights = x[weighting]/sum(x[weighting]), normed = 1, edgecolor = 'black')
P.text(0.95, 0.95, str(len(x)) + ' segments \n' + 'from ' + str(x.trajectory.nunique()) + ' events',
verticalalignment='top', horizontalalignment='right',
color='magenta', fontsize=15, transform=ax.transAxes)
P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
P.xlabel('Rate (nt/s)')
P.savefig('rates.png', dpi = 300)
def processivity(segtable, minimumprocessivity = 0, maximumprocessivity=15000, binno = 15, skipno = 0, initialguesses = 'default'):
fig = plt.figure()
ax = fig.add_subplot(111)
groupedsegs = segtable.groupby(['trajectory'], as_index=False)
starty = groupedsegs['y1'].min()
endy = groupedsegs['y2'].max()
displacements = | pd.merge(left=starty, right = endy, how='left', left_on='trajectory', right_on='trajectory') | pandas.merge |
import pandas as pd
import os, re, fnmatch, subprocess
from collections import defaultdict
from shlex import quote
from igf_data.illumina.samplesheet import SampleSheet
from igf_data.igfdb.igfTables import Experiment, Run
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.igfdb.experimentadaptor import ExperimentAdaptor
from igf_data.igfdb.runadaptor import RunAdaptor
from igf_data.igfdb.collectionadaptor import CollectionAdaptor
from igf_data.igfdb.fileadaptor import FileAdaptor
from igf_data.utils.fileutils import calculate_file_checksum
class Collect_seqrun_fastq_to_db:
'''
  A class for collecting raw fastq files after demultiplexing and storing them in the database.
  Additionally, this will also create the relevant entries for the experiment and run tables in the database.
  :param fastq_dir: A directory path for file lookup
:param model_name: Sequencing platform information
:param seqrun_igf_id: Sequencing run name
:param session_class: A database session class
:param flowcell_id: Flowcell information for the run
:param samplesheet_file: Samplesheet filepath
:param samplesheet_filename: Name of the samplesheet file, default SampleSheet.csv
:param collection_type: Collection type information for new fastq files, default demultiplexed_fastq
:param file_location: Fastq file location information, default HPC_PROJECT
:param collection_table: Collection table information for fastq files, default run
:param manifest_name: Name of the file manifest file, default file_manifest.csv
:param singlecell_tag: Samplesheet description for singlecell samples, default 10X
'''
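  # Hypothetical usage sketch (all argument values below are placeholders):
  #
  #   collector = Collect_seqrun_fastq_to_db(
  #     fastq_dir='/path/to/demultiplexed/run',
  #     model_name='HISEQ4000',
  #     seqrun_igf_id='180101_K00001_0001_FLOWCELL1',
  #     session_class=session_class,
  #     flowcell_id='FLOWCELL1')
  #   collector.find_fastq_and_build_db_collection()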
def __init__(
self,fastq_dir,model_name,seqrun_igf_id,session_class,flowcell_id,
samplesheet_file=None,samplesheet_filename='SampleSheet.csv',
collection_type='demultiplexed_fastq',file_location='HPC_PROJECT',
collection_table='run', manifest_name='file_manifest.csv',singlecell_tag='10X'):
self.fastq_dir = fastq_dir
self.samplesheet_file = samplesheet_file
self.samplesheet_filename = samplesheet_filename
self.seqrun_igf_id = seqrun_igf_id
self.model_name = model_name
self.session_class = session_class
self.collection_type = collection_type
self.file_location = file_location
self.flowcell_id = flowcell_id
self.collection_table = collection_table
self.manifest_name = manifest_name
self.singlecell_tag = singlecell_tag
def find_fastq_and_build_db_collection(self):
'''
    A method for finding fastq files and the samplesheet under a run directory
    and loading the new files to the db with their experiment and run information.
    It calculates the following entries

    * library_name
      Same as sample_id unless mentioned in the 'Description' field of the samplesheet
    * experiment_igf_id
      library_name combined with the platform name;
      the same library sequenced on a different platform will be added as a separate experiment
    * run_igf_id
      experiment_igf_id combined with the sequencing flowcell_id and lane_id
    * collection name
      Same as run_igf_id; fastq files will be added to the db collection using this id
    * collection type
      Default type for fastq file collections is 'demultiplexed_fastq'
    * file_location
      Default value is 'HPC_PROJECT'
'''
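    # Worked example of the naming convention above (illustrative values only):
    #   sample_igf_id='IGF0001', model_name='HISEQ4000', flowcell_id='FLOWCELL1', lane='1'
    #   library_name      -> 'IGF0001'
    #   experiment_igf_id -> 'IGF0001_HISEQ4000'
    #   run_igf_id        -> 'IGF0001_HISEQ4000_FLOWCELL1_1'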
try:
fastq_files_list = \
self._collect_fastq_and_sample_info()
self._build_and_store_exp_run_and_collection_in_db(
fastq_files_list=fastq_files_list)
except Exception as e:
raise ValueError(
'Failed to find fastq and build collection, error: {0}'.\
format(e))
def _get_fastq_and_samplesheet(self):
try:
fastq_dir = self.fastq_dir
samplesheet_file = self.samplesheet_file
samplesheet_filename = self.samplesheet_filename
r1_fastq_regex = \
re.compile(r'\S+_R1_\d+\.fastq(\.gz)?', re.IGNORECASE)
r2_fastq_regex = \
re.compile(r'\S+_R2_\d+\.fastq(\.gz)?', re.IGNORECASE)
samplesheet_list = list()
r1_fastq_list = list()
r2_fastq_list = list()
if os.path.isdir(fastq_dir):
for root, _, files in os.walk(top=fastq_dir):
if samplesheet_filename in files:
samplesheet_list.append(
os.path.join(root,samplesheet_filename))
for file in files:
            if not fnmatch.fnmatch(file, 'Undetermined_*'):    # skip undetermined reads
if r1_fastq_regex.match(file):
r1_fastq_list.\
append(os.path.join(root,file))
elif r2_fastq_regex.match(file):
r2_fastq_list.\
append(os.path.join(root,file))
if len(r2_fastq_list) > 0 and \
len(r1_fastq_list) != len(r2_fastq_list):
raise ValueError(
'R1 {0} and R2 {1}'.format(
len(r1_fastq_list),
len(r2_fastq_list)))
if samplesheet_file is None and \
len(samplesheet_list)==1:
self.samplesheet_file = samplesheet_list[0] # set samplesheet file name
if len(samplesheet_list) > 1:
raise ValueError(
'Found more than one samplesheet file for fastq dir {0}'.\
format(fastq_dir))
if samplesheet_file is None and \
len(samplesheet_list)==0:
raise ValueError(
'No samplesheet file for fastq dir {0}'.\
format(fastq_dir))
elif os.path.isfile(fastq_dir):
if samplesheet_file is None:
raise ValueError(
'Missing samplesheet file for fastq file {0}'.\
format(fastq_dir))
        root = os.path.dirname(fastq_dir)                       # single fastq file input
        file = os.path.basename(fastq_dir)
        if not fnmatch.fnmatch(file, 'Undetermined_*'):
          if r1_fastq_regex.match(file):
            r1_fastq_list.\
              append(os.path.join(root,file))
          elif r2_fastq_regex.match(file):
            r2_fastq_list.\
              append(os.path.join(root,file))
return r1_fastq_list, r2_fastq_list
except Exception as e:
raise ValueError(
'Failed to get fastq and samplesheet, error: {0}'.\
format(e))
@staticmethod
def _link_fastq_file_to_sample(sample_name,r1_fastq_list, r2_fastq_list):
try:
sample_files = \
defaultdict(lambda: defaultdict(lambda: defaultdict()))
r1_regex = \
re.compile(
sample_name+'_S\d+_L(\d+)_R1_\d+\.fastq(\.gz)?',
re.IGNORECASE)
for file1 in r1_fastq_list:
if r1_regex.match(os.path.basename(file1)):
m = r1_regex.match(os.path.basename(file1))
lane_id = m.group(1).strip('0')
sample_files[lane_id]['R1'] = file1
if len(r2_fastq_list) > 0:
r2_regex = \
re.compile(
sample_name+'_S\d+_L(\d+)_R2_\d+\.fastq(\.gz)?',
re.IGNORECASE)
for file2 in r2_fastq_list:
if r2_regex.match(os.path.basename(file2)):
m = r2_regex.match(os.path.basename(file2))
lane_id = m.group(1).strip('0')
sample_files[lane_id]['R2'] = file2
return sample_files
except Exception as e:
raise ValueError(
'Failed to link fastq to sample, error: {0}'.format(e))
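  # Example of the mapping returned above: for sample_name 'S1' with files
  # 'S1_S1_L001_R1_001.fastq.gz' and 'S1_S1_L001_R2_001.fastq.gz' the result is
  #   {'1': {'R1': '<dir>/S1_S1_L001_R1_001.fastq.gz', 'R2': '<dir>/S1_S1_L001_R2_001.fastq.gz'}}
  # i.e. keyed by lane number (zero padding stripped), then by read type.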
def _collect_fastq_and_sample_info(self):
'''
An internal method for collecting fastq and sample info
'''
try:
seqrun_igf_id = self.seqrun_igf_id
model_name = self.model_name
flowcell_id = self.flowcell_id
(r1_fastq_list, r2_fastq_list) = \
self._get_fastq_and_samplesheet()
samplesheet_file = self.samplesheet_file
final_data = list()
samplesheet_sc = \
SampleSheet(infile=samplesheet_file) # read samplesheet for single cell check
samplesheet_sc.\
filter_sample_data(
condition_key='Description',
condition_value=self.singlecell_tag,
method='include') # keep only single cell samples
if len(samplesheet_sc._data) >0:
sc_new_data = \
pd.DataFrame(samplesheet_sc._data).\
drop(['Sample_ID','Sample_Name','index'],axis=1).\
drop_duplicates().\
to_dict(orient='records') # remove duplicate entries from single cell samplesheet
final_data.extend(sc_new_data) # add single cell entries to the final dataset
samplesheet_data = \
SampleSheet(infile=samplesheet_file)
samplesheet_data.\
filter_sample_data(
condition_key='Description',
condition_value=self.singlecell_tag,
method='exclude') # keep non single cell samples
if len(samplesheet_data._data) > 0:
final_data.\
extend(samplesheet_data._data) # add normal samples to final data
fastq_files_list = list()
for row in final_data:
description = row['Description']
if description==self.singlecell_tag: # collect required values for single cell projects
sample_name = row['Original_Sample_Name']
sample_id = row['Original_Sample_ID']
else:
sample_name = row['Sample_Name'] # collect default values for normal projects
sample_id = row['Sample_ID']
project_name = row['Sample_Project']
sample_files = \
self._link_fastq_file_to_sample(
sample_name,
r1_fastq_list,
r2_fastq_list)
for lane, lane_files in sample_files.items():
fastq_info = {
'sample_igf_id':sample_id,
'sample_name':sample_name,
'project_igf_id':project_name,
'lane_number':lane,
'seqrun_igf_id':seqrun_igf_id,
'platform_name':model_name,
'flowcell_id':flowcell_id,
'description':description }
for read_type, filepath in lane_files.items():
fastq_info.\
update({read_type:filepath}) # allowing only one file per lane per read type
fastq_files_list.\
append(fastq_info) # adding entries per sample per lane
return fastq_files_list
except Exception as e:
raise ValueError(
'Failed to collect info, error: {0}'.format(e))
@staticmethod
def _count_fastq_reads(fastq_file):
'''
    A static method for counting reads from gzipped, bzipped or uncompressed fastq files
required params:
fastq_file: A fastq file with absolute path
'''
try:
if not os.path.exists(fastq_file):
raise IOError(
'fastq file {0} is not found'.format(fastq_file))
if fnmatch.fnmatch(os.path.basename(fastq_file),'*.fastq.gz'):
read_cmd = ['zcat',quote(fastq_file)]
elif fnmatch.fnmatch(os.path.basename(fastq_file),'*.fastq.bz'):
read_cmd = ['bzcat',quote(fastq_file)]
elif fnmatch.fnmatch(os.path.basename(fastq_file),'*.fastq'):
read_cmd = ['cat',quote(fastq_file)]
else:
raise ValueError(
'file {0} is not recognised'.format(fastq_file))
proc = \
subprocess.\
Popen(
read_cmd,
stdout=subprocess.PIPE)
count_cmd = ['wc','-l']
proc2 = \
subprocess.\
Popen(
count_cmd,
stdin=proc.stdout,
stdout=subprocess.PIPE)
proc.stdout.close()
result = \
int(proc2.communicate()[0].decode('UTF-8'))
if result==0:
raise ValueError(
'Fastq file {0} has zero lines'.format(fastq_file))
result = int(result/4)
return result
except Exception as e:
raise ValueError(
'Failed to count fastq reads, error: {0}'.format(e))
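  # Equivalent pure-Python sketch for the gzip case (for clarity only; the method above
  # shells out to zcat/bzcat/cat piped into `wc -l` instead):
  #
  #   import gzip
  #   with gzip.open(fastq_file, 'rt') as fh:
  #       n_lines = sum(1 for _ in fh)
  #   n_reads = n_lines // 4   # a FASTQ record is exactly four lines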
def _calculate_experiment_run_and_file_info(self,data,restricted_list):
try:
if not isinstance(data, pd.Series):
data = pd.Series(data)
# set library id
library_id = data.sample_igf_id
# calcaulate experiment id
experiment_id = \
'{0}_{1}'.format(library_id,data.platform_name)
data['library_name'] = library_id
data['experiment_igf_id'] = experiment_id
# calculate run id
run_igf_id = \
'{0}_{1}_{2}'.format(
experiment_id,
data.flowcell_id,
data.lane_number)
data['run_igf_id'] = run_igf_id
# set collection name and type
data['name'] = run_igf_id
data['type'] = self.collection_type
data['table'] = self.collection_table
data['location'] = self.file_location
# set file md5 and size
if 'R1' in data:
data['R1_md5'] = \
calculate_file_checksum(
filepath=data.R1,
hasher='md5')
data['R1_size'] = \
os.path.getsize(data.R1)
data['R1_READ_COUNT'] = \
self._count_fastq_reads(
fastq_file=data.R1)
if 'R2' in data:
data['R2_md5'] = \
calculate_file_checksum(
filepath=data.R2,
hasher='md5')
data['R2_size'] = \
os.path.getsize(data.R2)
data['R2_READ_COUNT'] = \
self._count_fastq_reads(
fastq_file=data.R2)
# set library strategy
library_layout = 'SINGLE'
if 'R1' in data and 'R2' in data and \
data.R1 is not None and data.R2 is not None:
library_layout='PAIRED'
data['library_layout'] = library_layout
return data
except Exception as e:
raise ValueError(
'Failed to calculate exp, run and file, error: {0}'.\
format(e))
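  # Worked example with hypothetical IDs (for illustration only): for
  # sample_igf_id 'IGF0001' on platform 'HISEQ4000', flowcell 'HXYZ' and lane 1,
  # the code above derives experiment_igf_id 'IGF0001_HISEQ4000' and
  # run_igf_id 'IGF0001_HISEQ4000_HXYZ_1', and reuses the run id as the
  # collection name.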
@staticmethod
def _reformat_file_group_data(data):
try:
if isinstance(data, pd.DataFrame):
data = data.to_dict(orient='records')
if not isinstance(data,list):
raise ValueError(
'Expecting list got {0}'.format(type(data)))
reformatted_file_group_data = list()
reformatted_file_data = list()
for row in data:
collection_name = None
collection_type = None
file_location = None
if 'name' in row.keys():
collection_name = row['name']
if 'type' in row.keys():
collection_type = row['type']
if 'location' in row.keys():
file_location = row['location']
if 'R1' in row.keys():
r1_file_path = row['R1']
r1_file_size = \
row['R1_size'] if 'R1_size' in row.keys() else None
r1_file_md5 = \
row['R1_md5'] if 'R1_md5' in row.keys() else None
reformatted_file_data.\
append({
'file_path':r1_file_path,
'md5':r1_file_md5,
'location':file_location,
'size':r1_file_size})
reformatted_file_group_data.\
append({
'name':collection_name,
'type':collection_type,
'file_path':r1_file_path})
if 'R2' in row.keys():
r2_file_path = row['R2']
r2_file_size = \
row['R2_size'] if 'R2_size' in row.keys() else None
r2_file_md5 = \
row['R2_md5'] if 'R2_md5' in row.keys() else None
reformatted_file_data.\
append({
'file_path':r2_file_path,
'md5':r2_file_md5,
'location':file_location,
'size':r2_file_size})
reformatted_file_group_data.\
append({
'name':collection_name,
'type':collection_type,
'file_path':r2_file_path})
file_data = \
pd.DataFrame(reformatted_file_data)
      file_data = file_data.dropna() # removing rows with None values
file_group_data = \
pd.DataFrame(reformatted_file_group_data)
file_group_data = \
file_group_data.dropna() # removing rows with None values
return file_data, file_group_data
except Exception as e:
raise ValueError(
'Failed to reformat file group data, error: {0}'.\
format(e))
def _write_manifest_file(self,file_data):
'''
An internal method for writing file data to the manifest file
'''
try:
manifest_name = self.manifest_name
fastq_dir = self.fastq_dir
manifest_path = \
os.path.join(fastq_dir,manifest_name)
if os.path.exists(manifest_path):
raise ValueError(
'manifest file {0} already present'.\
format(manifest_path))
if isinstance(file_data, list):
file_data = pd.DataFrame(file_data) # convert file data to dataframe
file_data['file_path'] = \
file_data['file_path'].\
map(
lambda x: \
os.path.relpath(x, start=fastq_dir)) # replace filepath with relative path
file_data = \
file_data.drop(['location'],axis=1) # remove file location info
file_data.\
to_csv(
manifest_path,
sep='\t',
encoding='utf-8',
index=False) # write data to manifest file
except Exception as e:
raise ValueError(
'Failed to write manifest file, error: {0}'.\
format(e))
@staticmethod
def _check_existing_data(data,dbsession,table_name,check_column='EXISTS'):
try:
if not isinstance(data, pd.Series):
raise ValueError(
'Expecting a data series and got {0}'.\
format(type(data)))
if table_name=='experiment':
if 'experiment_igf_id' in data and \
not pd.isnull(data['experiment_igf_id']):
experiment_igf_id = data['experiment_igf_id']
ea = \
ExperimentAdaptor(**{'session':dbsession})
experiment_exists = \
ea.check_experiment_records_id(
experiment_igf_id)
if experiment_exists: # store data only if experiment is not existing
data[check_column] = True
else:
data[check_column] = False
return data
else:
raise ValueError(
'Missing or empty required column experiment_igf_id')
elif table_name=='run':
if 'run_igf_id' in data and \
not pd.isnull(data['run_igf_id']):
run_igf_id = data['run_igf_id']
ra = RunAdaptor(**{'session':dbsession})
run_exists = \
ra.check_run_records_igf_id(run_igf_id)
if run_exists: # store data only if run is not existing
data[check_column] = True
else:
data[check_column] = False
return data
else:
raise ValueError(
'Missing or empty required column run_igf_id')
elif table_name=='collection':
if 'name' in data and 'type' in data and \
not pd.isnull(data['name']) and \
not | pd.isnull(data['type']) | pandas.isnull |
import os
import re
from collections import Counter, defaultdict
import itertools
from scipy.spatial.distance import cosine
from sklearn.neighbors import NearestNeighbors
import util as ut
import os.path
from numpy import linalg as LA
import numpy as np
import pandas as pd
import time
import warnings
import sys
from abc import ABC, abstractmethod
#import hdbscan
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from webnlg_util import clean_url
warnings.filterwarnings('ignore')
def performance_debugger(func_name):
    def function_name_decorator(func):
def debug(*args, **kwargs):
long_string = ''
starT = time.time()
print('\n\n######', func_name, ' starts ######')
r = func(*args, **kwargs)
print(func_name, ' took ', time.time() - starT, ' seconds\n')
long_string += str(func_name) + ' took:' + str(time.time() - starT) + ' seconds'
return r
return debug
    return function_name_decorator
class SimilarityCalculator(ABC):
def __init__(self):
self._inverted_index = None
self._num_triples = None
@abstractmethod
def get_similarities(self, inverted_index, num_triples, top_K):
pass
class PPMI(SimilarityCalculator):
def __init__(self):
"""
:param co_occurrences: term to list of terms
:param num_triples:
"""
super().__init__()
self._marginal_probs = None
def calculate_marginal_probabilities(self):
marginal_probs = dict()
for unq_ent, list_of_context_ent in enumerate(self.inverted_index):
            # N is multiplied by 2 as list_of_context_ent contains the other two elements of each RDF triple
probability = len(list_of_context_ent) / (self._num_triples * 2)
marginal_probs[unq_ent] = probability
self._marginal_probs = marginal_probs
@performance_debugger('Calculation of PPMIs')
def calculate_ppmi(self) -> np.array:
holder = list()
for unq_ent, list_of_context_ent in enumerate(self.inverted_index):
top_k_sim = dict()
marginal_prob_of_target = self._marginal_probs[unq_ent]
statistical_info_of_cooccurrences = Counter(list_of_context_ent)
top_k_sim.setdefault(unq_ent, dict())
for context_ent, co_occuring_freq in statistical_info_of_cooccurrences.items():
joint_prob = co_occuring_freq / self._num_triples
marginal_prob_of_context = self._marginal_probs[context_ent]
denominator = marginal_prob_of_target * marginal_prob_of_context
PMI_val = np.log2(joint_prob) - np.log2(denominator)
if PMI_val <= 0:
continue
if len(top_k_sim[unq_ent]) <= self._topK:
top_k_sim[unq_ent][context_ent] = PMI_val.astype(np.float32)
else:
for k, v in top_k_sim[unq_ent].items():
if v < PMI_val:
top_k_sim[unq_ent][context_ent] = PMI_val
del top_k_sim[unq_ent][k]
break
context = np.array(list(top_k_sim[unq_ent].keys()), dtype=np.uint32)
sims = np.array(list(top_k_sim[unq_ent].values()), dtype=np.float32)
sims.shape = (sims.size, 1)
            # sampled may contain duplicated variables
sampled = np.random.choice(len(self.inverted_index), self._topK)
            # negatives must be disjoint from the context of the k-th vocabulary term and the k-th term itself
negatives = np.setdiff1d(sampled, np.append(context, unq_ent), assume_unique=True)
holder.append((context, sims, negatives))
return holder
def get_similarities(self, inverted_index, num_triples, top_K):
"""
:param inverted_index:
:param num_triples:
:return: similarities data structure is a numpy array of dictionaries.
i.th element of the numpy array corresponds to i.th element in the vocabulary.
The dictionary stored in the i.th element:
Key: a vocabulary term
Val: PPMI value
"""
self.inverted_index = inverted_index
self._num_triples = num_triples
self._topK = top_K
self.calculate_marginal_probabilities()
similarities = self.calculate_ppmi()
return similarities
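# Worked example with illustrative numbers (not taken from any dataset): with
# num_triples = 100, a target term whose marginal probability is 0.10 and a
# context term whose marginal is 0.20 (marginals are list length divided by
# 2 * num_triples), co-occurring 5 times, give joint = 5/100 = 0.05 and
# PMI = log2(0.05) - log2(0.10 * 0.20) = log2(2.5) ~ 1.32; the pair is kept
# because only positive PMI values survive (hence "PPMI").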
class Parser:
def __init__(self, logger=False, p_folder: str = 'not initialized', k=1):
self.path = 'uninitialized'
self.logger = logger
self.p_folder = p_folder
self.similarity_function = None
self.similarity_measurer = None
self.K = int(k)
self.logger = None
def set_logger(self, logger):
self.logger = logger
def set_similarity_function(self, f):
self.similarity_function = f
def set_similarity_measure(self, f):
self.similarity_measurer = f
def set_experiment_path(self, p):
self.p_folder = p
def set_k_entities(self, k):
self.K = k
def get_path_knowledge_graphs(self, path: str):
"""
:param path: str represents path of a KB or path of folder containg KBs
:return:
"""
KGs = list()
if os.path.isfile(path):
KGs.append(path)
else:
for root, dir, files in os.walk(path):
for file in files:
if '.nq' in file or '.nt' in file or 'ttl' in file or '.txt' in file:
KGs.append(path + '/' + file)
if len(KGs) == 0:
self.logger.info(
                '{0} is not a path for a file or a folder containing any .nq, .nt, .ttl or .txt formatted files'.format(path))
self.logger.info('Execution is terminated.')
exit(1)
return KGs
@staticmethod
def decompose_rdf(sentence):
flag = 0
components = re.findall('<(.+?)>', sentence)
#components = sentence.split()
if len(components) == 2:
s, p = components
remaining_sentence = sentence[sentence.index(p) + len(p) + 2:]
literal = remaining_sentence[:-1]
o = literal
flag = 2
elif len(components) == 4:
del components[-1]
s, p, o = components
flag = 4
elif len(components) == 3:
s, p, o = components
flag = 3
elif len(components) > 4:
s = components[0]
p = components[1]
remaining_sentence = sentence[sentence.index(p) + len(p) + 2:]
literal = remaining_sentence[:remaining_sentence.index(' <http://')]
o = literal
else:
## This means that literal contained in RDF triple contains < > symbol
raise ValueError()
        o = re.sub(r"\s+", "", o)
        s = re.sub(r"\s+", "", s)
        p = re.sub(r"\s+", "", p)
return s, p, o, flag
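    # Example with a hypothetical triple (illustration only): the N-Triples
    # line '<http://ex.org/s> <http://ex.org/p> <http://ex.org/o> .' yields
    # three <...> components, so decompose_rdf returns the three URIs with
    # flag=3; a line with only two <...> components is treated as having a
    # literal object and returns flag=2.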
@performance_debugger('Preprocessing')
def pipeline_of_preprocessing(self, f_name, extra_triples=[], bound=''):
inverted_index, num_of_rdf, similar_characteristics = self.inverted_index(f_name, bound, extra_triples=extra_triples)
holder = self.similarity_measurer().get_similarities(inverted_index, num_of_rdf, self.K)
return holder
@performance_debugger('Constructing Inverted Index')
def inverted_index(self, path, bound, extra_triples = []):
inverted_index = {}
vocabulary = {}
similar_characteristics = defaultdict(lambda: defaultdict(list))
num_of_rdf = 0
type_info = defaultdict(set)
sentences = ut.generator_of_reader(bound, self.get_path_knowledge_graphs(path), self.decompose_rdf)
for s, p, o in sentences:
ss = clean_url(s)
pp = clean_url(p)
oo = clean_url(o)
num_of_rdf += 1
# mapping from string to vocabulary
vocabulary.setdefault(ss, len(vocabulary))
vocabulary.setdefault(pp, len(vocabulary))
vocabulary.setdefault(oo, len(vocabulary))
inverted_index.setdefault(vocabulary[ss], []).extend([vocabulary[oo], vocabulary[pp]])
inverted_index.setdefault(vocabulary[pp], []).extend([vocabulary[ss], vocabulary[oo]])
inverted_index.setdefault(vocabulary[oo], []).extend([vocabulary[ss], vocabulary[pp]])
if 'rdf-syntax-ns#type' in p:
type_info[vocabulary[ss]].add(vocabulary[oo])
self.logger.info("Before")
self.logger.info('Number of RDF triples:\t{0}'.format(num_of_rdf))
self.logger.info('Number of vocabulary terms:\t{0}'.format(len(vocabulary)))
self.logger.info('Number of subjects with type information:\t{0}'.format(len(type_info)))
self.logger.info('Number of types :\t{0}'.format(len(set(itertools.chain.from_iterable(type_info.values())))))
#Adding extra triples
for s, p, o in extra_triples:
num_of_rdf += 1
# mapping from string to vocabulary
vocabulary.setdefault(s, len(vocabulary))
vocabulary.setdefault(p, len(vocabulary))
vocabulary.setdefault(o, len(vocabulary))
inverted_index.setdefault(vocabulary[s], []).extend([vocabulary[o], vocabulary[p]])
inverted_index.setdefault(vocabulary[p], []).extend([vocabulary[s], vocabulary[o]])
inverted_index.setdefault(vocabulary[o], []).extend([vocabulary[s], vocabulary[p]])
###################################################################
self.logger.info("After")
self.logger.info('Number of RDF triples:\t{0}'.format(num_of_rdf))
self.logger.info('Number of vocabulary terms:\t{0}'.format(len(vocabulary)))
self.logger.info('Number of subjects with type information:\t{0}'.format(len(type_info)))
self.logger.info('Number of types :\t{0}'.format(len(set(itertools.chain.from_iterable(type_info.values())))))
if num_of_rdf == 0:
self.logger.info('Exception at parsing dataset: No RDF triple processed.')
self.logger.info('Terminating')
exit(1)
assert list(inverted_index.keys()) == list(range(0, len(vocabulary)))
vocabulary = list(vocabulary.keys())
self.logger.info('Vocabulary being serialized. Note that ith vocabulary has ith. representation')
ut.serializer(object_=vocabulary, path=self.p_folder, serialized_name='vocabulary')
del vocabulary
inverted_index = list(inverted_index.values())
self.logger.info('Inverted Index being serialized. Note that ith vocabulary term has ith. document')
ut.serializer(object_=inverted_index, path=self.p_folder, serialized_name='inverted_index')
ut.serializer(object_=type_info, path=self.p_folder, serialized_name='type_info')
del type_info
return inverted_index, num_of_rdf, similar_characteristics
class PYKE(object):
def __init__(self, epsilon=0.01, logger=None):
self.epsilon = epsilon
self.ratio = list()
self.system_energy = 1
self.logger = logger
@staticmethod
def apply_hooke_s_law(embedding_space, target_index, context_indexes, PMS):
"""
:param embedding_space:
:param target_index:
:param context_indexes:
:param PMS:
:return:
"""
dist = embedding_space[context_indexes] - embedding_space[target_index]
        # replace exact zeros with 0.01 so that zero distances still contribute a small pull
dist[dist == 0] = 0.01
        # weight each distance by its PPMI (PMS) score to obtain the attractive pull
pull = dist * PMS
total_pull = np.sum(pull, axis=0)
return total_pull, np.abs(dist).sum()
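    # In symbols (restating the code above): with target embedding e_t, context
    # embeddings e_c and PPMI weights w_c, the attractive force is
    #     pull = sum_c (e_c - e_t) * w_c
    # so each context entity pulls the target towards itself in proportion to
    # its PPMI score; the second return value, sum_c |e_c - e_t|, only feeds
    # the distance logging in go_through_entities.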
@staticmethod
def apply_inverse_hooke_s_law(embedding_space, target_index, repulsive_indexes, omega):
"""
:param embedding_space:
:param target_index:
:param repulsive_indexes:
        :param omega:
:return:
"""
# calculate distance from target to repulsive entities
dist = embedding_space[repulsive_indexes] - embedding_space[target_index]
        # replace exact zeros with 0.01 to avoid division by zero in np.reciprocal below
dist[dist == 0] = 0.01
with warnings.catch_warnings():
try:
total_push = -omega * np.reciprocal(dist).sum(axis=0)
# replace all zeros to 1 if needed
# total_push[total_push == 0] = 0.01
except RuntimeWarning as r:
print(r)
print("Unexpected error:", sys.exc_info()[0])
exit(1)
return total_push, np.abs(dist).sum()
def go_through_entities(self, e, holder, omega):
sum_pos_sem_dist = 0
sum_neg_sem_dist = 0
for target_index in range(len(e)):
            indexes_of_attractive, pms_of_context, indexes_of_repulsive = holder[target_index]
            pull, p = self.apply_hooke_s_law(e, target_index, indexes_of_attractive, pms_of_context)
sum_pos_sem_dist += p
push, n = self.apply_inverse_hooke_s_law(e, target_index, indexes_of_repulsive,
omega)
sum_neg_sem_dist += n
total_effect = (pull + push) * self.system_energy
e[target_index] = e[target_index] + total_effect
semantic_distance = dict()
semantic_distance['pos'] = sum_pos_sem_dist
semantic_distance['neg'] = sum_neg_sem_dist
return e, semantic_distance
@performance_debugger('Generating Embeddings:')
def pipeline_of_learning_embeddings(self, *, e, max_iteration, energy_release_at_epoch, holder, omega):
for epoch in range(max_iteration):
self.logger.info('EPOCH: {0}'.format(epoch))
previous_f_norm = LA.norm(e, 'fro')
e, semantic_dist = self.go_through_entities(e, holder, omega)
self.system_energy = self.system_energy - energy_release_at_epoch
self.logger.info(
'Distance:{0}\t System Energy:{1} \t Distance Ratio:{2}'.format(semantic_dist, self.system_energy,
semantic_dist['pos'] / semantic_dist[
'neg']))
e = np.nan_to_num(e)
with warnings.catch_warnings():
try:
e = (e - e.min(axis=0)) / (e.max(axis=0) - e.min(axis=0))
except RuntimeWarning as r:
print(r)
print(e.mean())
print(np.isnan(e).any())
print(np.isinf(e).any())
exit(1)
new_f_norm = LA.norm(e, 'fro')
if self.equilibrium(epoch, previous_f_norm, new_f_norm):
e = np.nan_to_num(e)
break
return | pd.DataFrame(e) | pandas.DataFrame |
# pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
import pandas.tseries.offsets as offsets
import pandas as pd
import unittest
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
class TestResample(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
        # GH2763 - return input dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assert_((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assert_((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assert_((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M', how='mean')
expected = ts.groupby(lambda x: x.month).mean()
self.assertEquals(len(result), 2)
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
| tm.assert_panel_equal(result, expected) | pandas.util.testing.assert_panel_equal |
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCIndex
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.integer import (
Int8Dtype,
UInt32Dtype,
)
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": pd.array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
if op in {"sum", "prod", "min", "max"}:
assert isinstance(result, np.int64)
else:
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Int64")},
index= | pd.Index(["a", "b"], name="A") | pandas.Index |
from __future__ import annotations
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import gamma, exponnorm
sns.set()
BASE_PATH = Path('..', 'data', 'experimental')
INPUT_PATHS = [
BASE_PATH / 'control.csv',
BASE_PATH / 'tmz.csv',
]
CURVE_PARAMETERS = { # estimated from running "clovars fit" on "control.csv" and "tmz.csv"
'control_division_parameters': {
'type': gamma,
'loc': 16.23,
'scale': 2.84,
'a': 3.32,
},
'tmz_division_parameters': {
'type': exponnorm,
'loc': 12.72,
'scale': 8.50,
'K': 2.87,
},
'tmz_death_parameters': {
'type': exponnorm,
'loc': 55.09,
'scale': 23.75,
'K': 2.93,
},
}
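# Minimal sketch (not used by main() below): turning one CURVE_PARAMETERS entry
# into a frozen scipy distribution, assuming every key except 'type' is a
# keyword argument of that distribution:
#   params = dict(CURVE_PARAMETERS['control_division_parameters'])
#   dist = params.pop('type')(**params)   # e.g. gamma(a=3.32, loc=16.23, scale=2.84)
#   hours = np.linspace(0, 72, 200)
#   pdf = dist.pdf(hours)                 # density of division times in hours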
def main(
input_paths: list[Path],
curve_parameters: dict[str, dict[str, float]],
) -> None:
"""Main function of this script."""
dfs = []
for path in input_paths:
df = pd.melt(pd.read_csv(path, index_col=None), var_name='type', value_name='hours')
df['treatment'] = path.stem
dfs.append(df)
data = | pd.concat(dfs, ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Step 2X Data generation: Generate ground truth for stock markets based on OHLC data
License_info: ISC
ISC License
Copyright (c) 2020, <NAME>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# Futures
# Built-in/Generic Imports
# Libs
import argparse
import os
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.nonparametric.smoothers_lowess import lowess
import numpy as np
from scipy.ndimage.interpolation import shift
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
| register_matplotlib_converters() | pandas.plotting.register_matplotlib_converters |
import unittest
from unittest import TestCase
import env
import data
from eventgraphs import EventGraph, BadInputError
import pandas as pd
from pandas.testing import assert_frame_equal
DATASETS = [data.directed,
data.directed_hyper,
data.directed_hyper_single,
data.undirected_hyper,
data.extra_columns,
data.string_labels]
class IOTests(TestCase):
"""
Tests the input and output functionality of EventGraph class.
"""
def from_pandas(self, dataset):
""""""
df = pd.DataFrame(dataset)
EG = EventGraph.from_pandas_eventlist(events=df,
graph_rules='teg')
self.assertTrue(True)
def from_dict(self, dataset):
""""""
EG = EventGraph.from_dict_eventlist(events=dataset,
graph_rules='teg')
self.assertTrue(True)
def from_save_and_load(self, dataset):
""""""
EG = EventGraph.from_dict_eventlist(events=dataset,
graph_rules='teg')
EG.build()
EG.save('test.json')
LG = EventGraph.from_file('test.json')
# Cannot compare dataframes where columns are in different order
LG.eg_edges = LG.eg_edges[EG.eg_edges.columns]
assert_frame_equal(LG.events, EG.events)
| assert_frame_equal(LG.eg_edges, EG.eg_edges) | pandas.testing.assert_frame_equal |
from __future__ import division
# import libraries
from datetime import datetime, timedelta
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import plotly as py
import plotly.offline as pyoff
import plotly.graph_objs as go
# initiate Plotly
pyoff.init_notebook_mode() #it generates offline graph in jupyter notebook
#load our data from CSV
# tx_data = pd.read_csv("data.csv")
tx_data = pd.read_csv('data.csv', header=0, encoding='unicode_escape') # read the csv file; unicode_escape also handles non-ASCII characters
tx_data.head(10) # preview the first 10 rows
#convert the string date field to datetime
tx_data['InvoiceDate'] = pd.to_datetime(tx_data['InvoiceDate'])
tx_data['InvoiceDate'].describe()
# tx_data.head()
#we will be using only UK data
tx_uk = tx_data.query("Country=='United Kingdom'").reset_index(drop=True)
tx_user = pd.DataFrame(tx_uk['CustomerID'].unique()) # UK customers only, matching the comment above
tx_user.columns = ['CustomerID']
tx_uk.head(10)
tx_max_purchase = tx_uk.groupby('CustomerID').InvoiceDate.max().reset_index()
tx_max_purchase.columns = ['CustomerID','MaxPurchaseDate']
tx_max_purchase['Recency'] = (tx_max_purchase['MaxPurchaseDate'].max() - tx_max_purchase['MaxPurchaseDate']).dt.days
tx_user = pd.merge(tx_user, tx_max_purchase[['CustomerID','Recency']], on='CustomerID')
tx_user.head()
tx_user.describe()
plot_data = [ go.Histogram(x=tx_user['Recency'])]
plot_layout = go.Layout( title='Recency')
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
from sklearn.cluster import KMeans
sse={}
tx_recency = tx_user[['Recency']]
for k in range(1, 10):
kmeans = KMeans(n_clusters=k, max_iter=1000).fit(tx_recency)
tx_recency["clusters"] = kmeans.labels_
sse[k] = kmeans.inertia_
plt.figure()
plt.plot(list(sse.keys()), list(sse.values()))
plt.xlabel("Number of clusters")
plt.show()
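# Note (heuristic assumed here, not stated in the original): the cluster count
# is read off the "elbow" of the SSE curve above, i.e. the k after which the
# inertia stops dropping sharply; k=4 is used for Recency below.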
kmeans = KMeans(n_clusters=4)
kmeans.fit(tx_user[['Recency']])
tx_user['RecencyCluster'] = kmeans.predict(tx_user[['Recency']])
#function for ordering cluster numbers
def order_cluster(cluster_field_name, target_field_name,df,ascending):
new_cluster_field_name = 'new_' + cluster_field_name
df_new = df.groupby(cluster_field_name)[target_field_name].mean().reset_index()
df_new = df_new.sort_values(by=target_field_name,ascending=ascending).reset_index(drop=True)
df_new['index'] = df_new.index
df_final = pd.merge(df,df_new[[cluster_field_name,'index']], on=cluster_field_name)
df_final = df_final.drop([cluster_field_name],axis=1)
df_final = df_final.rename(columns={"index":cluster_field_name})
return df_final
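# Worked example with toy numbers (not from the data above): if three Recency
# clusters have mean Recency 300, 120 and 30 days and ascending=False, they are
# relabelled 0, 1 and 2 respectively, so a higher cluster number means a more
# recent customer; with ascending=True (used for Frequency below) the lowest
# mean gets label 0, keeping the convention that higher labels are better.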
tx_user = order_cluster('RecencyCluster', 'Recency',tx_user,False)
tx_frequency = tx_uk.groupby('CustomerID').InvoiceDate.count().reset_index()
tx_frequency.columns = ['CustomerID','Frequency']
#add this data to our main dataframe
tx_user = pd.merge(tx_user, tx_frequency, on='CustomerID')
#plot the histogram
plot_data = [
go.Histogram(
x=tx_user.query('Frequency < 1000')['Frequency']
)
]
plot_layout = go.Layout(
title='Frequency'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
kmeans = KMeans(n_clusters=6)
kmeans.fit(tx_user[['Frequency']])
tx_user['FrequencyCluster'] = kmeans.predict(tx_user[['Frequency']])
#order the frequency cluster
tx_user = order_cluster('FrequencyCluster', 'Frequency',tx_user,True)
#see details of each cluster
tx_user.groupby('FrequencyCluster')['Frequency'].describe()
tx_uk['Revenue'] = tx_uk['UnitPrice'] * tx_uk['Quantity']
tx_revenue = tx_uk.groupby('CustomerID').Revenue.sum().reset_index()
#merge it with our main dataframe
tx_user = | pd.merge(tx_user, tx_revenue, on='CustomerID') | pandas.merge |
import argparse, time,re, os,csv,functools, signal,sys, json
import logging,datetime, threading,concurrent.futures
from logging import handlers
from time import gmtime, strftime
from urllib.parse import urlparse
from os.path import splitext
import pandas as pd
import numpy as np
# Local Imports
from Lib.GCS.wrapper import Wrapper
from Lib.GCS.origin_settings import Origin_Settings
from Lib.GCS.log import ConsoleLogging
def ArgsParser():
parser = argparse.ArgumentParser(description='',formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--switchKey', type=str, help='Account_ID to Query for multi account management (switch key)',
required=False)
parser.add_argument('--verbose', action='store_true', help='Turn on Verbose Mode.')
parser.add_argument('--section', type=str, help='EdgeRc section to be used.',
required=False,default='papi')
    parser.add_argument('--type', type=str.lower, choices=['as','os','har'], help='Type of report to be done [account-summary, offload, http-archive].',
required=False,default='as')
parser.add_argument('--cpcodes', nargs='+', type=int, help='List of cpcodes to query. Used only in Offload Analysis.',
required=False)
    parser.add_argument('--start', type=str, help='Report Start date in format YYYY-MM-DD, if not provided default is start of last month. Used only in Offload Analysis.',
required=False)
    parser.add_argument('--end', type=str, help='Report End date in format YYYY-MM-DD, if not provided a default covering last month is used. Used only in Offload Analysis.',
required=False)
parser.add_argument('--domain', type=str, help='Main Domain to be reviewed in HAR, usually it will be the same as the page view URL. Used only in Har Analysis.',
required=False)
    parser.add_argument('--first-parties', nargs='+', type=str, help='List of first party domains; --domain will be appended to this list. If only one domain is in question, --domain is all you need. Used only in Har Analysis.',
required=False)
parser.add_argument('--file', type=str, help='File location to be analysed. Used only in Har Analysis.',
required=False)
parser.add_argument('--groupby', type=str.lower, choices=['ext','url'], help='Used only in Offload Analysis. ',
required=False,default='ext')
args = vars(parser.parse_args())
return parser, args
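# Example invocations (hypothetical script name and values, shown only to
# illustrate the flags defined above):
#   python aggregator.py --type as --section papi --verbose
#   python aggregator.py --type os --cpcodes 12345 67890 --start 2020-01-01 --end 2020-01-31
#   python aggregator.py --type har --file page.har --domain www.example.com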
class Aggregator:
def __init__(self,console,args,section_name):
self.args = None
self.parser = None
self.maxThreads = 5
self.outputdir = "None"
self.verbose = args['verbose']
self.log = console.log
self.wrapper = Wrapper(self.log,section_name)
self.accountId = None
self.wrapper.account = None
self.dfs = {}
self.startDate = None
self.endDate = None
self.accountName = None
self.productMap = None
self.reportType = "as"
self.groupby = args['groupby']
signal.signal(signal.SIGINT, self.signal_handler)
def signal_handler(self,sig, frame):
self.clear_cache()
self.log.critical("Forced Exit... Bye!..")
sys.exit(0)
def _validateDate(self, date):
"""Returns False if input date does not follow YYYY-MM-DD.
Keyword arguments:
date
Return type:
Boolean
"""
try:
datetime.datetime.strptime(str(date), '%Y-%m-%d')
return True
except ValueError:
return False
# raise ValueError("Incorrect data format, should be YYYY-MM-DD")
def createFolder(self,directoryName):
"""Creates directores to store outputs, takes the directory name. This value most of the time will be the
account Name.
Keyword arguments:
directoryName
Return type:
None
"""
self.outputdir = 'Reports'
# Create Audit Folder
try:
os.stat(self.outputdir)
except:
os.mkdir(self.outputdir)
self.outputdir = self.outputdir+'/'+directoryName.replace(' ','_')+'/'
# Create Account Folder under Audit
try:
os.stat(self.outputdir)
except:
os.mkdir(self.outputdir)
self.outputdir = self.outputdir + str(datetime.datetime.utcfromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')).replace(' ','_').replace(':','.') + '-'
def _getProducts(self, contractId):
"""
Return the set of products within a contract as a comma seperated list
Keyword arguments:
contractId
Return type:
list
"""
products = self.wrapper.getProducts(contractId)
productNames = []
if 'products' in products:
for product in products['products']['items']:
productNames.append(product['productName'])
new_row = {
'Product_ID':product['productId'],
'Product_Name':product['productName']
}
        if len(productNames) > 0:
return ",".join(productNames)
else:
return []
def getAccountDetails(self):
"""
Gets Account Name from ID, also saves the groups for later functions.
Keyword arguments:
None
Return type:
Boolean, but also stores dataframe in self.dfs
"""
if args['switchKey']:
self.accountId = args['switchKey']
self.wrapper.account = args['switchKey']
self.groups = self.wrapper.getGroups()
if 'incidentId' in self.groups:
            self.log.error('Account Not Found or insufficient privileges to complete the operation. Try "--section sectionName" to change the edgerc section')
return False
if not args['switchKey']:
self.accountId = self.groups['accountId'][4:]
# self.wrapper.account = self.groups['accountId'][4:]
self.log.info("Account ID: {0}".format(self.accountId))
self.accountName = self.groups['accountName']
self.log.info("Account Name: {0}".format(self.accountName))
csv_file_path = self.createFolder(self.groups['accountName'])
columns = ["Account_Id", "Account_Name"]
df_acc= pd.DataFrame(columns=columns)
new_row = {
'Account_Id':self.groups['accountId'][4:],
'Account_Name':self.groups['accountName']
}
df_acc=df_acc.append(new_row, ignore_index=True)
self.dfs['account'] = df_acc
self._readProductMap()
return True
def accountSummary(self):
"""
Main function for AS report type, orchestrates function execution.
Keyword arguments:
None
Return type:
None
"""
self.log.info("Creating Contract summary table")
self.printContracts()
self.log.info("Creating Groups summary table")
self.printGroups()
self.log.info("Creating CP_Code summary table")
self.printCPcodes()
self.log.info("Creating edge host name summary table")
self.printEdgeHostNames()
self.log.info("Creating Application Security tables")
self.printAppSec()
if args['verbose']:
self.log.info("Creating Property summary. (It may take a while) ")
else:
self.log.info("Creating Property summary. (It may take a while, view more with '--verbose') ")
self.printPropertiesDetails()
self.log.info("Creating Certificate Table.")
self.getEnrollments()
self.log.info("Creating Summary by Hostname")
self.presentation()
self.log.info("Writing Files...")
self._writeFiles()
self.log.info("Report successfull, output can be found here:'Reports/{0}/'".format(self.accountName))
def printContracts(self):
"""
Gets Contracts within Account
Keyword arguments:
None
Return type:
None, but stores dataframe in self.dfs
"""
self.log.info("Creating Contracts table.")
columns = ["Contract_ID" , "Contract_Name", "Products"]
df_ctr= pd.DataFrame(columns=columns)
contracts = self.wrapper.getContractNames()
for contract in contracts['contracts']['items']:
products = self._getProducts(contract['contractId'])
new_row = {
'Contract_ID': contract['contractId'][4:],
'Contract_Name':contract['contractTypeName'],
'Products':products
}
df_ctr=df_ctr.append(new_row, ignore_index=True)
self.dfs['contracts'] = df_ctr
def printGroups(self):
"""
Gets Groups in account
Keyword arguments:
None
Return type:
None, but stores dataframe in self.dfs
"""
self.log.info("Creating Groups table.")
columns = ["Group_ID", "Group_Name","Parent"]
df_grp = pd.DataFrame(columns=columns)
for group in self.groups['groups']['items']:
grp_id = int(group['groupId'][4:])
grp_name = group['groupName']
grp_parent = None
if 'parentGroupId' in group:
grp_parent = int(group['parentGroupId'][4:])
new_row = {
'Group_ID': grp_id,
'Group_Name':grp_name,
'Parent':grp_parent
}
df_grp=df_grp.append(new_row, ignore_index=True)
self.dfs['groups'] = df_grp
def printEdgeHostNames(self):
"""
Gets EdgeHostnames in account
Keyword arguments:
None
Return type:
None, but stores dataframe in self.dfs
"""
lst_eh = []
columns = ["Group_ID", "Contract_ID", "Edge_Host_ID", "Edge_Host_Name", "Edge_Host_Domain_Suffix", "Secure", "IPVersion","Product_ID","Map","Slot"]
df_eh = pd.DataFrame(columns=columns)
contracts = []
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for group in self.groups['groups']['items']:
groupId = group['groupId']
executor.submit(self.GroupsWorker,'edgehost',group,lst_eh,contracts)
df_eh= df_eh.append(lst_eh, ignore_index=True)
self.dfs['edgehostnames'] = df_eh
def PropertyWorker(self,list_grp_configs,list_grp_behaviors,config_details):
"""
Gets Property details,
Keyword arguments:
list_grp_configs
list_grp_behaviors
config_details
Return type:
None, but stores dataframe in self.dfs
"""
args = None
args = ['Prod_Version','Staging_Version', 'Latest_Version']
if 'propertyName' in config_details:
self.log.debug("Importing data for property: '{0}'".format(config_details['propertyName']))
# Assign values to variables here for readability and will be used in rest of function.
groupId = config_details['groupId']
contractId = config_details['contractId']
propertyId = config_details['propertyId']
productionVersion = config_details['productionVersion']
stgVersion = config_details['stagingVersion']
latestVersion = config_details['latestVersion']
productId = None
new_row = {
'Config_Name': config_details['propertyName'],
'Group_ID': int(groupId[4:]),
'Contract_ID': contractId[4:],
'Property_ID': int(propertyId[4:]),
'Prod_Version': productionVersion,
'Staging_Version': stgVersion,
'Latest_Version': latestVersion,
'Product': productId
}
if args:
for config_env in args:
config_version = new_row[config_env]
if config_version is not None:
get_version = self.wrapper.getVersionDetails(propertyId,groupId,contractId,str(config_version))
if 'versions' in get_version:
for item in get_version['versions']['items']:
new_row[config_env + '_Updated_User'] = item['updatedByUser']
new_row[config_env + '_Updated_Time'] = item['updatedDate']
if productId == None:
productId = item['productId'][4:]
else:
new_row[config_env + '_Updated_User'] = 'No_' + config_env
new_row[config_env + '_Updated_Time'] = 'No_' + config_env
new_row['Product'] = productId
version = new_row['Latest_Version']
if ('Prod_Version' in new_row) and (new_row['Prod_Version'] is not None):
version = new_row['Prod_Version']
else:
if ('Staging_Version' in new_row) and (new_row['Staging_Version'] is not None):
version = new_row['Staging_Version']
new_row['Hostnames'] = self.getPropertyHostDetails(new_row['Group_ID'],new_row['Contract_ID'],new_row['Property_ID'], str(version))
new_row['Origins'] = self.getPropertyOriginDetails(new_row['Group_ID'],new_row['Contract_ID'],new_row['Property_ID'], str(version))
new_row['Behaviors'] = self.getBehaviorDetails()
new_row['CP_Codes'] = '['+self.getCPCodeDetails()+']'
property_behaviors = new_row['Behaviors']
list_grp_configs.append(new_row)
if productionVersion is not None:
propertyVersion = productionVersion
elif stgVersion is not None:
propertyVersion = stgVersion
else :
propertyVersion = latestVersion
available_behaviors = self.wrapper.getavailableBehavior(propertyId, str(propertyVersion),contractId, groupId)
if 'behaviors' in available_behaviors:
for b in available_behaviors['behaviors']['items']:
enabled = False
if b['name'] in property_behaviors:
enabled = True
new_row = {
'Config_Name': config_details['propertyName'],
'Behaviors': b['name'],
'Enabled': enabled
}
list_grp_behaviors.append(new_row)
return
def GroupsWorker(self, workType,group,main_list=None,second_list=None):
"""
        Worker used by multiple threads for the property, cpcode and edgehost
        functions, needed because of the high number of groups per account.
Keyword arguments:
workType <= Type of function to be execute [property, cpcode , edgehosts]
group <= Dataframe containing list of account groups
            main_list <= list passed down by main thread to append results
second_list <= secondary list passed down by main thread to append results
Return type:
None
"""
groupId = group['groupId']
if 'contractIds' in group:
for contractId in group['contractIds']:
if workType == 'properties':
location_result = self.wrapper.getProperties(groupId, contractId)
if 'properties' in location_result:
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for config_details in location_result['properties']['items']:
executor.submit(self.PropertyWorker,main_list,second_list,config_details)
elif workType == 'cpcodes':
cpcodes = self.wrapper.getCPCodes(groupId, contractId)
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for cp in cpcodes['cpcodes']['items']:
products = []
for product in cp['productIds']:
products.append(product[4:])
new_row = {
'Group_ID': int(groupId[4:]),
'Contract_ID': contractId[4:],
'CP_Code_ID': int(cp['cpcodeId'][4:]),
'CP_Code_Name': cp['cpcodeName'],
'CP_Code_Products': "|".join(products)
}
if new_row not in main_list:
self.log.debug("Fetched data for CPcode: '{0}'".format(cp['cpcodeId'][4:]))
main_list.append(new_row)
elif workType == 'edgehost':
if 'contractIds' in group:
for contractId in group['contractIds']:
if contractId in second_list:
break
second_list.append(contractId)
edgeHostNames = self.wrapper.getEdgeHostNames(groupId, contractId,'hapi')
for edgeHostName in edgeHostNames['edgeHostnames']:
slot = None
if 'slotNumber' in edgeHostName:
slot = edgeHostName['slotNumber']
productID = None
if 'productId' in edgeHostName:
productID = edgeHostName['productId']
IPv = None
if 'ipVersionBehavior' in edgeHostName:
IPv = edgeHostName['ipVersionBehavior']
eMap = None
if 'map' in edgeHostName:
eMap = edgeHostName['map']
new_row = {
'Group_ID': int(groupId[4:]),
'Contract_ID': contractId[4:],
'Edge_Host_ID': edgeHostName['edgeHostnameId'],
'Edge_Host_Name': edgeHostName['recordName']+'.'+edgeHostName['dnsZone'],
"Edge_Host_Domain_Suffix":edgeHostName['dnsZone'],
"Secure":edgeHostName['securityType'],
"IPVersion":IPv,
"Product_ID":productID,
"Map":eMap,
"Slot":slot
}
main_list.append(new_row)
self.log.debug("Fetched configs for group: '{0}'".format(groupId[4:]))
return None
def printCPcodes(self):
"""
        orchestrates multithreading by using the GroupsWorker function to populate CPcode data
Keyword arguments:
None
Return type:
None
"""
lst_cpcodes = []
columns = ["Group_ID", "Contract_ID", "CP_Code_ID", "CP_Code_Name", "CP_Code_Products"]
df_cpcodes = pd.DataFrame(columns=columns)
with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxThreads) as executor:
for group in self.groups['groups']['items']:
groupId = group['groupId']
executor.submit(self.GroupsWorker,'cpcodes',group,lst_cpcodes)
df_cpcodes= df_cpcodes.append(lst_cpcodes, ignore_index=True)
self.dfs['cpcodes'] = df_cpcodes
def printPropertiesDetails(self, *args):
"""
        orchestrates multithreading by using the GroupsWorker function to populate property data
Return type:
None
"""
self.log.debug('Start time is {0}'.format(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
self.log.debug('generating config data.....')
columns = [
"Config_Name",
"Group_ID",
"Contract_ID",
"Property_ID",
"Prod_Version",
"Staging_Version",
"Latest_Version",
"Product",
"Prod_Version_Updated_User",
"Prod_Version_Updated_Time",
"Staging_Version_Updated_User",
"Staging_Version_Updated_Time",
"Latest_Version_Updated_User",
"Latest_Version_Updated_Time",
"Hostnames",
"Origins",
"Behaviors",
"CP_Codes"
]
list_properties = []
list_behavior = []
df_property = | pd.DataFrame(columns=columns) | pandas.DataFrame |
from functools import partial
from types import SimpleNamespace
from warnings import warn
from numpy import isclose
from pandas import DataFrame, Series
from pandas.core.dtypes.common import is_numeric_dtype
from data_frames import to_nested_dicts, the_only_one
from signature_scoring.evaluation import summarize_across_cell_lines, evaluation_summary
from signature_scoring.scoring_functions import ScoringFunction
def extract_single_score(g: DataFrame):
return the_only_one(g.score)
def reevaluate(scores_dict_by_cell, scoring_func: ScoringFunction, subtypes_top=None, subtypes_scores=None, **kwargs):
# TODO: support scoring functions with no cell grouping
# if scoring_func.grouping:
# pass
summarize_test = partial(evaluation_summary, **kwargs)
data = summarize_across_cell_lines(
summarize_test, scores_dict_by_cell,
subtypes_top=subtypes_top, subtypes_scores=subtypes_scores
)
return data
def extract_scores_from_result(
result: Series,
scores_as_series=True, are_grouped_by_cell=True
) -> DataFrame:
if scores_as_series:
def row_details(scores: Series) -> dict:
yield {'score': scores}
else:
def row_details(scores: Series) -> dict:
for row in scores.reset_index().itertuples():
yield row._asdict()
if are_grouped_by_cell:
def iter_groups(scores: SimpleNamespace):
for cell_id, cell_scores in scores.__dict__.items():
yield {'cell_id': cell_id}, cell_scores
else:
def iter_groups(scores: SimpleNamespace):
yield {}, scores
data = []
for func, func_scores in result.iteritems():
for group_metadata, group_scores in iter_groups(func_scores):
# group: indications / contra / controls
for group, score_series in group_scores.__dict__.items():
for row in row_details(score_series):
data.append({
'func': func,
'group': group,
**row,
**group_metadata
})
return DataFrame(data)
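# Shape of the result (restating the loops above): one row per score entry with
# 'func', 'group' (indications / contra / controls), either a nested 'score'
# Series when scores_as_series=True or the exploded per-row fields otherwise,
# plus 'cell_id' when are_grouped_by_cell=True.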
def reevaluate_benchmark(old_benchmark: DataFrame, reevaluate_kwargs, verbose=True, keep_scores=True) -> DataFrame:
# TODO: support scoring functions with no cell grouping
scores = extract_scores_from_result(old_benchmark['meta:Scores'])
if not any(scores_series.score.any() for scores_series in scores.score):
warn(f'Skipping re-evaluation for {", ".join(old_benchmark.index)}: no scores in the original result')
# return just empty df with same columns
return DataFrame()
scores_dict_by_func_cell_group_subtype = to_nested_dicts(
scores, ['func', 'cell_id', 'group'],
extract=extract_single_score
)
del scores
data = []
for func, scores_dict_by_cell_group in scores_dict_by_func_cell_group_subtype.items():
reevaluate_kwargs['aggregate'] = None # already aggregated
result = reevaluate(
scores_dict_by_cell_group,
func,
**reevaluate_kwargs
)
data.append({**result, **{'Func': func}})
reevaluated_benchmark = DataFrame(data).set_index('Func')
# preserve execution time information
reevaluated_benchmark['Time'] = old_benchmark['Time']
if verbose:
old_columns = set(old_benchmark.columns)
new_columns = set(reevaluated_benchmark.columns)
removed_metrics = old_columns - new_columns
if removed_metrics:
print(removed_metrics, 'metrics removed')
added_metrics = new_columns - old_columns
if added_metrics:
print(added_metrics, 'metrics added')
old_benchmark_reordered = old_benchmark.loc[reevaluated_benchmark.index]
for column in (old_columns & new_columns):
if not old_benchmark_reordered[column].equals(reevaluated_benchmark[column]):
if (
| is_numeric_dtype(old_benchmark_reordered[column].dtype) | pandas.core.dtypes.common.is_numeric_dtype |
import os
import sys
import datetime
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report, precision_recall_fscore_support
from absl import app
from bin.util import *
from bin.modules import *
from bin.inception_v4 import inception_v4
from bin.all_conv_cnn import all_conv
from bin.inception_se import inception_v4_se
from bin.resnet import resnet
from lib.config import REPLAYS_PARSED_DIR, REPLAY_DIR, REPO_DIR, STANDARD_VERSION
def main():
### define constants for training
    # suppress tf-warning for RAM-usage
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# declare all used sample versions
versions = ['1_3d_10sup', '1_3d', '1_3d_15sup']
# declare batching, how many samples are used for training and the number of training epochs
batch_size = 50
cap = 30000
epochs = 50
# declare if data augmentation is used
data_augmentation = False
# set train/test-split and number of batches
split = int(cap*0.9)
batches = int(split/batch_size)
test_batches = int((cap-split)/batch_size)
# set start time to calculate duration of training
now = datetime.datetime.now()
# set learning rate
lr = 0.005
# initialize directory in which the logs are going to be saved
tensorboard_dir = os.path.join(REPO_DIR, 'tensorboard_logs', 'Inception_v4_noAug_noWeight', 'LearningRate_'+str(lr) +'_SampleSize_' + str(cap) + '_' +str(now.year)+'-'+str(now.month)+'-'+str(now.day)+'-'+str(now.hour)+'-'+str(now.minute))
fil = [[], [], []]
files = [[], [], []]
supp = [[], [], []]
# get files and filter them by supply for every specified version
for n, v in enumerate(versions):
fil[n] = build_file_array(type='csv', version=[v])
file, su = filter_close_matchups(fil[n], 15, [v], 'csv')
files[n] = file
supp[n] = su
# get remaining files
files_ = []
supp_ = []
for i in range(3):
files_ = np.append(files_, files[i])
supp_ = np.append(supp_, supp[i])
# f is the array of file-paths that are used to evaluate the accuracy by supply
# s is the related supplies for each file in f
f = files_[cap:]
s = supp_[cap:]
# initialize the datasets and declare that the input files are gzip-compressed
train_dataset = tf.data.TextLineDataset(files_[:split-1], compression_type='GZIP')
test_dataset = tf.data.TextLineDataset(files_[split:cap], compression_type='GZIP')
# every sample consists of 91731 lines
# every line is read individually by the TextLineDataset-Reader
# therefore we need to batch them all together and parse them respectively
train_dataset = train_dataset.batch(13*84*84+3)
test_dataset = test_dataset.batch(13*84*84+3)
# if data-augmentation is True we augment the data in parse_func
if data_augmentation:
train_dataset = train_dataset.map(parse_func, num_parallel_calls=16)
test_dataset = test_dataset.map(parse_func, num_parallel_calls=16)
# else we do not need augmentation
else:
train_dataset = train_dataset.map(parse_func_no_aug, num_parallel_calls=16)
test_dataset = test_dataset.map(parse_func_no_aug, num_parallel_calls=16)
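# For orientation, a parse function for this layout might look roughly like the sketch
# below (hypothetical: the real parse_func / parse_func_no_aug live in bin.util and may
# differ, e.g. in how the 3 trailing lines encode the label and metadata):
# def parse_func_no_aug(lines):
#     values = tf.strings.to_number(lines, out_type=tf.float32)
#     feature_maps = tf.reshape(values[:13 * 84 * 84], (13, 84, 84))
#     label = values[13 * 84 * 84:]
#     return feature_maps, label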
train_dataset = train_dataset.batch(batch_size)
test_dataset = test_dataset.batch(batch_size)
# the flat function is only needed if we add samples by augmentation
if data_augmentation:
train_dataset = train_dataset.map(flat_func, num_parallel_calls=16)
test_dataset = test_dataset.map(flat_func, num_parallel_calls=16)
# to get accuracies for each supply difference
# we need to declare another dataset and parse it like the previous ones
validation_dataset = tf.data.TextLineDataset(f, compression_type='GZIP')
validation_dataset = validation_dataset.batch(13*84*84+3)
validation_dataset = validation_dataset.map(parse_func_no_aug, num_parallel_calls=16)
validation_dataset = validation_dataset.batch(1)
# prefetch(1) allows the datasets to load multiple batches in parallel
train_dataset = train_dataset.prefetch(1)
test_dataset = test_dataset.prefetch(1)
validation_dataset = validation_dataset.prefetch(1)
# create an iterator over the dataset and declare initialization methods
iter = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)
train_init_op = iter.make_initializer(train_dataset)
test_init_op = iter.make_initializer(test_dataset)
val_init_op = iter.make_initializer(validation_dataset)
# get features and labels from iterator
features, labels = iter.get_next()
# give the features to the networks
# the one that is going to be evaluated is not in comments
# returns the predictions
# y_ = inception_v4_se(features)
y_ = resnet(features)
# y_ = all_conv(features)
# y_ = inception_v4(features)
# get the ground-truth labels
y = labels
# apply softmax to the predictions
softmax = tf.nn.softmax(y_)
#get trainable params
para = get_params(tf.trainable_variables())
print(para)
# loss without weights
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_, labels=y))
# helper to return labels and predictions to session
results = tf.argmax(y,1), tf.argmax(softmax, 1)
# add an optimiser
optimiser = tf.train.AdamOptimizer(learning_rate=lr).minimize(cross_entropy)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(softmax, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# setup the initialisation operator
init_op = tf.global_variables_initializer()
# setup the save and restore functionality for variables
saver = tf.train.Saver(max_to_keep=50)
with tf.Session() as sess:
# declare a last stop if you want to continue training an existing model
last_stop = None
# set a save path from with the trained model will be loaded
save_path_ = os.path.join(tensorboard_dir)
# if last_stop is declared try to import the meta graph and variable values from the saved model
if last_stop is not None:
saver = tf.train.import_meta_graph(os.path.join(save_path_, 'model_at_epoch-'+str(last_stop)+'.meta'))
# if the specified checkpoint does not exist load the latest checkpoint
if tf.train.checkpoint_exists(os.path.join(save_path_, 'model_at_epoch-'+str(last_stop))):
saver.restore(sess, os.path.join(save_path_, 'model_at_epoch-'+str(last_stop)))
else:
saver.restore(sess, tf.train.latest_checkpoint(save_path_))
# set the epoch range to an array of "last stop" to "end of training"
epoch_range = np.arange(last_stop, epochs)
tensorboard_dir = save_path_
else:
# if we start a new training the epoch range is 0 to "end of training"
epoch_range = np.arange(0, epochs)
# initialize all variables randomly
sess.run(init_op)
# initialize the FileWriter and save the session graph to the log data
summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
# initialize arrays to save predictions and corresponding labels, as well as starting time
labels_by_epoch = []
pred_by_epoch = []
timestamp = datetime.datetime.now()
try:
for i in epoch_range:
# get timestamp
epoch_timestamp = datetime.datetime.now()
# initialize training dataset
sess.run(train_init_op)
# initialize logging variables
losses = 0
accs = 0
taccs = 0
n= 0
pred = []
pred_test = []
gt = []
gt_test = []
# while there are samples to fetch in the dataset -> keep running
while True:
try:
# run the optimizer function for the current batch
# and return cross entropy, resulting labels and predictions and accuracy
_, loss, res, acc= sess.run([optimiser, cross_entropy, results, accuracy])
# save gt and labels
gt = np.append(gt, res[0])
pred = np.append(pred, res[1])
# calculate loss and accuracy relative to number of batches
losses += loss / batches
accs += acc / batches
sys.stdout.write("\rBatch %2d of %2d" % (n+1, batches))
sys.stdout.flush()
n += 1
except tf.errors.OutOfRangeError:
break
# initialize test dataset
sess.run(test_init_op)
# while there are samples to fetch in the dataset -> keep running
while True:
try:
# run accuracy and return gt and labels
res_test, tacc = sess.run([results, accuracy])
# compute metrics
taccs += tacc/test_batches
gt_test = np.append(gt_test, res_test[0])
pred_test = np.append(pred_test, res_test[1])
except tf.errors.OutOfRangeError:
break
# save labels and predictions
labels_by_epoch = np.append(labels_by_epoch, [gt])
pred_by_epoch = np.append(pred_by_epoch, [pred])
# evaluate metrics precision, recall and f1-score for training results
p, rc, f1, _ = precision_recall_fscore_support(gt, pred, average='weighted')
# evaluate metrics precision, recall and f1-score for test results
pt, rct, ft, _ = precision_recall_fscore_support(gt_test, pred_test, average='weighted')
# calculate time spent for this epoch
time_spent = datetime.datetime.now() - epoch_timestamp
time_to_sec = time_spent.seconds
print(" --- Iter: {:-2}, Loss: {:-15.2f} --- TRAINING --- Precision: {:.4f}, Recall: {:.4f}, F1-Score: {:.4f}, Top-1-Error: {:.2f} --- TEST DATA --- Precision: {:.4f}, Recall: {:.4f}, F1-Score: {:.4f}, Top-1-Error: {:.2f} --- Calculated in {} seconds".format(i+1, losses, p, rc, f1, 1-accs, pt, rct, ft, 1-taccs, time_spent.seconds))
# write all metrics into log data for this step
train_summary = tf.Summary(value=[tf.Summary.Value(tag='train_loss', simple_value=losses),
tf.Summary.Value(tag='train_precision', simple_value=p),
tf.Summary.Value(tag='test_precision', simple_value=pt),
tf.Summary.Value(tag='train_recall', simple_value=rc),
tf.Summary.Value(tag='test_recall', simple_value=rct),
tf.Summary.Value(tag='train_f1_score', simple_value=f1),
tf.Summary.Value(tag='test_f1_score', simple_value=ft),
tf.Summary.Value(tag='train_accuracy', simple_value=accs),
tf.Summary.Value(tag='test_accuracy', simple_value=taccs),
tf.Summary.Value(tag='time_spent_seconds', simple_value=time_to_sec)])
summary_writer.add_summary(summary=train_summary, global_step=i)
summary_writer.flush()
# save model for this step
save_path = saver.save(sess, os.path.join(tensorboard_dir, "model_at_epoch"), global_step=i+1)
except KeyboardInterrupt:
df = pd.DataFrame(labels_by_epoch)
os.makedirs(tensorboard_dir, exist_ok=True)
df.to_csv(os.path.join(tensorboard_dir, 'labels.csv'))
df = pd.DataFrame(pred_by_epoch)
df.to_csv(os.path.join(tensorboard_dir, 'preds.csv'))
# print(labels_by_epoch)
# save labels and predictions as csv data
df = pd.DataFrame(labels_by_epoch)
os.makedirs(tensorboard_dir, exist_ok=True)
df.to_csv(os.path.join(tensorboard_dir, 'labels.csv'))
df = pd.DataFrame(pred_by_epoch)
df.to_csv(os.path.join(tensorboard_dir, 'preds.csv'))
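# Entry point (a minimal sketch): absl's app.run passes argv to its callback, while main()
# above takes no arguments, so a small wrapper is assumed here.
if __name__ == '__main__':
    app.run(lambda argv: main())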
import numpy as np
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.compat import lrange
from pandas.util.testing import assert_frame_equal, assert_series_equal
from .common import MixIn
class TestNth(MixIn):
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.loc[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.loc[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.loc[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
assert isnull(grouped['B'].first()['foo'])
assert isnull(grouped['B'].last()['foo'])
assert isnull(grouped['B'].nth(0)['foo'])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
assert s.dtype == 'int64'
f = s.groupby(level=0).first()
assert f.dtype == 'int64'
def test_nth(self):
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.loc[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
assert expected.name == 1
assert expected2.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
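# Illustration (hypothetical values): if g starts with [5, 5, 1, ...], then with sort=False
# the group for 5 comes before the group for 1, so nth(0, dropna='all') matches first()
# computed on the unsorted grouping rather than on sorted group labels.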
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
key = (df.index.year, df.index.month)
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
'2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
'2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
assert_frame_equal(result, expected)
def test_nth_multi_index(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = self.three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny']})
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = DataFrame(
{'C': ['dull', 'dull', 'dull', 'dull']},
index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
['one', 'two', 'one', 'two']],
names=['A', 'B']))
assert_frame_equal(result, expected)
def test_nth_empty():
# GH 16064
df = DataFrame(index=[0], columns=['a', 'b', 'c'])
result = df.groupby('a').nth(10)
expected = DataFrame(index=Index([], name='a'), columns=['b', 'c'])
assert_frame_equal(result, expected)
import pandas as pd
import numpy as np
import sys
import os
import pdb
sys.path.append('../../data')
import shutil
import re
ids = pd.read_csv('../../metadata/pball_site_ids.csv', header=None)
ids = ids[0].values
glm_all_f = pd.read_csv("../../results/glm_transfer/RMSE_transfer_glm_pball.csv")
train_lakes = [re.search('nhdhr_(.*)', x).group(1) for x in np.unique(glm_all_f['target_id'].values)]
n_lakes = len(train_lakes)
test_lakes = ids[~np.isin(ids, train_lakes)]
ids = train_lakes
new_df = pd.DataFrame()
inc = []
for ct, i_d in enumerate(ids):
#for each target id
print(ct, ": ", i_d)
try:
diffs = pd.read_feather("../../metadata/diffs/target_nhdhr_"+ i_d +".feather")
# -*- coding: utf-8 -*-
"""
Created on Mon 11 January 2022
Modified by <EMAIL> on 21/10/2021
@author: <NAME>
@contact: <EMAIL>
@license: /
"""
import mmap
import os
import sys
import numpy as np
import pandas as pd
from io import StringIO
from configparser import ConfigParser
class CustomParser(ConfigParser):
"""
Original from https://gitlab.cern.ch/datanaso/dEval/-/blob/master/etc/utils.py
ConfigParser with a custom dictionary conversion method.
"""
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return d
class ProcessorBase():
'''
Base-class for MR-ToF MS data processing
'''
def __init__(self):
self.files = []
self.data = {}
self.pars = {}
self.df_dict = {}
self.step = 0
def add_all(self, to_csv=False):
'''
Returns sum of all handled files while incrementing sweep numbers from one file to the next to
get a rolling sweep number.
Parameters:
- to_csv: File name to store combined .csv in
Return:
- flattened and sweep-adjusted dataframe
'''
if len(self.files) == 0:
print(f"(ProcessorBase.add_all): Data not processed yet or empty.")
return
# Adjust the sweep numbers
for i in np.arange(0, len(self.df_dict)):
if i == 0:
continue
key = list(self.df_dict.keys())[i]
key_m1 = list(self.df_dict.keys())[i-1]
self.df_dict[key]['sweep'] += self.df_dict[key_m1].iloc[-1]['sweep'] + 1
#
df = pd.concat(self.df_dict)
# Save to file if file name is passed
if to_csv != False:
df.to_csv(to_csv, index=False)
#
return df
class MCS6Lst(ProcessorBase):
'''
Process each list file contained in array of lst_files
Param:
lst_files: array of list files
'''
"""
Created on Mon 23 April 2018
Modified and adapted to Python 3 on Wed 17 July 2019
Modified by <EMAIL> on 21/10/2021
@author: <NAME>
@author: <NAME>
@contact: <EMAIL>
@contact: <EMAIL>
@license: MIT license
"""
def __init__(self):
"""
Initialize the conversion dataframe and some other varaibles
"""
self.files = []
#-------------------------Create a dataframe containing the conversion table from the MCS6A manual--------------------------------------#
Time_Patch_Value = ['0', '5', '1', '1a', '2a', '22', '32','2','5b','Db','f3','43','c3','3']
conversion_dict = {'Data_Length' : pd.Series([2,4,4,6,6,6,6,6,8,8,8,8,8,8], index=Time_Patch_Value),
'Data_Lost_Bit' : pd.Series([np.nan ,np.nan ,np.nan ,np.nan ,np.nan ,np.nan ,47,np.nan ,63,np.nan ,47,63,np.nan ,63], index=Time_Patch_Value),
'Tag_Bits' : pd.Series([(np.nan,np.nan) ,(np.nan,np.nan) ,(np.nan,np.nan) ,(np.nan,np.nan) ,(40,47),(40,47),(np.nan,np.nan), (np.nan,np.nan), (48,62),(48,63),(48,63),(48,62),(48,63),(58,62)], index=Time_Patch_Value),
'Sweep_Counter': pd.Series([(np.nan,np.nan),(24,31),(np.nan,np.nan),(32,47),(32,39),(np.nan,np.nan),(40,46),(np.nan,np.nan),(32,47),(32,47),(40,46),(np.nan,np.nan),(np.nan,np.nan),(np.nan,np.nan)], index=Time_Patch_Value),
'Time_Bits': pd.Series([12,20,28,28,28,36,36,44,28,28,36,44,44,54], index=Time_Patch_Value),
'Max_Sweep_Length': pd.Series([0.0000004096,0.000105,0.027,0.027,0.027,6.872,6.872,1759.2,0.027,0.027,6.872,1759.2,1759.2,1801440], index=Time_Patch_Value)}
self.conversion_df = pd.DataFrame(conversion_dict)
self.df_dict = {}
def convert_bytes(self,bytearray,nb_bits,data_lost_bit,tag_bits,sweep_counter,time_bits,verbose=0):
'''
Perform the actual conversion of a single event from binary to integer numbers
See pages 5-19 and 5-20 of FastCom MCS6A for more information on the bits meaning
Param:
bytearray : an array of nb_bits/8 bytes encapsulating the data of a single MCS6A stop event
nb_bits : total number of bits on which the data is encoded
data_lost_bit : Data lost bit. Indicates if the FIFO was full. 1 bit index
tag_bits : Tag bits. Tag info of a single stop event (see manual). Tuple containing the bit indexes.
sweep_counter: Sweep number of a single stop event. Tuple containing the bit indexes.
time_bits : Number of bits encoding the time of flight of a single stop event.
The tof seems to be given in an unbinned format with 100ps resolution (to be confirmed).
Return:
Decoded tof, sweep, channel, edge, tag, fifo
'''
bit_word = ''
for byte in reversed(bytearray): # iterating a bytes object yields ints in Python 3
bit_word += '{0:08b}'.format(byte)
#convert data lost bit always first index in the reversed array (last in the manual)
if np.isnan(data_lost_bit):
fifo = np.nan
index_data_lost_bit = -1
else:
index_data_lost_bit = nb_bits-1-int(data_lost_bit)
fifo = int(bit_word[index_data_lost_bit],2)
#convert tag bit
if np.isnan(tag_bits[0]):
tag = np.nan
index_high_tag = -1
index_low_tag = -1
else:
index_high_tag = nb_bits-1-int(tag_bits[1])
index_low_tag = nb_bits-1-int(tag_bits[0])
tag = int(bit_word[index_high_tag:index_low_tag+1],2)
#convert sweep number
if np.isnan(sweep_counter[0]):
sweep = np.nan
index_high_sweep = -1
index_low_sweep = -1
else:
index_high_sweep = nb_bits-1-int(sweep_counter[1])
index_low_sweep = nb_bits-1-int(sweep_counter[0])
sweep = int(bit_word[index_high_sweep:index_low_sweep+1],2)
if bit_word != "000000000000000000000000000000000000000000000000" and verbose>1:
print(f"bit_word: {bit_word}")
print(f"index_data_lost_bit: {fifo}")
print(f"index_high_tag: {index_high_tag}")
print(f"index_low_tag: {index_low_tag}")
print(f"tag: {tag}")
print(f"index_high_sweep: {index_high_sweep}")
print(f"index_low_sweep: {index_low_sweep}")
print(f"sweep: {sweep}")
#convert time of flight
index_high_tof = max(index_low_sweep,index_low_tag,index_data_lost_bit)+1
index_low_tof = index_high_tof+time_bits
tof = int(bit_word[index_high_tof:index_low_tof],2)
#these are always there no matter the format
channel = int(bit_word[index_low_tof+1:],2)
edge = int(bit_word[index_low_tof],2)
# if tof != 0:
# print(tof, sweep-1, channel, edge, tag, fifo)
return tof, sweep-1, channel, edge, tag, fifo
def decode_binary(self,binary,time_patch, verbose = 0):
'''
Read the binary part of the file by chunks and decode each chunk according to the format
given in time_patch
The length of a chunk is given by the time_patch
Param : binary : the memory-mapped binary part of the list file, positioned at the start of the data
Return: numpy array containing the converted data : tof, sweep, channel, edge, tag, fifo
'''
#-----------extract data from the dataframe--------------------------#
data_length = int(self.conversion_df.loc[time_patch.decode('ascii'),'Data_Length'])
nb_bits = 8*data_length #convert nb of bytes into nb of bits
data_lost_bit = self.conversion_df.loc[time_patch.decode('ascii'),'Data_Lost_Bit']
tag_bits = self.conversion_df.loc[time_patch.decode('ascii'),'Tag_Bits']
sweep_counter = self.conversion_df.loc[time_patch.decode('ascii'),'Sweep_Counter']
time_bits = int(self.conversion_df.loc[time_patch.decode('ascii'),'Time_Bits'])
max_sweep_length = self.conversion_df.loc[time_patch.decode('ascii'),'Max_Sweep_Length']
steps = len(binary[binary.tell():])/data_length
first_it = True
if verbose>1:
print(f"Data length: {data_length}\nN bits: {nb_bits}\nData lost bit: {data_lost_bit}\n\
tag bits: {tag_bits}\nsweep_counter: {sweep_counter}\ntime_bits: {time_bits}\n\
max sweep length: {max_sweep_length}\nsteps: {steps}\n")
# !
# Introduce sweep_counter_overflow: in some cases, MCS6 seems to allocate only a small number of bits for storing the sweep number.
# In time_patch=32, for example, this is only 7 bits -> the counter can count to 128 and then resets to 0. With the overflow counter we
# simply count how many overflows happened and add the corresponding number of sweeps to the overall sweep number.
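# e.g. for time_patch='32' the counter spans bits 40-46 (7 bits), so it wraps every
# 2**7 = 128 sweeps; after the third wrap the correction added below is 3 * 128 = 384.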
sweep_counter_overflow = 0
old_sweep = 0 # for detecting when overflow happens
# loop through all bytewords
for i in range(int(steps)):
if verbose>0:
if (i%(int(steps/10))==0):
print(f"Step {i} of {steps}.")
byteword = binary.read(data_length)
tof, sweep, channel, edge, tag, fifo = self.convert_bytes(byteword,nb_bits,
data_lost_bit, tag_bits, sweep_counter, time_bits, verbose=verbose)
# Check whether overflow happened (for example old_sweep = 127, new sweep is 0)
# Only do for non-zero events:
if tof != 0:
if verbose>1: print(f"old_sweep: {old_sweep}")
if old_sweep > sweep:
sweep_counter_overflow += 1
if verbose>1: print(f"sweep_counter_overflow: {sweep_counter_overflow}")
old_sweep = sweep
# Add overflow to the sweep number (in case sweep has 7bit int -> 2**7=128)
sweep += sweep_counter_overflow*(2**(sweep_counter[1]-sweep_counter[0]+1))
if verbose>1: print(f"sweep: {sweep}")
#
if channel != 0 :#means for real data
if first_it:
converted_data = np.array([tof, sweep, channel, edge, tag, fifo])
first_it = False
else :
converted_data = np.vstack((converted_data, np.array([tof, sweep, channel, edge, tag, fifo])))
binary.close()
return converted_data
def get_time_patch_and_binary(self, listfile, verbose=False):
'''
Memory map the list file and isolate the time_patch and the binary part of the data
Param:
listfile : input list file
Return:
mapped_file : memore map of the input listfile
time_patch : string code indicating the format in which the data are written (see manual)
'''
mapped_file = mmap.mmap(listfile.fileno(), 0, access=mmap.ACCESS_READ)
search_dict = {'section' : '[DATA]' , 'list_file_type' : 'time_patch'}
#-----------------set file index to time patch code -----------------#
pos_type_from = mapped_file.find(search_dict['list_file_type'].encode('ascii'))+len(search_dict['list_file_type'])+1
mapped_file.seek(pos_type_from)
time_patch = mapped_file.readline().strip('\r\n'.encode('ascii'))
#-----------set file index to beginning of DATA-----------------------------------#
pos_data_from = mapped_file.find(search_dict['section'].encode('ascii'))
mapped_file.seek(pos_data_from)
#---after readline, the file index should point to the beginning of the binary data no matter what
mapped_file.readline()
if verbose>1:
print(f"pos_type_from: {pos_type_from}\npos_data_from: {pos_data_from}\ntime_patch: {time_patch}")
return mapped_file, time_patch
def process(self,file_array,to_csv = False, verbose=0):
"""
Perform the processing of the files
Parameters:
- file_array: Array of file-paths
- to_csv: if true, saves files under its file name with .csv extension
- verbose: verbosity
"""
full_info = False # for regular application the channel, edge, tag and fifo info are constant so they don't have to be saved. In that case keep full_info = False
self.files = file_array
for filename in self.files:
with open(filename,'rb') as listfile:
binary, time_patch = self.get_time_patch_and_binary(listfile, verbose=verbose)
if full_info:
converted_data = self.decode_binary(binary,time_patch,verbose=verbose) # np.array with tof, sweep, channel, edge, tag, fifo
header_res ='tof,sweep,channel,edge,tag,fifo'
if to_csv:
np.savetxt('{}/{}.csv'.format(os.path.split(filename)[0],os.path.splitext(os.path.basename(filename))[0]),converted_data,
fmt = '%i,%i,%i,%i,%f,%f', header = header_res)
else:
converted_data = pd.DataFrame(self.decode_binary(binary,time_patch,verbose)[:, [0,1]], columns=['tof', 'sweep']) # saves only tof and sweep info
converted_data.tof = converted_data.tof/10 # 100ps -> ns
converted_data.sweep = converted_data.sweep.astype('int64') # sweep is only int
if to_csv:
converted_data.to_csv('{}/{}.csv'.format(os.path.split(filename)[0],os.path.splitext(os.path.basename(filename))[0]), index=False)
print('File {} loaded successfully!'.format(os.path.splitext(os.path.basename(filename))[0]))
self.df_dict[os.path.splitext(os.path.basename(filename))[0]] = converted_data
if full_info == False:
return pd.concat(self.df_dict, axis=1)
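# Example usage (a sketch; the .lst paths are placeholders):
# processor = MCS6Lst()
# per_file = processor.process(['run_001.lst', 'run_002.lst'], to_csv=False)
# combined = processor.add_all(to_csv='combined.csv') # rolling sweep numbers across files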
# -*- coding: utf-8 -*-
"""
Code to stack SimFin intermediaries/load into Postgres DB
05/08/2019
<NAME>
"""
import pandas as pd
# Set up connection to Postgres DB
# from sqlalchemy import create_engine
# engine = create_engine("postgresql://postgres:dfdk#418!@@192.168.3.113/postgres")
# c = engine.connect()
# conn = c.connection
# Use process file to create all relevant file extensions
PROCESS_FILE = pd.read_csv('PROCESS_FILE.csv')
PROCESS_FILE = PROCESS_FILE.sort_values(['statement_type', 'year', 'quarter'])
PROCESS_FILE['year'] = PROCESS_FILE['year'].map(str)
exts = (PROCESS_FILE[['statement_type', 'quarter', 'year']]
.apply(lambda x: '_'.join(x), axis=1)
.tolist())
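# Each entry of exts looks something like 'pl_Q1_2018' (the exact quarter/year formatting
# depends on the contents of PROCESS_FILE.csv).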
# Loop through file extensions, appending to SQL database/running list of csvs
pls = []
bss = []
cfs = []
for e in exts:
file = 'simfin_{}.csv'.format(e)
statement = pd.read_csv(file)
if 'pl' in e:
pls.append(statement)
# statement.to_sql('qtrly_simfin_pl', con=engine, if_exists='append')
if 'cf' in e:
cfs.append(statement)
# statement.to_sql('qtrly_simfin_cf', con=engine, if_exists='append')
if 'bs' in e:
bss.append(statement)
# statement.to_sql('qtrly_simfin_bs', con=engine, if_exists='append')
# Load shares data into Postgres DB
shares_data = pd.read_csv('simfin_shares.csv')
# shares_data.to_sql('qtrly_simfin_shares', con=engine, if_exists='replace')
# Export csvs to pull in wrangling given driver parameters
pd.concat(pls, axis=0).to_csv('qtrly_simfin_pl.csv', index=False)
pd.concat(cfs, axis=0).to_csv('qtrly_simfin_cf.csv', index=False)
pd.concat(bss, axis=0).to_csv('qtrly_simfin_bs.csv', index=False)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 17:13:22 2018
@author: kitreatakataglushkoff
Kitrea's hand-written copied/adjusted version of the analyze_massredistribution.py,
which was last significantly edited Thursday July 18.
UPDATE - Oct 9, 2018 - Kitrea double-checked code, added some comments.
last updated Wed Nov 14 - to clean out bad data in the new large dataset.
"""
import pandas as pd
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
import copy
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
# Tips, comments, and old portions of code no longer used have been moved to bottom of file
#%% ===== REGION AND GLACIER FILEPATH OPTIONS =====
# User defines regions of interest
rgi_regionO1 = [13, 14, 15]
#rgi_regionO1 = [15]
search_binnedcsv_fn = (input.main_directory + '/../DEMs/Shean_2018_1109/aster_2000-2018_20181109_bins/*_mb_bins.csv')
#%% ===== PLOT OPTIONS =====
# Option to save figures
option_savefigs = 1
fig_fp = input.main_directory + '/../Output/figures/massredistribution/'
# Plot histogram options
option_plot_histogram = 0
histogram_parameters = ['Area', 'Zmed', 'Slope', 'PercDebris']
#histogram_parameters = ['Area', 'Zmin', 'Zmax', 'Zmed', 'Slope', 'Aspect', 'Lmax', 'PercDebris']
# Plot dhdt of each glacier options
option_plot_eachglacier = 0
# Plot glaciers above and below a given parameter threshold (*MAIN FUNCTION TO RUN)
option_plot_multipleglaciers_single_thresholds = 0
# run for specific parameter or all parameters
option_run_specific_pars = 0
# Plot glaciers above and below a given set of multiple thresholds
option_plot_multipleglaciers_multiplethresholds = 0
# Plot glacier characteristics to see if parameters are related
option_plot_compareparameters = 1
#option_plot_multipleglaciers_binned_parameter = 0 #glaciers within a characteristic's defined range
#option_plot_multipleglaciers_indiv_subdivisions = 0 #glaciers binned into 6 categories. (NOT USED)
#option_plots_threshold = 0 #scatter plots relating glacier stats
# Columns to use for mass balance and dhdt (specify mean or median)
mb_cn = 'mb_bin_med_mwea'
dhdt_cn = 'dhdt_bin_med_ma'
dhdt_max = 2.5
dhdt_min = -4
# Threshold for tossing glaciers with too much missing data
perc_area_valid_threshold = 90
# Switch to use merged data or not (0 = don't use, 1 = use merged data)
option_use_mergedata = 0
# Remove glacier options (surging, all positive dhdt, etc.)
option_remove_surge_glac = 1
option_remove_all_pos_dhdt = 1
option_remove_dhdt_acc = 1
acc_dhdt_threshold = 0.5
# Legend option (switch to show legend on multi-glacier figures or not)
option_show_legend = 0
# Transparency value (between 0 & 1: 0 = no plot, 1 = full opaque)
glacier_plots_transparency = 0.3
#user-defined stored variables for ideal thresholds, for each region and parameter
Area_15_thresholds = list(range(5,40, 5))
Area_13_thresholds = list(range(5, 120, 5))
Area_13_thresholds.extend([150, 200, 250, 300, 350]) #if histogram has 2 separate ranges use .extend
Slope_15_thresholds = list(range(10,26,2))
Slope_13_thresholds = list(range(5, 40, 2))
PercDebris_13_thresholds = list(range(0,65,5))
PercDebris_15_thresholds = list(range(0, 65, 5))
Zmin_13_thresholds = list(range(2600,5800, 200))
Zmin_15_thresholds = list(range(3500, 6500, 500))
Zmed_13_thresholds = list(range(3800, 6600, 200))
Zmed_15_thresholds = list(range(4750, 7000, 500))
Aspect_13_thresholds = list(range(0, 450, 90))
Aspect_15_thresholds = list(range(0, 450, 90))
Zmax_15_thresholds = list(range(6000, 7600, 200))
Zmax_13_thresholds = list(range(4000, 7600, 200))
Lmax_15_thresholds = list(range(4000, 14000, 2000))
Lmax_13_thresholds = list(range(4400, 40000, 2000))
Lmax_13_thresholds.extend([56000, 58000, 6000])
dhdt_13_thresholds = [1]
Area_14_thresholds = list(range(5, 120, 5,))
Area_14_thresholds.extend([150, 200, 250, 300, 350])
Zmin_14_thresholds = list(range(2600, 5800, 200))
Zmax_14_thresholds = list(range(5000, 7600, 200))
Zmed_14_thresholds = list(range(3800,6400, 200))
Slope_14_thresholds = list(range(10, 42, 2))
Aspect_14_thresholds = list(range(0,450,90))
Lmax_14_thresholds = list(range(4000, 45000,2000))
PercDebris_14_thresholds = list(range(0, 65,5))
#For plotting one parameter at a time
#User defines parameter for multi-glacier and histogram runs
#set the threshold equal to one of the above, defined thresholds, depending on the current
#keep in mind for threshold, that the subplots are examining >= and < the threshold
#If you have not yet evaluated the histograms to define the threshold ranges,
#then you must define the following variable
#For plotting multiple parameters in one run
#Create dictionary. key = parameter found in main_glac_rgi, value = thresholds
all_13_pars = {'Area': Area_13_thresholds, 'Zmin': Zmin_13_thresholds ,
'Zmax':Zmax_13_thresholds, 'Zmed': Zmed_13_thresholds,
'Slope': Slope_13_thresholds, 'Aspect': Aspect_13_thresholds,
'Lmax': Lmax_13_thresholds, 'PercDebris': PercDebris_13_thresholds}
all_14_pars = {'Area': Area_14_thresholds, 'Zmin': Zmin_14_thresholds ,
'Zmax':Zmax_14_thresholds, 'Zmed': Zmed_14_thresholds,
'Slope': Slope_14_thresholds, 'Aspect': Aspect_14_thresholds,
'Lmax': Lmax_14_thresholds, 'PercDebris': PercDebris_14_thresholds}
all_15_pars = {'Area': Area_15_thresholds , 'Zmin': Zmin_15_thresholds ,
'Zmax':Zmax_15_thresholds, 'Zmed': Zmed_15_thresholds,
'Slope': Slope_15_thresholds, 'Aspect': Aspect_15_thresholds,
'Lmax': Lmax_15_thresholds, 'PercDebris': PercDebris_15_thresholds}
#If only plotting one parameter in the run, define the parameter of interest
pars_dict = {'PercDebris': PercDebris_13_thresholds}
if option_run_specific_pars == 1:
region_pars = pars_dict
else:
if rgi_regionO1[0] == 13:
region_pars = all_13_pars
elif rgi_regionO1[0] == 14:
region_pars = all_14_pars
elif rgi_regionO1[0] == 15:
region_pars = all_15_pars
else:
print("Please Check Region Specification")
#Binned CSV column name conversion dictionary
# change column names so they are easier to work with (remove spaces, etc.)
sheancoldict = {'# bin_center_elev_m': 'bin_center_elev_m',
' z1_bin_count_valid': 'z1_bin_count_valid',
' z1_bin_area_valid_km2': 'z1_bin_area_valid_km2',
' z1_bin_area_perc': 'z1_bin_area_perc',
' z2_bin_count_valid': 'z2_bin_count_valid',
' z2_bin_area_valid_km2': 'z2_bin_area_valid_km2',
' z2_bin_area_perc': 'z2_bin_area_perc',
' dhdt_bin_count' : 'dhdt_bin_count',
' dhdt_bin_area_valid_km2' : 'dhdt_bin_area_valid_km2',
' dhdt_bin_area_perc' : 'dhdt_bin_area_perc',
' dhdt_bin_med_ma': 'dhdt_bin_med_ma',
' dhdt_bin_mad_ma': 'dhdt_bin_mad_ma',
' dhdt_bin_mean_ma': 'dhdt_bin_mean_ma',
' dhdt_bin_std_ma': 'dhdt_bin_std_ma',
' mb_bin_med_mwea': 'mb_bin_med_mwea',
' mb_bin_mad_mwea': 'mb_bin_mad_mwea',
' mb_bin_mean_mwea': 'mb_bin_mean_mwea',
' mb_bin_std_mwea': 'mb_bin_std_mwea',
' debris_thick_med_m': 'debris_thick_med_m',
' debris_thick_mad_m': 'debris_thick_mad_m',
' perc_debris': 'perc_debris',
' perc_pond': 'perc_pond',
' perc_clean': 'perc_clean',
' dhdt_debris_med' : 'dhdt_debris_med',
' dhdt_pond_med' : 'dhdt_pond_med',
' dhdt_clean_med' : 'dhdt_clean_med',
' vm_med' : 'vm_med',
' vm_mad' : 'vm_mad',
' H_mean' : 'H_mean',
' H_std' : 'H_std'}
#%% Select Files
# Find files for analysis; create list of all binnedcsv filenames (fn)
binnedcsv_files_all = glob.glob(search_binnedcsv_fn)
# Fill in dataframe of glacier names and RGI IDs, of ALL glaciers with binnedcsv, regardless of the region
df_glacnames_all = pd.DataFrame() #empty df
df_glacnames_all['reg_glacno'] = [x.split('/')[-1].split('_')[0] for x in binnedcsv_files_all]
df_glacnames_all['RGIId'] = 'RGI60-' + df_glacnames_all.reg_glacno
# turn region column values from object to float to int, to store just reg
df_glacnames_all['region'] = df_glacnames_all.reg_glacno.astype(float).astype(int)
# split glacno into list of reg and id, and store just the id part as an object
df_glacnames_all['glacno_str'] = (df_glacnames_all.reg_glacno.str.split('.').apply(lambda x: x[1]))
# store the same value as glacno_str, but as an int
df_glacnames_all['glacno'] = df_glacnames_all.glacno_str.astype(int)
# Define df_glacnames containing ONLY the data for desired region(s)
df_glacnames = df_glacnames_all[df_glacnames_all.region.isin(rgi_regionO1) == True]
# make list of all binnedcsv file pathway names
binnedcsv_files = [binnedcsv_files_all[x] for x in df_glacnames.index.values]
# Sort glaciers by region and glacier number
binnedcsv_files = sorted(binnedcsv_files)
df_glacnames = df_glacnames.sort_values('reg_glacno')
df_glacnames.reset_index(drop=True, inplace=True)
# Create dataframe with RGI attributes for each glacier
main_glac_rgi = pd.DataFrame()
for n, region in enumerate(rgi_regionO1):
print('Region', region)
df_glacnames_reg = df_glacnames[df_glacnames.region == region] #temp df for one reg at a time
rgi_glac_number = df_glacnames_reg['glacno_str'].tolist()
#If statement to avoid errors associated with regions that have no glaciers
if len(rgi_glac_number) > 0:
#pull/select data from fxn outputs of pygemfxnsmodelsetup, and from
#pathways and vars defined in pygem input file
main_glac_rgi_reg= modelsetup.selectglaciersrgitable(rgi_regionsO1=[region], rgi_regionsO2='all',
rgi_glac_number=rgi_glac_number)
# concatenate regions
main_glac_rgi = main_glac_rgi.append(main_glac_rgi_reg, ignore_index=True)
#%%MAIN DATASET
# ds is the main dataset for this analysis and is a list of lists (order of glaciers can be found in df_glacnames)
# Data for each glacier is held in a sublist
ds = []
norm_list = []
for n in range(len(binnedcsv_files)):
# Note: RuntimeWarning: invalid error encountered in greater than is due
# to nan and zero values being included in array. This error can be ignored.
# Process binned geodetic data
binnedcsv = pd.read_csv(binnedcsv_files[n])
# Rename columns so they are easier to read
binnedcsv = binnedcsv.rename(columns=sheancoldict)
# ===== Filter out bad values ==========================================================
# Replace strings of nan with nan and make all columns floats or ints
binnedcsv = binnedcsv.replace([' nan'], [np.nan])
for col in binnedcsv.columns.values:
binnedcsv[col] = pd.to_numeric(binnedcsv[col])
# Remove bad values of dhdt
binnedcsv.loc[binnedcsv[dhdt_cn] > dhdt_max, dhdt_cn] = np.nan
binnedcsv.loc[binnedcsv[dhdt_cn] < dhdt_min, dhdt_cn] = np.nan
# If dhdt is nan, remove row
null_bins = binnedcsv.loc[pd.isnull(binnedcsv[dhdt_cn])].index.values
binnedcsv = binnedcsv.drop(null_bins)
# Add percent area valid to main_glac_rgi
main_glac_rgi.loc[n, 'perc_areavalid'] = binnedcsv['z1_bin_area_perc'].sum()
# Debris thickness
binnedcsv['debris_thick_med_m'] = binnedcsv['debris_thick_med_m'].astype(float)
binnedcsv.loc[pd.isnull(binnedcsv['debris_thick_med_m']), 'debris_thick_med_m'] = 0
binnedcsv.loc[binnedcsv['debris_thick_med_m'] < 0, 'debris_thick_med_m'] = 0
binnedcsv.loc[binnedcsv['debris_thick_med_m'] > 5, 'debris_thick_med_m'] = 0
binnedcsv.loc[binnedcsv['debris_thick_med_m'] == -0, 'debris_thick_med_m'] = 0
#Percent Debris
binnedcsv.loc[binnedcsv['perc_debris'] > 100, 'perc_debris'] = 0
binnedcsv.loc[binnedcsv['perc_debris'] <= 0, 'perc_debris'] = 0
# Supraglacial ponds
binnedcsv.loc[binnedcsv['perc_pond'] > 100, 'perc_pond'] = 0
binnedcsv.loc[binnedcsv['perc_pond'] <= 0, 'perc_pond'] = 0
# Clean ice
binnedcsv.loc[binnedcsv['perc_clean'] > 100, 'perc_clean'] = 0
binnedcsv.loc[binnedcsv['perc_clean'] <= 0, 'perc_clean'] = 0
# Find glacier-wide debris perc for each glacier, and add to main_glac_rgi
glacwide_debris = ((binnedcsv['z1_bin_area_valid_km2']*binnedcsv['perc_debris']).sum()
/ binnedcsv['z1_bin_area_valid_km2'].sum())
main_glac_rgi.loc[n, 'PercDebris'] = glacwide_debris
#sort out glaciers based on whether they have all positive dh/dt, all negative dh/dt, or both
#based on evaluating, for each glacier, the max from the list of dhdt and the min from the list.
if np.nanmin(binnedcsv[dhdt_cn].astype(float)) >= 0:
glacwide_dhdt_sign = 1 #glaciers with all positive dh/dt
elif np.nanmax(binnedcsv[dhdt_cn].astype(float)) <= 0:
glacwide_dhdt_sign = -1 #glaciers with all negative dh/dt
else:
glacwide_dhdt_sign = 0 #glaciers with both, + and - dh/dt
main_glac_rgi.loc[n, 'dhdt_sign'] = glacwide_dhdt_sign
# ===== Normalized elevation vs. ice thickness change ===============================
# Normalized elevation
# (max elevation - bin elevation) / (max_elevation - min_elevation)
glac_elev = binnedcsv.bin_center_elev_m.values
binnedcsv['elev_norm'] = (glac_elev[-1] - glac_elev) / (glac_elev[-1] - glac_elev[0])
# Normalized ice thickness change [ma]
# dhdt / dhdt_max
glac_dhdt = binnedcsv[dhdt_cn].values.astype(float)
# Shifted normalized ice thickness change such that everything is negative
binnedcsv['dhdt_norm_shifted'] = (glac_dhdt - np.nanmax(glac_dhdt)) / np.nanmin(glac_dhdt - np.nanmax(glac_dhdt))
binnedcsv.loc[binnedcsv['dhdt_norm_shifted'] == -0, 'dhdt_norm_shifted'] = 0
# Replace positive values to zero
glac_dhdt[glac_dhdt >= 0] = 0
binnedcsv['dhdt_norm_huss'] = glac_dhdt / np.nanmin(glac_dhdt)
binnedcsv.loc[binnedcsv['dhdt_norm_huss'] == -0, 'dhdt_norm_huss'] = 0
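# Worked example: for glac_dhdt = [-2.0, -1.0, 0.5], positives are first set to 0 giving
# [-2.0, -1.0, 0.0]; dividing by the minimum (-2.0) yields dhdt_norm_huss = [1.0, 0.5, 0.0],
# i.e. 1 = maximum thinning and 0 = no change.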
# ===== ADD DATA TO MAIN DATASET =====================================================
# ds is the main datset, n is index of each glacier
# Keep only glaciers with enough good data based on percentage area valid
if main_glac_rgi.loc[n, 'perc_areavalid'] > perc_area_valid_threshold:
ds.append([n, df_glacnames.loc[n, 'RGIId'], binnedcsv, main_glac_rgi.loc[n]])
# ds.append([n, df_glacnames.loc[n, 'RGIId'], binnedcsv, main_glac_rgi.loc[n], main_glac_hyps.loc[n],
# main_glac_icethickness.loc[n], ds_merged_bins])
#%% Remove Unwanted Glaciers
# NOTE: TO USE MAIN_GLAC_RGI ATTRIBUTES, NEED TO ACCESS THEM VIA THE DATASET
# remove them from both ds and norm_list
remove_idx = []
# Indices to remove Surging glaciers (listed as 1 possible, 2 probable, or 3 observed in main_glac_rgi)
if option_remove_surge_glac == 1:
# Remove indices
remove_idx_surge = [i for i in range(len(ds)) if ((ds[i][3].Surging != 9) and (ds[i][3].Surging != 0))]
# Add unique values to list
for i in remove_idx_surge:
if i not in remove_idx:
remove_idx.append(i)
# Indices to remove glaciers with all positive dh/dt values (listed as 1 in main_glac_rgi)
if option_remove_all_pos_dhdt == 1:
#add index of glaciers with all pos values to the Int64 Index list
remove_idx_allposdhdt = [i for i in range(len(ds)) if ds[i][3].dhdt_sign == 1]
for i in remove_idx_allposdhdt:
if i not in remove_idx:
remove_idx.append(i)
# Indices to remove glaciers who have max surface lowering in accumulation area
if option_remove_dhdt_acc == 1:
remove_idx_acc = []
for glac in range(len(ds)):
glac_elevnorm = ds[glac][2]['elev_norm'].values
glac_dhdt_norm = ds[glac][2]['dhdt_norm_huss'].values
acc_idx = np.where(glac_elevnorm < 0.5)[0]
if (glac_dhdt_norm[acc_idx] > acc_dhdt_threshold).any():
remove_idx_acc.append(glac)
for i in remove_idx_acc:
if i not in remove_idx:
remove_idx.append(i)
# ===== Remove glaciers =====
all_glac_idx = range(len(ds))
ds = [ds[i] for i in all_glac_idx if i not in remove_idx]
# ===== Normalized elevation versus ice thickness change list ======
# List of np.array where first column is elev_norm and second column is dhdt_norm
# Each item is a glacier
norm_list = [np.array([ds[i][2]['elev_norm'].values, ds[i][2]['dhdt_norm_huss'].values]).transpose()
for i in range(len(ds))]
#%% MEAN AND STANDARD DEVIATIONS OF CURVES (black lines to add onto plots)
def normalized_stats(norm_list):
# Merge norm_list to make array of all glaciers with same elevation normalization space
max_length = len(max(norm_list,key=len)) #len of glac w most norm values
norm_all = np.zeros((max_length, len(norm_list)+1)) #array: each col a glac, each row a norm dhdt val to be interpolated
# First column is normalized elevation, pulled from the glac with most norm vals
norm_all[:,0] = max(norm_list,key=len)[:,0]
# Loop through each glacier's normalized array (where col1 is elev_norm and col2 is norm dhdt)
for n in range(len(norm_list)):
# print(main_glac_rgi.loc[n,'RGIId']) #NOT SURE IF THIS WILL SHOW THE CORRECT CORRESPONDING GLACIER
norm_single = norm_list[n] # get one glacier at a time
# #Skip over glaciers that contain only NaN values for normalized dhdt
# #(I added this so that it could run, but I want to be sure it doesn't have weird implications.)
# if np.isnan(norm_single[:,1][0]) == True and np.isnan(norm_single[:,1][-1]) == True:
## print('The current glacier likely only contains NaNs, and is being skipped')
# continue
# #also skip over glaciers that contain almost all 0 values .
# Fill in nan values for elev_norm of 0 and 1 with nearest neighbor
norm_single[0,1] = norm_single[np.where(~np.isnan(norm_single[:,1]))[0][0], 1]
norm_single[-1,1] = norm_single[np.where(~np.isnan(norm_single[:,1]))[0][-1], 1]
# Remove nan values.
norm_single = norm_single[np.where(~np.isnan(norm_single[:,1]))] #~ is the same as !
elev_single = norm_single[:,0] #set name for first col of a given glac
dhdt_single = norm_single[:,1] #set name for the second col of a given glac
#loop through each dhdt value of the glacier, and add it and interpolate to add to the
#norm_all array.
for r in range(0, max_length):
if r == 0:
norm_all[r,n+1] = dhdt_single[0] #put the first value dhdt value into the norm_all. n+1 because the first col is taken by the elevnorms.
elif r == (max_length - 1):
norm_all[r,n+1] = dhdt_single[-1] #put the last value into the the last row for the glacier's 'stretched out'(interpolated) normalized curve.
else:
# Find value need to interpolate to
norm_elev_value = norm_all[r,0] #go through each row in the elev (col1)
# Find index of value above it from dhdt_norm, which is a different size
upper_idx = np.where(elev_single == elev_single[elev_single >= norm_elev_value].min())[0][0]
# Find index of value below it
lower_idx = np.where(elev_single == elev_single[elev_single < norm_elev_value].max())[0][0]
#get the two values, based on the indices.
upper_elev = elev_single[upper_idx]
upper_value = dhdt_single[upper_idx]
lower_elev = elev_single[lower_idx]
lower_value = dhdt_single[lower_idx]
#Linearly Interpolate between two values, and plug in interpolated value into norm_all
norm_all[r,n+1] = (lower_value + (norm_elev_value - lower_elev) / (upper_elev - lower_elev) *
(upper_value - lower_value))
# Compute mean and standard deviation
norm_all_stats = pd.DataFrame()
norm_all_stats['norm_elev'] = norm_all[:,0]
norm_all_stats['norm_dhdt_mean'] = np.nanmean(norm_all[:,1:], axis=1)
norm_all_stats['norm_dhdt_std'] = np.nanstd(norm_all[:,1:], axis=1)
norm_all_stats['norm_dhdt_68high'] = norm_all_stats['norm_dhdt_mean'] + norm_all_stats['norm_dhdt_std']
norm_all_stats['norm_dhdt_68low'] = norm_all_stats['norm_dhdt_mean'] - norm_all_stats['norm_dhdt_std']
norm_all_stats.loc[norm_all_stats['norm_dhdt_68high'] > 1, 'norm_dhdt_68high'] = 1
norm_all_stats.loc[norm_all_stats['norm_dhdt_68low'] < 0, 'norm_dhdt_68low'] = 0
return norm_all_stats
norm_stats = normalized_stats(norm_list)
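# Quick visual check of the regional mean curve (a sketch; uses the column names returned
# by normalized_stats above):
# plt.plot(norm_stats['norm_elev'], norm_stats['norm_dhdt_mean'], 'k-')
# plt.fill_between(norm_stats['norm_elev'], norm_stats['norm_dhdt_68low'],
#                  norm_stats['norm_dhdt_68high'], color='k', alpha=0.2)
# plt.gca().invert_yaxis()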
#%% Plots comparing glacier parameters to see if any are related
if option_plot_compareparameters == 1:
parameter1 = 'Area'
parameter2 = 'Lmax'
A = np.array([ds[x][3][parameter1] for x in range(len(ds))])
B = np.array([ds[x][3][parameter2] for x in range(len(ds))])
param_label_dict = {'Area': 'Area [km2]',
'PercDebris': 'Debris cover[%]',
'Slope':'Slope [deg]',
'Lmax': 'Length [km]'}
# ===== PLOT =====
fig_width = 4
fig_height = 3
fig, ax = plt.subplots(1, 1, squeeze=False, figsize=(fig_width,fig_height),
gridspec_kw = {'wspace':0.2, 'hspace':0.5})
ax[0,0].scatter(A,B, color='k', s=1)
ax[0,0].set_xlabel(param_label_dict[parameter1], size=14)
ax[0,0].set_ylabel(param_label_dict[parameter2], size=14)
# Save figure
fig.savefig(fig_fp + ('scatter_' + parameter1 + '_' + parameter2 + '.png'), bbox_inches='tight', dpi=300)
#%% Plots for a histogram of parameter (distribution of values)
def plot_var_histogram():
#plot histogram, where x-axis is the testing_var values, and y-axis is how many glaciers have that given x-axis value
for parameter in histogram_parameters:
parameter_values = np.array([ds[i][3][parameter] for i in range(len(ds))])
plt.hist(parameter_values, 50)
plt.xlabel(parameter)
plt.ylabel('Number of glaciers')
plt.title(parameter + ' Distribution' ' Region' + str(rgi_regionO1))
plt.minorticks_on()
if option_savefigs == 1:
hist_fp = fig_fp + 'histograms/'
if not os.path.exists(hist_fp):
os.makedirs(hist_fp)
plt.savefig(hist_fp + parameter + '_histogram_reg_' + str(rgi_regionO1), bbox_inches='tight')
plt.show()
parameter_lower_bound = int(parameter_values.min())
parameter_upper_bound = np.ceil(parameter_values.max())
print('Range of '+ parameter+ ': (' + str(parameter_lower_bound) + ', ' + str(parameter_upper_bound) + ')')
if option_plot_histogram == 1:
plot_var_histogram()
#%% Plots for a single glacier
def plot_eachglacier(ds, option_merged_dataset=0):
# Set position of dataset to plot in list based on using merged or unmerged elev bin data
# [2 = 10m, 6 = merged]
if option_merged_dataset == 0:
ds_position = 2
elif option_merged_dataset == 1:
ds_position = 6
individual_fp = fig_fp + 'individual_plots/'
if not os.path.exists(individual_fp):
os.makedirs(individual_fp)
# Loop through glaciers and plot
for glac in range(len(ds)):
#pull values from binnedcsv into vars
glac_elevbins = ds[glac][ds_position]['bin_center_elev_m']
glac_area_t1 = ds[glac][ds_position]['z1_bin_area_valid_km2']
glac_area_t2 = ds[glac][ds_position]['z2_bin_area_valid_km2']
glac_mb_mwea = ds[glac][ds_position][mb_cn]
glac_debristhick_cm = ds[glac][ds_position]['debris_thick_med_m'] * 100
glac_debrisperc = ds[glac][ds_position]['perc_debris']
glac_pondperc = ds[glac][ds_position]['perc_pond']
glac_elevnorm = ds[glac][ds_position]['elev_norm']
glac_dhdt_med = ds[glac][ds_position]['dhdt_bin_med_ma']
glac_dhdt_norm_huss = ds[glac][ds_position]['dhdt_norm_huss']
glac_dhdt_norm_shifted = ds[glac][ds_position]['dhdt_norm_shifted']
glac_elevs = ds[glac][ds_position]['bin_center_elev_m']
glacwide_mb_mwea = (glac_area_t1 * glac_mb_mwea).sum() / glac_area_t1.sum()
glac_name = ds[glac][1].split('-')[1]
# dhdt (raw) vs. elevation (raw)
plt.figure(figsize = (20, 12))
plt.plot(glac_elevs, glac_dhdt_med, label=glac_name)
plt.gca().invert_xaxis()
plt.xlabel('Elevation (m)')
plt.ylabel('Ice thickness Change [m/a]')
plt.title('Raw dh/dt\n')
plt.minorticks_on()
# Plot Elevation bins vs. Area, Mass balance, and Debris thickness/pond coverage/ debris coverage
plt.figure(figsize=(10,6))
plt.subplots_adjust(wspace=0.05, hspace=0.05)
plt.suptitle(ds[glac][1], y=0.94)
# Elevation vs. Area
plt.subplot(1,3,1)
plt.plot(glac_area_t1, glac_elevbins, label='t1')
plt.plot(glac_area_t2, glac_elevbins, label='t2')
plt.ylabel('Elevation [masl, WGS84]')
plt.xlabel('Glacier area [km2]')
plt.minorticks_on()
plt.legend()
# Elevation vs. Mass Balance
plt.subplot(1,3,2)
plt.plot(glac_mb_mwea, glac_elevbins, 'k-', label=str(round(glacwide_mb_mwea, 2)) + ' mwea')
# k refers to the color (k=black, b=blue, g=green, etc.)
# - refers to using a line (-- is a dashed line, o is circle points, etc.)
plt.ylabel('Elevation [masl, WGS84]')
plt.xlabel('Mass balance [mwea]')
plt.xlim(-3, 3)
plt.xticks(np.arange(-3, 3 + 1, 1))
plt.axvline(x=0, color='k')
plt.fill_betweenx(glac_elevbins, glac_mb_mwea, 0, where=glac_mb_mwea<0, color='r', alpha=0.5)
plt.fill_betweenx(glac_elevbins, glac_mb_mwea, 0, where=glac_mb_mwea>0, color='b', alpha=0.5)
plt.legend(loc=1)
plt.minorticks_on()
plt.gca().axes.get_yaxis().set_visible(False)
# Elevation vs. Debris Area, Pond Area, Thickness
plt.subplot(1,3,3)
plt.plot(glac_debrisperc, glac_elevbins, label='Debris area')
plt.plot(glac_pondperc, glac_elevbins, label='Pond area')
plt.plot(glac_debristhick_cm, glac_elevbins, 'k-', label='Thickness')
plt.ylabel('Elevation [masl, WGS84]')
plt.xlabel('Debris thickness [cm], Area [%]')
plt.minorticks_on()
plt.legend()
plt.gca().axes.get_yaxis().set_visible(False)
if option_savefigs == 1:
plt.savefig(individual_fp + '/mb_fig' + ds[glac][1] + '_mb_aed.png', bbox_inches='tight')
plt.show()
# Elevation change vs. Elevation
plt.figure(figsize=(10,3))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
# Normalized curves using range of dh/dt
plt.subplot(1,3,1)
plt.plot(glac_elevs, glac_dhdt_med, label=ds[glac][1])
plt.gca().invert_xaxis()
plt.xlabel('Elevation [m]')
plt.ylabel('dh/dt [m/a]')
plt.title('dhdt vs elevation')
plt.minorticks_on()
plt.legend()
# Normalized curves using dhdt max (according to Huss)
plt.subplot(1,3,2)
plt.plot(glac_elevnorm, glac_dhdt_norm_huss, label=ds[glac][1])
plt.xlabel('Normalized elev range')
plt.ylabel('Normalized dh/dt [ma]')
plt.title('huss normalization')
if glac_dhdt_med.min() < 0:
plt.gca().invert_yaxis()
plt.minorticks_on()
plt.legend()
# Normalized curves shifting all values to be negative
plt.subplot(1,3,3)
plt.plot(glac_elevnorm, glac_dhdt_norm_shifted, label=ds[glac][1])
plt.ylim(1,0)
plt.xlabel('Normalized elev range')
plt.title('shifted normalization')
plt.minorticks_on()
plt.legend()
if option_savefigs == 1:
plt.savefig(individual_fp + 'Single_Plots' + ds[glac][1] + '_normcurves.png', bbox_inches='tight')
plt.show()
if option_plot_eachglacier == 1:
plot_eachglacier(ds, option_merged_dataset=option_use_mergedata)
#%% Plot multiple glaciers on the same plot
def plot_multipleglaciers_single_threshold(ds, option_merged_dataset=0, parameter='Area', threshold_n=0):
# Set position of dataset to plot in list based on using merged or unmerged data
# [2 = 10m, 6 = merged]
if option_merged_dataset == 0:
ds_position = 2 #refer to binnedcsv
elif option_merged_dataset == 1:
ds_position = 6 #refers to the ds of merged elev bin data
#plot empty figure
plt.figure(figsize=(10,6))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
#set counters to keep track of total number of glac > and < threshold
count_lt = 0
count_gt = 0
norm_list_gt = []
norm_list_lt = []
# Parameter values
# parameter_values = np.array([ds[i][3][parameter] for i in range(len(ds))])
#loop through each glacier, in order of ascending parameter, accessing binnedcsv values
for glac in range(len(ds)):
glac_rgi = ds[glac][3]
# glac_elevbins = ds[glac][ds_position]['bin_center_elev_m']
# glac_area_t1 = ds[glac][ds_position]['z1_bin_area_valid_km2']
# glac_area_t2 = ds[glac][ds_position]['z2_bin_area_valid_km2']
# glac_area_t1_perc = ds[glac][ds_position]['z1_bin_area_perc']
# glac_bin_count_t1 = ds[glac][ds_position]['z1_bin_count_valid']
# glac_mb_mwea = ds[glac][ds_position][mb_cn]
# glac_debristhick_cm = ds[glac][ds_position]['debris_thick_med_m'] * 100
# glac_debrisperc = ds[glac][ds_position]['perc_debris']
# glac_pondperc = ds[glac][ds_position]['perc_pond']
glac_elevnorm = ds[glac][ds_position]['elev_norm']
glac_dhdt_norm_huss = ds[glac][ds_position]['dhdt_norm_huss']
# glac_dhdt_norm_shifted = ds[glac][ds_position]['dhdt_norm_shifted']
glac_dhdt_med = ds[glac][ds_position]['dhdt_bin_med_ma']
# glac_dhdt_mean = ds[glac][ds_position]['dhdt_bin_mean_ma']
# glac_dhdt_std = ds[glac][ds_position]['dhdt_bin_std_ma']
glac_elevs = ds[glac][ds_position]['bin_center_elev_m']
# glacwide_mb_mwea = (glac_area_t1 * glac_mb_mwea).sum() / glac_area_t1.sum()
glac_name = ds[glac][1].split('-')[1]
# Subset parameters based on column name and threshold
if glac_rgi[parameter] < threshold_n:
count_lt += 1
# Make list of array containing elev_norm and dhdt_norm_huss
norm_list_lt.append(np.array([glac_elevnorm.values,
glac_dhdt_norm_huss.values]).transpose())
# dhdt (raw) vs. elevation (raw)
plt.subplot(2,2,1)
plt.plot(glac_elevs, glac_dhdt_med, label=glac_name)
if count_lt == 1:
plt.gca().invert_xaxis()
plt.xlabel('Elevation (m)')
plt.ylabel('dh/dt [m/a]')
plt.title('Raw dh/dt\n' + parameter + '<' + str(threshold_n))
plt.minorticks_on()
# Huss Norm dhdt vs. Norm Elev
plt.subplot(2,2,2)
plt.rcParams.update({'font.size': 12})
plt.plot(glac_elevnorm, glac_dhdt_norm_huss, label=glac_name, alpha=glacier_plots_transparency)
plt.xlabel('Normalized Elevation Range')
plt.ylabel('Normalized dh/dt')
if count_lt == 1:
plt.gca().invert_yaxis()
plt.title('Huss Normalization (' + str(count_lt) + ' Glaciers)\n' + parameter + '<' + str(threshold_n))
plt.minorticks_on()
if option_show_legend == 1:
plt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.)
# Subset parameters based on column name and threshold
elif glac_rgi[parameter] >= threshold_n:
count_gt += 1
# Make list of array containing elev_norm and dhdt_norm_huss
norm_list_gt.append(np.array([glac_elevnorm.values,
glac_dhdt_norm_huss.values]).transpose())
# dhdt vs. elevation
plt.subplot(2,2,3)
plt.plot(glac_elevs, glac_dhdt_med, label=glac_name)
if count_gt == 1:
plt.gca().invert_xaxis()
plt.xlabel('Elevation (m)')
plt.ylabel('dh/dt [m/a]')
plt.title('Raw dh/dt\n' + parameter + '>' + str(threshold_n))
plt.minorticks_on()
# Normalized curves using dhdt max (according to Huss)
plt.subplot(2,2,4)
plt.plot(glac_elevnorm, glac_dhdt_norm_huss, label=glac_name, alpha=glacier_plots_transparency)
plt.xlabel('Normalized Elevation Range')
plt.ylabel('Normalized dh/dt')
if count_gt == 1:
plt.gca().invert_yaxis()
plt.title('Huss Normalization (' + str(count_gt) +' Glaciers)\n' + parameter + '>' + str(threshold_n))
plt.minorticks_on()
#display legend, if defined as such in "Input Data" section
if option_show_legend == 1:
plt.legend(bbox_to_anchor=(1.2, 1), loc=3, borderaxespad=0.)
print(count_gt, 'Glaciers above threshold for', parameter)
print(count_lt, 'Glaciers below threshold for', parameter)
# Put mean and plus/minus 1 standard deviation on normalized plots
norm_lt_stats = pd.DataFrame()
    norm_gt_stats = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Fast geotagging of big data (with a progress bar!)
See README.md or geotag.py --help for more information.
"""
import dataclasses
import typing
import geopandas
import pandas
import shapely.geometry
import rtree
import tqdm
tqdm.tqdm.pandas()
@dataclasses.dataclass
class GeotagInput:
input_file: str
input_column: str
output_column: str
class Geotagger:
""" Class encapsulating fast lookups of unique identifiers from spatial
data using an R-tree index.
Attributes:
id_strs: A dict that maps the integer representation of each unique ID
to its original string representation - rtree only supports
integer keys, so this allows us to get back things like leading
zeroes that become lost in the type conversion.
shapes: A dict that maps the integer representation of each unique ID
to its corresponding shape.
index: The R-tree index.
"""
def __init__(self,
gdf: geopandas.geodataframe.GeoDataFrame,
id_column: str = "GEOID",
verbose: bool = False
) -> None:
""" Initialize Geotagger object.
Args:
gdf: A GeoDataFrame containing polygons to use for geotagging.
id_column: The column of the GeoDataFrame (e.g. field of a
shapefile) to pull unique IDs from (must be convertable to int).
verbose: If True, print what is being done.
"""
self.id_strs = {int(id_): id_ for id_ in gdf[id_column]}
self.shapes = gdf.set_index(id_column)["geometry"].to_dict()
self.index = rtree.index.Index()
iterable = self.shapes.items()
if verbose:
iterable = tqdm.tqdm(
self.shapes.items(),
"Creating rtree index",
unit=" indexed"
)
for id_, shape in iterable:
self.index.insert(int(id_), shape.bounds)
def lookup(self, x: float, y: float) -> typing.Optional[int]:
""" Look up a coordinate pair's unique ID.
Args:
x: The longitude, as a float.
y: The latitude, as a float.
Returns:
The unique ID, if any.
"""
results = list(self.index.intersection((x, y, x, y)))
# single result: return it
if len(results) == 1:
return self.id_strs[results[0]]
# multiple results: check which polygon contains the point
else:
point = shapely.geometry.Point(x, y)
for id_ in results:
id_str = self.id_strs[id_]
shape = self.shapes[id_str]
if shape.contains(point):
return id_str
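# Minimal usage sketch (hypothetical shapefile name and coordinates, not part of the
# original workflow): build a Geotagger from a block-group shapefile and look up the
# unique ID of the polygon containing a single point.
def _example_geotagger_lookup() -> typing.Optional[str]:
    gdf = geopandas.read_file("zip://tl_2020_us_bg.zip")  # hypothetical input file
    tagger = Geotagger(gdf, id_column="GEOID")
    return tagger.lookup(-93.2650, 44.9778)  # GEOID containing the point, or None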
def parse_geotag_input(geotag_input: str) -> GeotagInput:
""" Parse a geotag operation instructions string.
Instruction strings should have the following format:
input_file$input_column>output_column
Args:
geotag_input: A string containing instructions for a geotag operation.
Returns: A GeotagInput containing the parsed geotag_input.
"""
(input_file, other_fields) = geotag_input.split("$")
(input_column, output_column) = other_fields.split(">")
return GeotagInput(
input_file.strip(), input_column.strip(), output_column.strip()
)
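# Illustrative sketch (made-up file name): parsing one instruction string of the
# documented "input_file$input_column>output_column" form.
def _example_parse_geotag_input() -> GeotagInput:
    # Expected result: GeotagInput("tl_2020_us_bg.shp", "GEOID", "geoid_bg")
    return parse_geotag_input("tl_2020_us_bg.shp$GEOID>geoid_bg")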
def dummy_function(*_, **__) -> None:
""" A function that does nothing. """
pass
if __name__ == "__main__":
import argparse
import glob
import copy
parser = argparse.ArgumentParser()
parser.add_argument(
"geotag", nargs="+",
help="A list of geotag operation instructions. These should be in the format \"input_file$input_column>output_column\". This is passed directly into geopandas.read_file(). To directly read compressed shapefile archives, use \"zip://path/to/shapefile.zip\". NOTE: Be careful about bash! Be sure to enclose in single quotes (lack of quotes will register \">\" as an output redirect; double quotes will still register \"$\" as a variable prefix. Globs are also supported, e.g. \"tl_2020*bg.shp$GEOID>geoid_bg\", but all globbed inputs should have the same fields."
)
parser.add_argument(
"-i", "--input", required=True,
help="The path to the input file. This is passed directly into pandas.read_csv()."
)
parser.add_argument(
"-o", "--output", required=True,
help="The path to the output file. This is passed directly into pandas.core.frame.DataFrame.write_csv(). For compression, simply append \".gz\", etc."
)
parser.add_argument(
"-l", "--longitude", required=True, metavar="LONGITUDE_FIELD",
help="The name of the field containing longitude data in the input file."
)
parser.add_argument(
"-L", "--latitude", required=True, metavar="LATITUDE_FIELD",
help="The name of the field containing latitude data in the input file."
)
parser.add_argument(
"-s", "--subset", metavar="SUBSET_COLUMNS",
help="Optional. Mutually exclusive with -r/--rownames-only. A comma-separated list of fields to subset the input file to. This is passed to pandas.read_csv(), so this can be useful for limiting memory usage."
)
parser.add_argument(
"-r", "--rownames-only", action="store_true", default=False,
help="Optional. Mutually exclusive with -s/--subset. Creates a new column containing the data frame rownames and drops all other columns from the input. Rownames will be R-style, starting at 1, rather than pandas-style, starting at 0."
)
parser.add_argument(
"-f", "--force-overwrite", action="store_true", default=False,
help="Optional. Allows overwriting of existing columns in the data frame post-subsetting. Will not allow for overwriting of the longitude or latitude columns."
)
parser.add_argument(
"-v", "--verbose", action="store_true", default=False,
help="Optional. Causes geotag.py to print out everything it is doing (otherwise the script will run without any output)."
)
args = parser.parse_args()
# verbosity settings
display = print
if not args.verbose:
display = dummy_function
# input validation
geotag_inputs = []
for geotag_input in args.geotag:
try:
geotag_inputs.append(parse_geotag_input(geotag_input))
except:
raise Exception("could not parse geotag instructions {}".format(geotag_input))
original_subset_columns = None
subset_columns = None
if args.subset:
original_subset_columns = args.subset.split(",")
subset_columns = copy.copy(original_subset_columns)
# need to force inclusion of longitude and latitude columns; remove later
if args.longitude not in subset_columns:
display(
"Forcing inclusion of \"{}\" pandas.read_csv (will remove later)"
.format(args.longitude)
)
subset_columns.append(args.longitude)
if args.latitude not in subset_columns:
display(
"Forcing inclusion of \"{}\" in pandas.read_csv (will remove later)"
.format(args.latitude)
)
subset_columns.append(args.latitude)
if args.subset and args.rownames_only:
raise Exception("-s/--subset and -r/--rownames-only are mutually-exclusive")
# check for duplicate output columns
output_columns = [
geotag_input.output_column
for geotag_input in geotag_inputs
]
if len(output_columns) != len(set(output_columns)):
for column in set(output_columns):
# inefficient, but probably not an issue due to small size
output_columns.remove(column)
raise Exception(
"The following output columns are duplicated: {}"
.format(", ".join(output_columns))
)
if args.longitude in output_columns:
raise Exception(
"Longitude column \"{}\" overlaps with an output column"
.format(args.longitude)
)
if args.latitude in output_columns:
raise Exception(
"Latitude column \"{}\" overlaps with an output column"
.format(args.latitude)
)
# read input
if args.subset:
display(
"Reading input file (subsetting to {} as per -s/--subset): {}"
.format(subset_columns, args.input)
)
df = pandas.read_csv(args.input, usecols=subset_columns)
elif args.rownames_only:
required_columns = [args.longitude, args.latitude]
display(
"Reading input file (subsetting to {} as per -r/--rownames-only): {}"
.format(required_columns, args.input)
)
df = pandas.read_csv(args.input, usecols=required_columns)
else:
display("Reading input file: {}".format(args.input))
df = pandas.read_csv(args.input)
# drop null coordinates
n_original_rows = len(df)
df = df.dropna(subset=[args.longitude, args.latitude])
n_dropped_rows = n_original_rows - len(df)
    display(
        "Dropped {}/{} rows with missing coordinates ({:0.2f}%)".format(
n_dropped_rows, n_original_rows, n_dropped_rows/n_original_rows*100
)
)
# generate rownames
if args.rownames_only:
display("Generating rownames")
        df["rowname"] = pandas.Series(df.index)
"""Tests various time series functions which are used extensively in tcapy
"""
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas as pd
import numpy as np
from datetime import timedelta
from pandas.testing import assert_frame_equal
from tcapy.util.timeseries import TimeSeriesOps
from tcapy.util.customexceptions import *
from test.config import *
ticker = 'EURUSD'
start_date = '20 Apr 2017'
finish_date = '07 Jun 2017'
def test_vlookup():
"""Runs a test for the VLOOKUP function which is used extensively in a lot of the metric construction
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min')
rand_data = np.random.random(len(dt))
df_before = pd.DataFrame(index=dt, columns=['rand'], data=rand_data)
millseconds_tests = [100, 500]
    # Try perturbing by 100 and then 500 milliseconds
for millseconds in millseconds_tests:
df_perturb = pd.DataFrame(index=dt - timedelta(milliseconds=millseconds), columns=['rand'],
data=rand_data)
# Do a VLOOKUP (which should give us all the previous ones) - take off the last point (which would be AFTER
# our perturbation)
search, dt_search = TimeSeriesOps().vlookup_style_data_frame(dt[0:-1], df_perturb, 'rand')
df_after = pd.DataFrame(index=dt_search + timedelta(milliseconds=millseconds), data=search.values,
columns=['rand'])
# check the search dataframes are equal
        assert_frame_equal(df_before[0:-1], df_after, check_dtype=False)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze CSV file into scores.
Created on Sat Feb 12 22:15:29 2022 // @hk_nien
"""
from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
# Regio Noord
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
(1811, 'Alkmaar'),
(7471, 'Goor'),
(7556, 'Hengelo'),
(7903, 'Hoogeveen'),
(7942, 'Meppel'),
(8011, 'Zwolle'),
(8232, 'Lelystad'),
(8442, 'Heerenveen'),
(8911, 'Leeuwarden'),
(9291, 'Kollum'),
(9501, 'Stadskanaal'),
(9726, 'Groningen'),
# Regio Midden
(2406, '<NAME>/<NAME>'),
(2515, '<NAME>'),
(3013, 'Rotterdam'),
(3511, 'Utrecht'),
(3901, 'Veenendaal'),
((7137, 7131), 'Lichtenvoorde|Groenlo'),
(7311, 'Apeldoorn'),
# Regio Zuid
(4325, 'Renesse'),
(4462, 'Goes'),
(4701, 'Roosendaal'),
(5038, 'Tilburg'),
(5401, 'Uden'),
(5611, 'Eindhoven'),
(5801, 'Oostrum'),
(6101, 'Echt'),
(6229, 'Maastricht'),
(6541, 'Nijmegen'),
])
def get_bad_scan_times():
"""Return list of Timestamps with bad scan times, from CSV data."""
df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')
tstamps = pd.to_datetime(df['Timestamp']).to_list()
return tstamps
def _mean_time(ts_list):
"""Return mean timestamp value from list of timestamps."""
ts0 = ts_list[0]
delta_sum = pd.Timedelta(0)
for ts in ts_list:
delta_sum += (ts -ts0)
ts_mean = ts0 + delta_sum / len(ts_list)
return ts_mean
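# Illustrative sketch (made-up timestamps): the mean of 12:00 and 14:00 on the same
# day is 13:00.
def _example_mean_time():
    stamps = [pd.Timestamp('2022-02-12 12:00'), pd.Timestamp('2022-02-12 14:00')]
    return _mean_time(stamps)  # -> Timestamp('2022-02-12 13:00:00')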
def _delta_time_hhmm(hm):
"""Convert 'hh:mm' string to TimeDelta."""
return pd.Timedelta(f'{hm}:00')
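# Illustrative sketch: an 'hh:mm' string beyond 24 h rolls over into days.
def _example_delta_time_hhmm():
    return _delta_time_hhmm('36:30')  # -> Timedelta('1 days 12:30:00')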
def _summary_to_scores(summary):
"""Convert summary from _read_log to scores dict and effective timestamp.
Parameters:
- summary: dict with int(pc4) -> [(query_time, appt_time), ...]
Return:
- scores dict: int(pc4) -> score (int or float or '?')
- timestamp: middle query timestamp of this run.
"""
# Convert to number codes.
scores = {k: '?' for k in PCODES}
multi_pcs = {} # pc4 -> (pc4[0], pc4[1], ...)
for pc in PCODES:
if isinstance(pc, tuple):
for pc1 in pc:
multi_pcs[pc1] = pc
qtms = []
dhm = _delta_time_hhmm
for pc4, vlist in summary.items():
pc4 = int(pc4)
if pc4 not in scores:
if pc4 in multi_pcs:
pc4_key = multi_pcs[pc4]
else:
print(f'{pc4} not in list...')
continue
else:
pc4_key = pc4
if len(vlist) == 0:
scores[pc4_key] = 7
continue
qtm = _mean_time([v[0] for v in vlist]) # query time
qtms.append(qtm)
atm = min(v[1] for v in vlist) # earliest appointment time
qtm_00 = pd.Timestamp(qtm.strftime('%Y-%m-%dT00:00'))
thresholds = [
(3, qtm_00 + dhm('23:59')),
(4, qtm + dhm('24:00')),
(5, qtm_00 + dhm('48:00')),
(6, qtm + dhm('48:00')),
(6.3, qtm_00 + dhm('72:00')),
(6.7, qtm + dhm('72:00')),
(7, atm)
]
if qtm.hour < 9:
thresholds.insert(0, (1, qtm_00 + dhm('13:00')))
elif qtm.hour < 13:
thresholds.insert(0, (1, qtm + dhm('4:00')))
elif qtm.hour < 17:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm + dhm('20:00')))
else:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm_00 + dhm('37:00')))
for s, tm in thresholds:
if atm < tm:
scores[pc4_key] = s
break
if len(qtms) == 0:
qtm_mid = pd.Timestamp(None)
else:
qtm_min = min(qtms)
qtm_mid = qtm_min + (max(qtms) - qtm_min)/2
return scores, qtm_mid
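# Illustrative sketch (made-up scan/appointment times): a 10:00 scan whose earliest
# Amsterdam appointment is the next day at 11:00 (more than 24 h later, but before the
# end of the next calendar day) maps to score 5; all other postcodes stay '?'.
def _example_summary_to_scores():
    qtm = pd.Timestamp('2022-02-12 10:00')
    atm = pd.Timestamp('2022-02-13 11:00')
    scores, qtm_mid = _summary_to_scores({1011: [(qtm, atm)]})
    return scores[1011], qtm_mid  # -> (5, Timestamp('2022-02-12 10:00:00'))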
def _get_min_wait(summary):
"""Return minimum and median wait Timedelta between scan time and appointment.
summary is dict of pc4 -> list of timestamps
No data -> 999 h.
For the median, NaT is counted as infinite.
"""
wtimes = []
for _, vlist in summary.items():
wtimes_this = [atm - qtm for qtm, atm in vlist]
wtimes.append(
min(wtimes_this) if wtimes_this else pd.Timedelta(99, 'h')
)
minwait = min(wtimes) if wtimes else 999
medwait = pd.Timedelta(np.median(wtimes))
return minwait, medwait
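# Illustrative sketch (made-up waits of 3 h and 26 h for two postcodes): the minimum
# wait is 3 h and the median wait is 14 h 30 min.
def _example_get_min_wait():
    qtm = pd.Timestamp('2022-02-12 10:00')
    summary = {1011: [(qtm, qtm + pd.Timedelta('3h'))],
               1625: [(qtm, qtm + pd.Timedelta('26h'))]}
    return _get_min_wait(summary)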
def load_csv(csv_fname):
"""Return DataFrame and list of start times (+1)."""
df = pd.read_csv(csv_fname, comment='#')
df['req_pc4'] = df['req_pc4'].astype(int)
for c in df.columns:
if c.endswith('_time') or c.endswith('_date'):
df[c] = pd.to_datetime(df[c])
else:
df.loc[df[c].isna(), c] = None
# start_tms: list of scan start times (plus one extra at the end)
start_tms = df.loc[df['scan_time'].diff() > pd.Timedelta('10 min'), 'scan_time']
start_tms = [df.iloc[0]['scan_time']] + list(start_tms)
start_tms += [df.iloc[-1]['scan_time'] + pd.Timedelta('1 min')]
return df, start_tms
def load_multi_csvs(csv_fnames):
"""Return DataFrame and list of start times (+1)"""
dfs = []
start_tms = []
for f in csv_fnames:
df, st = load_csv(f)
dfs.append(df)
start_tms.extend(st[:-1])
df = pd.concat(dfs).reset_index()
start_tms.append(df.iloc[-1]['scan_time'] + pd.Timedelta('1 min'))
return df, start_tms
def get_scan_scores(df, tm_range):
"""Get scan scores as pc4 -> score dict.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_range: (tm_start, tm_stop) timestamps.
Return:
- tstamp: timestamp of the scan (mid-point)
- scores: dict of pc4->score
- min_wait: Timedelta of minimum wait time from scan to appointment
"""
mask = (df['scan_time'] >= tm_range[0]) & (df['scan_time'] < tm_range[1])
df1 = df.loc[mask]
summary = {}
for pc4, city_re in PCODES.items():
pc4_tup = (pc4,) if isinstance(pc4, int) else pc4
options = []
req_pc4 = None
for _, row in df1.loc[df1['req_pc4'].isin(pc4_tup)].iterrows():
req_pc4 = int(row['req_pc4'])
for i in range(3):
addr = row[f'opt{i}_short_addr']
if addr and re.match(f'{city_re}$', addr[5:]):
options.append((row['scan_time'], row[f'opt{i}_time']))
if req_pc4 is not None:
summary[req_pc4] = options
scores, tstamp = _summary_to_scores(summary)
if pd.isna(tstamp):
tstamp = df1.iloc[len(df1)//2]['scan_time']
minwait, medwait = _get_min_wait(summary)
if medwait == 999:
medwait = pd.Timedelta(None)
return tstamp, scores, minwait, medwait
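# Minimal usage sketch (hypothetical CSV path): load one scan log and score the first
# scan run, i.e. the interval between the first two detected start times.
def _example_get_scan_scores(csv_fname='data-ggd/ggd_scans.csv'):
    df, start_tms = load_csv(csv_fname)
    return get_scan_scores(df, start_tms[:2])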
def get_scan_scores_df(df, tm_ranges, decimal_comma=True):
"""Get scan scores as dataframe, from csv dataframe.
Blacklisted scan times are dropped.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_ranges: list of timestamps (+one at the end) with boundaries
of timestamp ranges.
- decimal_comma: True to have string values 6,3 rather than float 6.3.
Return:
- Dataframe with scores, date_str, time_str, pc4, min_wait, med_wait as columns.
"""
n = len(tm_ranges)
records = []
index = []
minwait_hs = []
medwait_hs = []
bad_stimes = get_bad_scan_times()
for i in range(n-1):
tm_ra = tm_ranges[i:i+2]
is_ok = True
for tm in bad_stimes:
if tm_ra[0] <= tm < tm_ra[1]:
is_ok = False
break
if not is_ok:
print(f'Dropped scan at {tm_ra[0].strftime("%Y-%m-%d %H:%M")}')
continue
tm, scores, minwait, medwait = get_scan_scores(df, tm_ra)
records.append(scores)
index.append(tm)
minwait_hs.append(minwait.total_seconds() / 3600)
medwait_hs.append(medwait.total_seconds() / 3600)
dates = [t.strftime('%Y-%m-%d') for t in index]
times = [t.strftime('%H:%M') for t in index]
    sdf = pd.DataFrame.from_records(records)
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
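# Illustrative sketch (made-up matrices and dates): two days into a five-day compliance
# ramp, the contact matrix is 40% of the way from Nc_old to Nc_new.
def _example_ramp_fun():
    Nc_old = np.array([[10.0]])
    Nc_new = np.array([[4.0]])
    t_start = pd.Timestamp('2020-03-15')
    t = t_start + pd.Timedelta('2D')
    return ramp_fun(Nc_old, Nc_new, t, t_start, l=5)  # -> array([[7.6]])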
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
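# Minimal usage sketch (assumes the interim Proximus CSVs are present on disk): load all
# provincial fractional mobility matrices and fall back on the average matrix for a date
# without data.
def _example_load_mobility(date='2020-08-01'):
    all_mobility_data, average_mobility_data = load_all_mobility_data('prov', dtype='fractional')
    return all_mobility_data['place'].get(pd.Timestamp(date), average_mobility_data)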
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps). This
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
###################
## VOC functions ##
###################
class make_VOC_function():
"""
    Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
    Current implementation includes the alpha to delta strains.
    If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
    A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
    *df_abc: pd.DataFrame (optional)
        Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
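# Illustrative sketch: the default (data-free) VOC function can be evaluated directly;
# it returns the modelled fractions of [pre-existing variants, alpha-gamma, delta] at a
# given date. The date below is just an example.
def _example_VOC_fractions(date='2021-03-01'):
    VOC_function = make_VOC_function()
    return VOC_function(pd.Timestamp(date), states=None, param=None)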
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first dose data by sciensano are used. In the future, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
                raise Exception(f"Space is {self.space_agg}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
            NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
        First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
        daily_doses : int
            Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for non-spatial multi-vaccindation dose model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionnary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
        t4 = pd.Timestamp('2020-08-03')
import os
from nose.tools import *
import unittest
import pandas as pd
from py_entitymatching.utils.generic_helper import get_install_path
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.io.parsers import read_csv_metadata
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
catalog_datasets_path = os.sep.join([get_install_path(), 'tests',
'test_datasets', 'catalog'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class CatalogManagerTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
def test_get_property_valid_df_name_1(self):
# cm.del_catalog()
df = read_csv_metadata(path_a)
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
def test_get_property_valid_df_name_2(self):
# cm.del_catalog()
self.assertEqual(cm.get_catalog_len(), 0)
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_property(C, 'key'), '_id')
self.assertEqual(cm.get_property(C, 'fk_ltable'), 'ltable_ID')
self.assertEqual(cm.get_property(C, 'fk_rtable'), 'rtable_ID')
self.assertEqual(cm.get_property(C, 'ltable').equals(A), True)
self.assertEqual(cm.get_property(C, 'rtable').equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_property_invalid_df_1(self):
cm.get_property(10, 'key')
@raises(AssertionError)
def test_get_property_invalid_path_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
cm.get_property(A, None)
# cm.del_catalog()
@raises(KeyError)
def test_get_property_df_notin_catalog(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.get_property(A, 'key')
# cm.del_catalog()
def test_set_property_valid_df_name_value(self):
# cm.del_catalog()
df = pd.read_csv(path_a)
cm.set_property(df, 'key', 'ID')
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_invalid_df(self):
# cm.del_catalog()
cm.set_property(None, 'key', 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_valid_df_invalid_prop(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.set_property(A, None, 'ID')
# cm.del_catalog()
def test_init_properties_valid(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.init_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), True)
# cm.del_catalog()
@raises(AssertionError)
def test_init_properties_invalid_df(self):
cm.init_properties(None)
def test_get_all_properties_valid_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
m = cm.get_all_properties(A)
self.assertEqual(len(m), 1)
self.assertEqual(m['key'], 'ID')
# cm.del_catalog()
def test_get_all_properties_valid_2(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
m = cm.get_all_properties(C)
self.assertEqual(len(m), 5)
self.assertEqual(m['key'], '_id')
self.assertEqual(m['fk_ltable'], 'ltable_ID')
self.assertEqual(m['fk_rtable'], 'rtable_ID')
self.assertEqual(m['ltable'].equals(A), True)
self.assertEqual(m['rtable'].equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_all_properties_invalid_df_1(self):
# cm.del_catalog()
C = cm.get_all_properties(None)
@raises(KeyError)
def test_get_all_properties_invalid_df_2(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
C = cm.get_all_properties(A)
def test_del_property_valid_df_name(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key')
self.assertEqual(len(cm.get_all_properties(A)), 0)
@raises(AssertionError)
def test_del_property_invalid_df(self):
cm.del_property(None, 'key')
@raises(AssertionError)
def test_del_property_invalid_property(self):
A = read_csv_metadata(path_a)
cm.del_property(A, None)
@raises(KeyError)
def test_del_property_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_property(A, 'key')
@raises(KeyError)
def test_del_property_prop_notin_catalog(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key1')
def test_del_all_properties_valid_1(self):
A = read_csv_metadata(path_a)
cm.del_all_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), False)
def test_del_all_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.del_all_properties(C)
self.assertEqual(cm.is_dfinfo_present(C), False)
@raises(AssertionError)
def test_del_all_properties_invalid_df(self):
cm.del_all_properties(None)
@raises(KeyError)
def test_del_all_properties_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_all_properties(A)
def test_get_catalog_valid(self):
A = read_csv_metadata(path_a)
cg = cm.get_catalog()
self.assertEqual(len(cg), 1)
def test_del_catalog_valid(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
cg = cm.get_catalog()
self.assertEqual(len(cg), 0)
def test_is_catalog_empty(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
self.assertEqual(cm.is_catalog_empty(), True)
def test_is_dfinfo_present_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, True)
def test_is_dfinfo_present_valid_2(self):
A = pd.read_csv(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_dfinfo_present_invalid(self):
cm.is_dfinfo_present(None)
def test_is_property_present_for_df_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key')
self.assertEqual(status, True)
def test_is_property_present_for_df_valid_2(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key1')
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_property_present_for_df_invalid_df(self):
cm.is_property_present_for_df(None, 'key')
@raises(KeyError)
def test_is_property_present_for_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.is_property_present_for_df(A, 'key')
def test_catalog_len(self):
A = read_csv_metadata(path_a)
self.assertEqual(cm.get_catalog_len(), 1)
def test_set_properties_valid_1(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.init_properties(B)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
def test_set_properties_valid_2(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
@raises(AssertionError)
def test_set_properties_invalid_df_1(self):
cm.set_properties(None, {})
@raises(AssertionError)
def test_set_properties_invalid_dict_1(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, None)
def test_set_properties_df_notin_catalog_replace_false(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, {}, replace=False)
self.assertEqual(cm.get_key(A), 'ID')
# def test_has_property_valid_1(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key'), True)
#
# def test_has_property_valid_2(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key1'), False)
#
# @raises(AssertionError)
# def test_has_property_invalid_df(self):
# cm.has_property(None, 'key')
#
# @raises(AssertionError)
# def test_has_property_invalid_prop_name(self):
# A = read_csv_metadata(path_a)
# cm.has_property(A, None)
#
# @raises(KeyError)
# def test_has_property_df_notin_catalog(self):
# A = pd.read_csv(path_a)
# cm.has_property(A, 'key')
def test_copy_properties_valid_1(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
self.assertEqual(cm.is_dfinfo_present(A1), True)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
def test_copy_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
C1 = pd.read_csv(path_c)
cm.copy_properties(C, C1)
self.assertEqual(cm.is_dfinfo_present(C1), True)
p = cm.get_all_properties(C1)
p1 = cm.get_all_properties(C1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(C1), cm.get_key(C))
self.assertEqual(cm.get_ltable(C1).equals(A), True)
self.assertEqual(cm.get_rtable(C1).equals(B), True)
self.assertEqual(cm.get_fk_ltable(C1), cm.get_fk_ltable(C))
self.assertEqual(cm.get_fk_rtable(C1), cm.get_fk_rtable(C))
@raises(AssertionError)
def test_copy_properties_invalid_tar_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(A, None)
@raises(AssertionError)
def test_copy_properties_invalid_src_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(None, A)
def test_copy_properties_update_false_1(self):
A = read_csv_metadata(path_a)
A1 = read_csv_metadata(path_a)
status=cm.copy_properties(A, A1, replace=False)
self.assertEqual(status, False)
def test_copy_properties_update_false_2(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1, replace=False)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
@raises(KeyError)
def test_copy_properties_src_df_notin_catalog(self):
A = pd.read_csv(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
def test_get_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_get_key_invalid_df(self):
cm.get_key(None)
@raises(KeyError)
def test_get_key_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.get_key(A)
def test_set_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_set_key_invalid_df(self):
cm.set_key(None, 'ID')
@raises(KeyError)
def test_set_key_notin_df(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID1')
def test_set_key_with_dupids(self):
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_set_key_with_mvals(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_get_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_ltable(C), cm.get_property(C, 'fk_ltable'))
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_get_fk_ltable_invalid_df(self):
cm.get_fk_ltable(None)
def test_get_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_rtable(C), cm.get_property(C, 'fk_rtable'))
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_get_fk_rtable_invalid_df(self):
cm.get_fk_rtable(None)
def test_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_set_fk_ltable_invalid_df(self):
cm.set_fk_ltable(None, 'ltable_ID')
@raises(KeyError)
def test_set_fk_ltable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID1')
def test_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID')
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_set_fk_rtable_invalid_df(self):
cm.set_fk_rtable(None, 'rtable_ID')
@raises(KeyError)
def test_set_fk_rtable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID1')
def test_validate_and_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
def test_validate_and_set_fk_ltable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_ltable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_rtable(C), 'ltable_ID')
def test_validate_and_set_fk_rtable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
# def test_get_reqd_metadata_from_catalog_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, 'key')
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, ['key'])
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_3(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable', 'fk_rtable', 'ltable', 'rtable'])
# self.assertEqual(d['key'], cm.get_key(C))
# self.assertEqual(d['fk_ltable'], cm.get_fk_ltable(C))
# self.assertEqual(d['fk_rtable'], cm.get_fk_rtable(C))
# self.assertEqual(cm.get_ltable(C).equals(A), True)
# self.assertEqual(cm.get_rtable(C).equals(B), True)
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_1(self):
# cm.get_reqd_metadata_from_catalog(None, ['key'])
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_2(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable1', 'fk_rtable', 'ltable', 'rtable'])
#
#
# def test_update_reqd_metadata_with_kwargs_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key'])
# self.assertEqual(metadata['key'], d['key'])
#
# def test_update_reqd_metadata_with_kwargs_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, 'key')
# self.assertEqual(metadata['key'], d['key'])
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(None, d, 'key')
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(d, None, 'key')
#
# @raises(AssertionError)
# def test_update_reqd_metadata_with_kwargs_invalid_elts(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key1'])
# def test_get_diff_with_reqd_metadata_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, 'key1')
# self.assertEqual(len(d1), 1)
#
# def test_get_diff_with_reqd_metadata_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, ['key1'])
# self.assertEqual(len(d1), 1)
#
# @raises(AssertionError)
# def test_get_diff_with_reqd_metadata_invalid_dict(self):
# d1 = cm._get_diff_with_required_metadata(None, ['key1'])
# def test_is_all_reqd_metadata_present_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, 'key'),True)
#
# def test_is_all_reqd_metadata_present_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key']),True)
#
# def test_is_all_reqd_metadata_present_valid_3(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key1']), False)
#
# @raises(AssertionError)
# def test_is_all_reqd_metadata_present_invalid_dict(self):
# cm.is_all_reqd_metadata_present(None, 'key')
def test_show_properties_for_df_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_2(self):
A = pd.read_csv(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties(C)
def test_show_properties_for_objid_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties_for_id(id(A))
@raises(KeyError)
def test_show_properties_for_objid_err_1(self):
A = pd.read_csv(path_a)
cm.show_properties_for_id(id(A))
def test_show_properties_for_objid_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties_for_id(id(C))
def test_validate_metadata_for_table_valid_1(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', None, False)
self.assertEqual(status, True)
def test_validate_metadata_for_table_valid_2(self):
import logging
logger = logging.getLogger(__name__)
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', logger, True)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_table_invalid_df(self):
status = cm._validate_metadata_for_table(None, 'ID', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notin_catalog(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID1', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notstring(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, None, 'table', None, False)
@raises(AssertionError)
    def test_validate_metadata_for_table_key_notvalid(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'zipcode', 'table', None, False)
def test_validate_metadata_for_candset_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_df(self):
status = cm._validate_metadata_for_candset(None, '_id', 'ltable_ID', 'rtable_ID', None, None,
'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_id_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, 'id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_ltable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltableID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_rtable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtableID', A, B, 'ID', 'ID', None, False)
@raises(AssertionError)
    def test_validate_metadata_for_candset_invalid_ltable(self):
B = pd.read_csv(path_b)
C = | pd.read_csv(path_c) | pandas.read_csv |
import pytest
from ..BPtPipeline import BPtPipeline
from .helpers import ToFixedTransformer, get_fake_mapping, clean_fake_mapping
from ..ScopeObjs import ScopeTransformer
from ..BPtModel import BPtModel
from ..BPtLoader import BPtLoader
from ...extensions import Identity
from sklearn.linear_model import LinearRegression
import numpy as np
import pandas as pd
import os
import tempfile
from ...default.params.Params import Choice, TransitionChoice
from .helpers import get_param_search
from ..BPtSearchCV import NevergradSearchCV
import shutil
from joblib import hash as joblib_hash
from sklearn.feature_selection import SelectKBest
from ..BPtFeatureSelector import BPtFeatureSelector
from ..BPtTransformer import BPtTransformer
import warnings
from sklearn.decomposition import PCA
def test_BPtPipeline():
    # Pipeline step order: 'loaders', 'imputers', 'scalers', 'transformers',
    # 'feat_selectors', 'model'
steps = []
to_ones = ToFixedTransformer(to=1)
st = ScopeTransformer(estimator=to_ones, inds=[1, 2])
steps.append(('to_ones', st))
model = BPtModel(estimator=LinearRegression(), inds=[0, 1])
steps.append(('model', model))
pipe = BPtPipeline(steps=steps)
X = np.zeros((3, 3))
y = np.ones(3)
pipe.fit(X, y)
assert pipe['to_ones'].inds_ == [1, 2]
# Should update so that next inds are 0, 2
# as 1 -> 0, 2 -> 1, 0 -> 2, so 0,1 -> 2, 0, sorted = 0, 2
assert pipe['model'].inds_ == [0, 2]
assert len(pipe.mapping_) == 3
assert pipe.mapping_[0] == 2
assert pipe.mapping_[1] == 0
assert pipe.mapping_[2] == 1
# Make sure when re-fit resets mapping each time
pipe.fit(X, y)
assert pipe.mapping_[0] == 2
assert pipe.mapping_[1] == 0
assert pipe.mapping_[2] == 1
# Test propegate n_jobs
pipe.n_jobs = 2
assert pipe['to_ones'].n_jobs == 2
assert pipe['to_ones'].estimator.n_jobs == 2
X_df = pd.DataFrame(X)
X_trans = pipe.transform_df(X_df)
assert X_trans[0].sum() == 0
assert X_trans[1].sum() == 3
assert X_trans[2].sum() == 3
def test_skip_loader_no_inds():
steps = []
loader = BPtLoader(estimator=Identity(),
inds=[],
file_mapping={})
steps.append(('loader', loader))
to_ones = ToFixedTransformer(to=1)
st = ScopeTransformer(estimator=to_ones, inds=[1, 2])
steps.append(('to_ones', st))
model = BPtModel(estimator=LinearRegression(), inds=[0, 1])
steps.append(('model', model))
pipe = BPtPipeline(steps=steps)
X = np.zeros((3, 3))
y = np.ones(3)
# If no errors then means worked
# since loader isn't constructed correctly.
pipe.fit(X, y)
assert pipe.steps[0][1].estimator_ is None
def test_file_mapping_hash():
# Make sure that regardless of DataFile position
# in memory, that it hashes correctly.
mapping = get_fake_mapping(10)
h1 = joblib_hash(mapping)
clean_fake_mapping(10)
mapping = get_fake_mapping(10)
h2 = joblib_hash(mapping)
clean_fake_mapping(10)
assert h1 == h2
def run_pipe_with_loader_ts(cache_loc=None):
steps = []
# Loader - transform (5, 2) to (5, 8)
# as each DataFile contains np.zeros((2, 2))
file_mapping = get_fake_mapping(100)
loader = BPtLoader(estimator=Identity(),
inds=[0, 1],
file_mapping=file_mapping,
n_jobs=1,
fix_n_jobs=False,
cache_loc=None)
steps.append(('loader', loader))
# Add transformer to ones
# input here should be (5, 8) of real val, original
# inds of 0 should work on half
to_ones = ToFixedTransformer(to=1)
st = ScopeTransformer(estimator=to_ones, inds=[0])
steps.append(('to_ones', st))
# Add basic linear regression model
# Original inds should work on all
model = BPtModel(estimator=LinearRegression(), inds=[0, 1])
param_dists = {'estimator__fit_intercept': Choice([True, False])}
search_model = NevergradSearchCV(estimator=model,
ps=get_param_search(),
param_distributions=param_dists)
steps.append(('model', search_model))
# Create pipe
pipe = BPtPipeline(steps=steps,
cache_loc=cache_loc)
X = np.arange(100).reshape((50, 2))
y = np.ones(50)
pipe.fit(X, y, fit_index=np.arange(50))
# Make sure fit worked correctly
assert pipe[0].n_features_in_ == 2
assert pipe[1].n_features_in_ == 8
assert pipe[1].estimator_.n_features_in_ == 4
assert len(pipe.mapping_[0]) == 4
assert len(pipe.mapping_[1]) == 4
assert 7 in pipe.mapping_[1]
# Make sure reverse transform works
X_df = pd.DataFrame(X)
X_trans = pipe.transform_df(X_df)
assert X_trans.shape == (50, 8)
assert X_trans.loc[4, '1_3'] == 9
assert X_trans.loc[1, '1_2'] == 3
assert X_trans.loc[4, '0_0'] == 1
assert X_trans.loc[0, '0_0'] == 1
# Make sure predict works,
# seems safe to assume model
# can learn to predict 1's
# as all targets are 1's.
# but may need to change?
preds = pipe.predict(X)
assert np.all(preds > .99)
# Check bpt pipeline coef attribute
assert np.array_equal(pipe[-1].best_estimator_.coef_,
pipe.coef_)
# Clean fake file mapping
clean_fake_mapping(100)
return pipe
def test_pipeline_with_loader():
# Base pipeline with loader tests
run_pipe_with_loader_ts(cache_loc=None)
def test_pipeline_fit_caching():
    # Run with a cache fit dir
cache_loc =\
os.path.join(tempfile.gettempdir(), 'test_cache')
# If already exists, say from a failed test
# delete
if os.path.exists(cache_loc):
shutil.rmtree(cache_loc)
pipe = run_pipe_with_loader_ts(cache_loc=cache_loc)
# Make sure computed hash + saved copy
assert hasattr(pipe, 'hash_',)
assert os.path.exists(pipe._get_hash_loc())
# Delete existing pipe
del pipe
# Run again a few times to make sure loading from cache works
for i in range(5):
pipe = run_pipe_with_loader_ts(cache_loc=cache_loc)
assert hasattr(pipe, 'hash_')
assert pipe.loaded_ is True
del pipe
    # Remove cached dir once done
shutil.rmtree(cache_loc)
def test_pipeline_inverse_transform_FIs_loader_static_transform():
steps = []
# Loader - transform (5, 2) to (5, 8)
# as each DataFile contains np.zeros((2, 2))
loader = BPtLoader(estimator=Identity(),
inds=[1],
file_mapping=get_fake_mapping(100))
steps.append(('loader', loader))
# Add transformer to ones
# input here should be (5, 8) of real val, original
# inds of 0 should work on half
to_ones = ToFixedTransformer(to=1)
st = ScopeTransformer(estimator=to_ones, inds=Ellipsis)
steps.append(('to_ones', st))
# Add basic linear regression model
# Original inds should work on all
model = BPtModel(estimator=LinearRegression(), inds=Ellipsis)
steps.append(('model', model))
# Create pipe
pipe = BPtPipeline(steps=steps)
X = pd.DataFrame(np.arange(100).reshape((50, 2)))
y = np.ones(50)
pipe.fit(X, y)
# Fake coef
coef_ = [0, 1, 2, 3, 4]
feat_names = pipe.transform_feat_names(X)
fis = pd.Series(coef_, index=feat_names)
inverse_fis = pipe.inverse_transform_FIs(fis)
assert inverse_fis.loc[0] == 4
assert inverse_fis.loc[1].shape == ((2, 2))
assert inverse_fis.loc[1][0][0] == 0
clean_fake_mapping(100)
def test_pipeline_inverse_transform_FIs_loader_fs():
warnings.filterwarnings("ignore")
steps = []
# Loader - transform (5, 2) to (5, 8)
# as each DataFile contains np.zeros((2, 2))
loader = BPtLoader(estimator=Identity(),
inds=[0, 1],
file_mapping=get_fake_mapping(100))
steps.append(('loader', loader))
kbest = SelectKBest(k=3)
fs = BPtFeatureSelector(kbest, Ellipsis)
steps.append(('kbest', fs))
# Add basic linear regression model
# Original inds should work on all
model = BPtModel(estimator=LinearRegression(), inds=Ellipsis)
steps.append(('model', model))
# Create pipe
pipe = BPtPipeline(steps=steps)
X = pd.DataFrame(np.arange(100).reshape((50, 2)))
y = np.arange(50)
pipe.fit(X, y)
coef_ = pipe.coef_
feat_names = pipe.transform_feat_names(X)
fis = pd.Series(coef_, index=feat_names)
inverse_fis = pipe.inverse_transform_FIs(fis)
# Don't make assumptions on specific coef
assert inverse_fis.loc[0].shape == (2, 2)
assert inverse_fis.loc[1].shape == (2, 2)
assert np.sum(np.sum(inverse_fis)) == np.sum(coef_)
# Clean up
clean_fake_mapping(100)
def test_pipeline_inverse_transform_FIs_loader_fs_v2():
warnings.filterwarnings("ignore")
steps = []
# Loader - transform (5, 2) to (5, 8)
# as each DataFile contains np.zeros((2, 2))
loader = BPtLoader(estimator=Identity(),
inds=[0, 1],
file_mapping=get_fake_mapping(100))
steps.append(('loader', loader))
to_ones = ToFixedTransformer(to=.15)
st = ScopeTransformer(estimator=to_ones, inds=[0, 1])
steps.append(('to_ones', st))
kbest = SelectKBest(k=3)
fs = BPtFeatureSelector(kbest, Ellipsis)
steps.append(('kbest', fs))
# Add basic linear regression model
# Original inds should work on all
model = BPtModel(estimator=LinearRegression(), inds=Ellipsis)
steps.append(('model', model))
# Create pipe
pipe = BPtPipeline(steps=steps)
X = pd.DataFrame(np.arange(100).reshape((50, 2)))
y = np.arange(50)
pipe.fit(X, y)
coef_ = pipe.coef_
feat_names = pipe.transform_feat_names(X)
fis = pd.Series(coef_, index=feat_names)
inverse_fis = pipe.inverse_transform_FIs(fis)
# Don't make assumptions on specific coef
assert inverse_fis.loc[0].shape == (2, 2)
assert inverse_fis.loc[1].shape == (2, 2)
assert np.sum(np.sum(inverse_fis)) == np.sum(coef_)
# Clean up
clean_fake_mapping(100)
def test_pipeline_inverse_transform_FIs_loader_fs_v3():
warnings.filterwarnings("ignore")
steps = []
# Loader - transform (5, 2) to (5, 8)
# as each DataFile contains np.zeros((2, 2))
loader = BPtLoader(estimator=Identity(),
inds=[0, 1],
file_mapping=get_fake_mapping(100))
steps.append(('loader', loader))
to_ones = ToFixedTransformer(to=.15)
st = ScopeTransformer(estimator=to_ones, inds=[0])
steps.append(('to_ones', st))
kbest = SelectKBest(k=3)
fs = BPtFeatureSelector(kbest, Ellipsis)
steps.append(('kbest', fs))
# Add basic linear regression model
# Original inds should work on all
model = BPtModel(estimator=LinearRegression(), inds=Ellipsis)
steps.append(('model', model))
# Create pipe
pipe = BPtPipeline(steps=steps)
X = pd.DataFrame(np.arange(100).reshape((50, 2)))
y = np.arange(50)
pipe.fit(X, y)
coef_ = pipe.coef_
feat_names = pipe.transform_feat_names(X)
fis = pd.Series(coef_, index=feat_names)
inverse_fis = pipe.inverse_transform_FIs(fis)
# Don't make assumptions on specific coef
assert inverse_fis.loc[0].shape == (2, 2)
assert inverse_fis.loc[1].shape == (2, 2)
assert np.sum(np.sum(inverse_fis)) == np.sum(coef_)
# Clean up
clean_fake_mapping(100)
def test_pipeline_inverse_transform_FIs_impossible():
warnings.filterwarnings("ignore")
steps = []
# Loader - transform (5, 2) to (5, 8)
# as each DataFile contains np.zeros((2, 2))
loader = BPtLoader(estimator=Identity(),
inds=[0, 1],
file_mapping=get_fake_mapping(100))
steps.append(('loader', loader))
pca = PCA(n_components=3)
st = BPtTransformer(estimator=pca, inds=[0, 1])
steps.append(('pca', st))
# Add basic linear regression model
# Original inds should work on all
model = BPtModel(estimator=LinearRegression(), inds=Ellipsis)
steps.append(('model', model))
# Create pipe
pipe = BPtPipeline(steps=steps)
X = pd.DataFrame(np.arange(100).reshape((50, 2)))
y = np.arange(50)
pipe.fit(X, y)
coef_ = pipe.coef_
feat_names = pipe.transform_feat_names(X)
fis = | pd.Series(coef_, index=feat_names) | pandas.Series |
"""All functions that are not so useful, but still useful."""
from collections import Counter
from collections import OrderedDict
from collections import defaultdict
import errno
import itertools
import math
import os
import re
import sys
import ntpath
import pickle
import subprocess
from scipy import stats
import numpy as np
import pandas as pd
import six
import pybedtools
import pysam
import pyBigWig
from bx.intervals.intersection import IntervalTree
import warnings
from .interval import Interval
# Unmapped, Unmapped+Reverse strand, Not primary alignment,
# Not primary alignment + reverse strand, supplementary alignment
# Source: https://broadinstitute.github.io/picard/explain-flags.html
__SAM_NOT_UNIQ_FLAGS__ = [4, 20, 256, 272, 2048]
CBB_PALETTE = [
"#000000",
"#E69F00",
"#56B4E9",
"#009E73",
"#F0E442",
"#0072B2",
"#D55E00",
"#CC79A7",
]
def order_dataframe(df, columns):
"""Order a dataframe
Order a dataframe by moving the `columns` in the front
Parameters
----------
df: Dataframe
Dataframe
columns: list
List of columns that need to be put in front
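    Example
    -------
    Illustrative only:
    >>> order_dataframe(pd.DataFrame({'b': [1], 'a': [2]}), ['a']).columns.tolist()
    ['a', 'b']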
"""
if isinstance(columns, six.string_types):
columns = [columns] # let the command take a string or list
remaining_columns = [w for w in df.columns if w not in columns]
df = df[columns + remaining_columns]
return df
def _fix_bed_coltype(bed):
"""Fix bed chrom and name columns to be string
This is necessary since the chromosome numbers are often interpreted as int
"""
bed["chrom"] = bed["chrom"].astype(str)
bed["name"] = bed["name"].astype(str)
return bed
def check_file_exists(filepath):
"""Check if file exists.
Parameters
----------
filepath : str
Path to file
"""
if os.path.isfile(os.path.abspath(filepath)):
return True
return False
def list_to_ranges(list_of_int):
"""Convert a list to a list of range object
Parameters
----------
list_of_int: list
List of integers to be squeezed into range
Returns
-------
list_of_range: list
List of range objects
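    Example
    -------
    Illustrative only:
    >>> list(list_to_ranges([1, 2, 3, 7, 8, 10]))
    [(1, 3), (7, 8), (10, 10)]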
"""
sorted_list = sorted(set(list_of_int))
for key, group in itertools.groupby(enumerate(sorted_list), lambda x: x[1] - x[0]):
group = list(group)
yield group[0][1], group[-1][1]
def create_ideal_periodic_signal(signal_length):
"""Create ideal ribo-seq signal.
Parameters
----------
signal_length : int
Length of signal to create
Returns
-------
signal : array_like
1-0-0 signal
"""
uniform_signal = np.array([4 / 6.0] * signal_length)
uniform_signal[list(range(1, len(uniform_signal), 3))] = 1 / 6.0
uniform_signal[list(range(2, len(uniform_signal), 3))] = 1 / 6.0
return uniform_signal
def identify_peaks(coverage):
"""Given coverage array, find the site of maximum density"""
return np.argmax(coverage[list(range(-18, -10))])
def millify(n):
"""Convert integer to human readable format.
Parameters
----------
n : int
Returns
-------
millidx : str
Formatted integer
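    Example
    -------
    Illustrative only:
    >>> millify(1234567)
    '1.2 M'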
"""
if n is None or np.isnan(n):
return "NaN"
millnames = ["", " K", " M", " B", " T"]
# Source: http://stackoverflow.com/a/3155023/756986
n = float(n)
millidx = max(
0,
min(
len(millnames) - 1, int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3))
),
)
return "{:.1f}{}".format(n / 10 ** (3 * millidx), millnames[millidx])
def mkdir_p(path):
"""Python version mkdir -p
Parameters
----------
path : str
"""
if path:
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def symlink_force(source, destination):
"""Create forcelink forcefully
Parameters
----------
source: string
Location to source file
destination: string
Location to target
"""
try:
os.symlink(source, destination)
except OSError as exc:
if exc.errno == errno.EEXIST:
os.remove(destination)
os.symlink(source, destination)
else:
raise exc
def r2(x, y):
"""Calculate pearson correlation between two vectors.
Parameters
----------
x : array_like
Input
y : array_like
Input
"""
return stats.pearsonr(x, y)[0] ** 2
def round_to_nearest(x, base=5):
"""Round to nearest base.
Parameters
----------
x : float
Input
Returns
-------
v : int
Output
"""
return int(base * round(float(x) / base))
def set_xrotation(ax, degrees):
"""Rotate labels on x-axis.
Parameters
----------
ax : matplotlib.Axes
Axes object
degrees : int
Rotation degrees
"""
for i in ax.get_xticklabels():
i.set_rotation(degrees)
def summary_stats_two_arrays_welch(
old_mean_array,
new_array,
old_var_array=None,
old_n_counter=None,
carried_forward_observations=None,
):
"""Average two arrays using welch's method
Parameters
----------
old_mean_array : Series
Series of previous means with index as positions
old_var_array : Series
Series of previous variances with index as positions
new_array : array like
Series of new observations
        (does not need to be of the same length as old_mean_array)
    old_n_counter : Series
        Counts of the number of observations at a certain index
Returns
-------
m : array like
Column wise Mean array
var : array like
Column wise variance
Consider an example: [1,2,3], [1,2,3,4], [1,2,3,4,5]
old = [1,2,3]
new = [1,2,3,4]
counter = [1,1,1]
    mean = [1,2,3,4], var = [na, na, na, na], carried_forward = [[1,1], [2,2], [3,3], [4]]
old = [1,2,3,4]
new = [1,2,3,4,5]
    counter = [2,2,2,1]
mean = [1,2,3,4,5]
var = [0,0,0, na, na]
carried_forward = [[], [], [], [4,4], [5]]
"""
if not isinstance(old_mean_array, pd.Series):
old_mean_array = pd.Series(old_mean_array)
if not isinstance(new_array, pd.Series):
new_array = pd.Series(new_array)
if old_n_counter is not None and not isinstance(old_n_counter, pd.Series):
old_n_counter = pd.Series(old_n_counter)
len_old, len_new = len(old_mean_array), len(new_array)
if old_n_counter is None:
        # Initialized from current series
old_n_counter = pd.Series(
np.zeros(len(old_mean_array)) + 1, index=old_mean_array.index
)
if old_var_array is None:
        # Initialized from current series
old_var_array = pd.Series(
np.zeros(len(old_mean_array)) + np.nan, index=old_mean_array.index
)
# Update positions counts based on new_array
new_n_counter = old_n_counter.add(
pd.Series(np.zeros(len(new_array)) + 1, index=new_array.index), fill_value=0
)
if len_old > len_new:
len_diff = len_old - len_new
# Pad the incoming array
# We append NAs to the end of new_array since it will mostly be in the metagene context
max_index = np.max(new_array.index.tolist())
new_index = np.arange(max_index + 1, max_index + 1 + len_diff)
new_array = new_array.append(
pd.Series(np.zeros(len_diff) + np.nan, index=new_index),
verify_integrity=True,
)
elif len_old < len_new:
len_diff = len_new - len_old
# Pad the old array
if len_old == 0:
old_mean_array = pd.Series([])
else:
max_index = np.max(old_mean_array.index.tolist())
new_index = np.arange(max_index + 1, max_index + 1 + len_diff)
old_mean_array = old_mean_array.append(
pd.Series(np.zeros(len_diff) + np.nan, index=new_index),
verify_integrity=True,
)
if not (old_mean_array.index == new_array.index).all():
print("old array index: {}".format(old_mean_array))
print("new array index: {}".format(new_array))
positions_with_less_than3_obs = defaultdict(list)
for index, counts in six.iteritems(new_n_counter):
# Which positions has <3 counts for calculating variance
if counts <= 3:
# Fetch the exact observations from history
try:
last_observations = carried_forward_observations[index]
except:
                # No carried forward passed
if not np.isnan(old_mean_array[index]):
last_observations = [old_mean_array[index]]
else:
last_observations = []
# Add entry from new_array only if it is not NAN
if not np.isnan(new_array[index]):
last_observations.append(new_array[index])
positions_with_less_than3_obs[index] = last_observations
# positions_with_less_than3_obs = pd.Series(positions_with_less_than3_obs)
# delta = x_n - mean(x_{n-1})
delta = new_array.subtract(old_mean_array)
"""
for index, value in six.iteritems( delta ):
if np.isnan(value):
if not np.isnan(old_mean_array[index]):
delta[index] = old_mean_array[index]
else:
delta[index] = new_array[index]
"""
# delta = delta/n
delta_normalized = delta.divide(new_n_counter)
# mean(x_n) = mean(x_{n-1}) + delta/n
new_mean_array = old_mean_array.add(delta_normalized)
for index, value in six.iteritems(new_mean_array):
if np.isnan(value):
if not np.isnan(old_mean_array[index]):
new_mean_array[index] = old_mean_array[index]
else:
new_mean_array[index] = new_array[index]
# print(delta)
# print(new_n_counter)
# print(delta_normalized)
# print(new_mean_array)
# mean_difference_current = x_n - mean(x_n)
# mean_difference_previous = x_n - mean(x_{n-1})
mean_difference_current = new_array.fillna(0) - new_mean_array.fillna(0)
mean_difference_previous = new_array.fillna(0) - old_mean_array.fillna(0)
# (x_n-mean(x_n))(x_n-mean(x_{n-1})
product = np.multiply(mean_difference_current, mean_difference_previous)
# (n-1)S_n^2 - (n-2)S_{n-1}^2 = (x_n-mean(x_n)) (x_n-mean(x_{n-1}))
# old_ssq = (n-1)S_{n-1}^2
# (n-2)S_{n-1}^2
old_sum_of_sq = (old_n_counter - 2).multiply(old_var_array.fillna(0))
# new_ssq = (old_ssq + product)
# (n-1) S_n^2
new_sum_of_sq = old_sum_of_sq + product
# if counts is less than 3, set sum of sq to NA
new_sum_of_sq[new_n_counter < 3] = np.nan
# if counts just became 3, compute the variance
for index, counts in six.iteritems(new_n_counter):
if counts == 3:
observations = positions_with_less_than3_obs[index]
variance = np.var(observations)
print(index, variance)
new_sum_of_sq[index] = variance
# delete it from the history
del positions_with_less_than3_obs[index]
new_var_array = new_sum_of_sq.divide(new_n_counter - 1)
new_var_array[new_var_array == np.inf] = np.nan
new_var_array[new_n_counter < 3] = np.nan
"""
for index, counts in six.iteritems(new_n_counter):
if counts < 3:
if not np.isnan(new_array[index]):
if index not in list(positions_with_less_than3_obs.keys()):
positions_with_less_than3_obs[index] = list()
assert index in positions_with_less_than3_obs.keys()
positions_with_less_than3_obs[index].append(new_array[index])
"""
return new_mean_array, new_var_array, new_n_counter, positions_with_less_than3_obs
def path_leaf(path):
"""Get path's tail from a filepath"""
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def parse_star_logs(infile, outfile=None):
"""Parse star logs into a dict
Parameters
----------
infile : str
Path to starlogs.final.out file
Returns
-------
star_info : dict
Dict with necessary records parsed
"""
ANNOTATIONS = [
"total_reads",
"uniquely_mapped",
"uniquely_mapped_percent",
"multi_mapped_percent",
"unmapped_percent",
"multi_mapped",
]
star_info = OrderedDict()
with open(infile) as fh:
for line in fh:
line = line.strip()
if line.startswith("Number of input reads"):
star_info[ANNOTATIONS[0]] = int(line.strip().split("\t")[1])
elif line.startswith("Uniquely mapped reads number"):
star_info[ANNOTATIONS[1]] = int(line.strip().split("\t")[1])
elif line.startswith("Uniquely mapped reads %"):
star_info[ANNOTATIONS[2]] = round(
float(line.strip("%").split("\t")[1]), 2
)
elif line.startswith("Number of reads mapped to multiple loci"):
star_info[ANNOTATIONS[5]] = int(line.strip().split("\t")[1])
elif line.startswith("Number of reads mapped to too many loci"):
star_info[ANNOTATIONS[5]] += int(line.strip().split("\t")[1])
elif line.startswith("% of reads mapped to multiple loci"):
star_info[ANNOTATIONS[3]] = round(
float(line.strip("%").split("\t")[1]), 2
)
elif line.startswith("% of reads mapped to too many loci"):
star_info[ANNOTATIONS[3]] += round(
float(line.strip("%").split("\t")[1]), 2
)
elif line.startswith("% of reads unmapped: too many mismatches"):
star_info[ANNOTATIONS[4]] = round(
float(line.strip("%").split("\t")[1]), 2
)
elif line.startswith("% of reads unmapped: too short"):
star_info[ANNOTATIONS[4]] += round(
float(line.strip("%").split("\t")[1]), 2
)
elif line.startswith("% of reads unmapped: other"):
star_info[ANNOTATIONS[4]] += round(
float(line.strip("%").split("\t")[1]), 2
)
star_info = {key: round(star_info[key], 2) for key in list(star_info.keys())}
if outfile is None:
return star_info
filename = path_leaf(infile)
filename = filename.strip("Log.final.out")
counts_df = pd.DataFrame.from_dict(star_info, orient="index").T
counts_df.index = [filename]
if outfile:
counts_df.to_csv(outfile, sep=str("\t"), index=True, header=True)
return counts_df
def get_strandedness(filepath):
"""Parse output of infer_experiment.py from RSeqC to get strandedness.
Parameters
----------
filepath : str
Path to infer_experiment.py output
Returns
-------
strandedness : str
reverse or forward or none
"""
with open(filepath) as f:
data = f.read()
splitted = [x.strip() for x in data.split("\n") if len(x.strip()) >= 1]
assert splitted[0] == "This is SingleEnd Data"
fwd_percentage = None
rev_percentage = None
for line in splitted[1:]:
if "Fraction of reads failed to determine:" in line:
continue
elif 'Fraction of reads explained by "++,--":' in line:
fwd_percentage = float(line.split(":")[1])
elif 'Fraction of reads explained by "+-,-+":' in line:
rev_percentage = float(line.split(":")[1])
assert rev_percentage is not None
assert fwd_percentage is not None
ratio = fwd_percentage / rev_percentage
if np.isclose([ratio], [1]):
return "none"
elif ratio >= 0.5:
return "forward"
else:
return "reverse"
def load_pickle(filepath):
"""Read pickled files easy in Python 2/3"""
if ".tsv" in filepath:
raise IndexError
if sys.version_info > (3, 0):
pickled = pickle.load(open(filepath, "rb"), encoding="latin1")
else:
pickled = pickle.load(open(filepath, "rb"))
return pickled
def pad_or_truncate(some_list, target_len):
"""Pad or truncate a list upto given target length
Parameters
----------
some_list : list
Input list
target_length : int
Final length of list
If being extended, returns list padded with NAs.
"""
return some_list[:target_len] + [np.nan] * (target_len - len(some_list))
def pad_five_prime_or_truncate(some_list, offset_5p, target_len):
"""Pad first the 5prime end and then the 3prime end or truncate
Parameters
----------
some_list : list
Input list
offset_5p : int
5' offset
target_length : int
Final length of list
If being extended, returns list padded with NAs.
"""
some_list = list(some_list)
padded_5p = [np.nan] * offset_5p + some_list
return padded_5p[:target_len] + [np.nan] * (target_len - len(padded_5p))
def codon_to_anticodon(codon):
"""Codon to anticodon.
Parameters
----------
codon : string
Input codon
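    Example
    -------
    Illustrative only:
    >>> codon_to_anticodon('ATG')
    'CAT'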
"""
pairs = {"A": "T", "C": "G", "T": "A", "G": "C", "N": "N"}
return "".join(pairs[c] for c in codon)[::-1]
def merge_intervals(
intervals, chromosome_lengths=None, offset_5p=0, offset_3p=0, zero_based=True
):
"""Collapse intervals into non overlapping manner
Parameters
----------
intervals : list of Interval
chromosome_lengths : dict
A map of each chromosome'e length
Only used with offset_3p, offset_5p>0
offset_5p : int (positive)
Number of bases to count upstream (5')
offset_3p : int (positive)
Number of bases to count downstream (3')
zero_based: bool
Indicate if the intervals are zero-based
True means zero-based half open
False means one-based full closed
Returns
-------
interval_combined : list of Interval sorted by the start
A merged version of intervals
This is useful when the annotations are overlapping.
Example:
chr1 310 320 gene1 +
chr1 319 324 gene1 +
Returns:
chr1 310 324 gene1 +
gene_offset_5p: Gene wise 5 prime offset
This might be different from `offset_5p` in cases where
`offset_5p` leads to a negative coordinate
gene_offset_3p: Gene wise 3 prime offset
This might be different from `offset_3p` in cases where
`offset_3p` leads to position beyond chromsome length
"""
if not intervals:
return ([], offset_5p, offset_3p)
chroms = list(set([i.chrom for i in intervals]))
strands = list(set([i.strand for i in intervals]))
if len(chroms) != 1:
sys.stderr.write("Error: chromosomes should be unique")
return ([], offset_5p, offset_3p)
if len(strands) != 1:
sys.stderr.write("Error: strands should be unique")
return ([], offset_5p, offset_3p)
chrom = chroms[0]
strand = strands[0]
# Sort intervals by start
intervals.sort(key=lambda x: x.start)
# Find first interval
first_interval = intervals[0]
# Find last interval
last_interval = intervals[-1]
for i in intervals:
if i.end > last_interval.end:
last_interval = i
if offset_5p != 0 or offset_3p != 0:
if str(chrom) in chromosome_lengths:
chrom_length = chromosome_lengths[str(chrom)]
else:
warnings.warn("Chromosome {} does not exist".format(chrom), UserWarning)
chrom_length = np.inf
else:
chrom_length = np.inf
if zero_based:
lower_bound = 0
else:
lower_bound = 1
upper_bound = chrom_length
if strand == "+":
if first_interval.start - offset_5p >= lower_bound:
first_interval.start -= offset_5p
gene_offset_5p = offset_5p
else:
gene_offset_5p = first_interval.start - lower_bound
first_interval.start = lower_bound
if last_interval.end + offset_3p <= upper_bound:
last_interval.end += offset_3p
gene_offset_3p = offset_3p
else:
gene_offset_3p = upper_bound - last_interval.end
last_interval.end = upper_bound
else:
if last_interval.end + offset_5p <= upper_bound:
last_interval.end += offset_5p
gene_offset_5p = offset_5p
else:
gene_offset_5p = upper_bound - last_interval.end
last_interval.end = upper_bound
if first_interval.start - offset_3p >= lower_bound:
first_interval.start -= offset_3p
gene_offset_3p = offset_3p
else:
gene_offset_3p = first_interval.start - lower_bound
first_interval.start = lower_bound
# Merge overlapping intervals
to_merge = Interval(chrom, first_interval.start, first_interval.end, strand)
intervals_combined = []
for i in intervals:
if i.start <= to_merge.end:
to_merge.end = max(to_merge.end, i.end)
else:
intervals_combined.append(to_merge)
to_merge = Interval(chrom, i.start, i.end, strand)
intervals_combined.append(to_merge)
return (intervals_combined, gene_offset_5p, gene_offset_3p)
def summarize_counters(samplewise_dict):
"""Summarize gene counts for a collection of samples.
Parameters
----------
samplewise_dict : dict
A dictionary with key as sample name and value
as another dictionary of counts for each gene
Returns
-------
totals : dict
A dictionary with key as sample name and value as total gene count
"""
totals = {}
for key, sample_dict in six.iteritems(samplewise_dict):
        totals[key] = np.nansum([np.nansum(d) for d in list(sample_dict.values())])
return totals
def complementary_strand(strand):
"""Get complementary strand
Parameters
----------
strand: string
+/-
Returns
-------
rs: string
-/+
"""
if strand == "+":
return "-"
elif strand == "-":
return "+"
else:
raise ValueError("Not a valid strand: {}".format(strand))
def read_refseq_bed(filepath):
"""Read refseq bed12 from UCSC.
Parameters
----------
filepath: string
Location to bed12
Returns
-------
refseq: dict
dict with keys as gene name and values as intervaltree
"""
refseq = defaultdict(IntervalTree)
with open(filepath, "r") as fh:
for line in fh:
line = line.strip()
if line.startswith(("#", "track", "browser")):
continue
fields = line.split("\t")
chrom, tx_start, tx_end, name, score, strand = fields[:6]
tx_start = int(tx_start)
tx_end = int(tx_end)
refseq[chrom].insert(tx_start, tx_end, strand)
return refseq
def read_bed_as_intervaltree(filepath):
"""Read bed as interval tree
Useful for reading start/stop codon beds
Parameters
----------
filepath: string
Location to bed
Returns
-------
bedint_tree: dict
dict with keys as gene name and strand as intervaltree
"""
bed_df = pybedtools.BedTool(filepath).sort().to_dataframe()
bed_df["chrom"] = bed_df["chrom"].astype(str)
bed_df["name"] = bed_df["name"].astype(str)
bed_grouped = bed_df.groupby("chrom")
bedint_tree = defaultdict(IntervalTree)
for chrom, df in bed_grouped:
df_list = list(zip(df["start"], df["end"], df["strand"]))
for start, end, strand in df_list:
bedint_tree[chrom].insert(start, end, strand)
return bedint_tree
def read_chrom_sizes(filepath):
"""Read chr.sizes file sorted by chromosome name
Parameters
----------
filepath: string
Location to chr.sizes
Returns
-------
chrom_lengths: list of tuple
A list of tuples with chromsome name and their size
"""
chrom_lengths = []
with open(filepath, "r") as fh:
for line in fh:
chrom, size = line.strip().split("\t")
chrom_lengths.append((chrom, int(size)))
    chrom_lengths = list(sorted(chrom_lengths, key=lambda x: x[0]))
    return chrom_lengths
def create_bam_index(bam):
"""Create bam index.
Parameters
----------
bam : str
Path to bam file
"""
if isinstance(bam, pysam.AlignmentFile):
bam = bam.filename
if not os.path.exists("{}.bai".format(bam)):
pysam.index(bam)
def is_read_uniq_mapping(read):
"""Check if read is uniquely mappable.
Parameters
----------
read : pysam.Alignment.fetch object
Most reliable: ['NH'] tag
"""
# Filter out secondary alignments
if read.is_secondary:
return False
tags = dict(read.get_tags())
try:
nh_count = tags["NH"]
except KeyError:
# Reliable in case of STAR
if read.mapping_quality == 255:
return True
if read.mapping_quality < 1:
return False
# NH tag not set so rely on flags
if read.flag in __SAM_NOT_UNIQ_FLAGS__:
return False
else:
raise RuntimeError("Malformed BAM?")
if nh_count == 1:
return True
return False
def find_first_non_none(positions):
"""Given a list of positions, find the index and value of first non-none element.
This method is specifically designed for pysam, which has a weird way of returning
the reference positions. If they are mismatched/softmasked it returns None
when fetched using get_reference_positions.
query_alignment_start and query_alignment_end give you indexes of position in the read
which technically align, but are not softmasked i.e. it is set to None even if the position does not align
Parameters
----------
positions: list of int
Positions as returned by pysam.fetch.get_reference_positions
Return
------
index: int
Index of first non-None value
position: int
Value at that index
"""
for idx, position in enumerate(positions):
if position is not None:
return idx, position
def find_last_non_none(positions):
"""Given a list of positions, find the index and value of last non-none element.
This function is similar to the `find_first_non_none` function, but does it for the reversed
list. It is specifically useful for reverse strand cases
Parameters
----------
positions: list of int
Positions as returned by pysam.fetch.get_reference_positions
Return
------
index: int
Index of first non-None value
position: int
Value at that index
"""
return find_first_non_none(positions[::-1])
# NOTE: We can in principle do a longer metagene analysis
# using this helper function
def yield_intervals(chrom_size, chunk_size=20000):
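    """Yield (start, end) chunks of at most ``chunk_size`` covering
    [0, chrom_size), truncating the final chunk at chrom_size."""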
for start in np.arange(0, chrom_size, chunk_size):
end = start + chunk_size
if end > chrom_size:
yield (start, chrom_size)
else:
yield (start, end)
def bwsum(bw, chunk_size=5000, scale_to=1e6):
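    """Sum the coverage of a bigwig in chunks; return the total and the
    factor that rescales that total to ``scale_to``."""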
bw_sum = 0
if isinstance(bw, six.string_types):
bw = pyBigWig.open(bw)
chrom_sizes = bw.chroms()
for chrom, chrom_size in six.iteritems(chrom_sizes):
for start, end in yield_intervals(chrom_size, chunk_size):
bw_sum += np.nansum(bw.values(chrom, start, end))
scale_factor = 1 / (bw_sum / scale_to)
return bw_sum, scale_factor
def scale_bigwig(inbigwig, chrom_sizes, outbigwig, scale_factor=1):
"""Scale a bigwig by certain factor.
Parameters
----------
inbigwig: string
Path to input bigwig
chrom_sizes: string
Path to chrom.sizes file
outbigwig: string
Path to output bigwig
scale_factor: float
Scale by value
"""
wigfile = os.path.abspath("{}.wig".format(outbigwig))
chrom_sizes = os.path.abspath(chrom_sizes)
inbigwig = os.path.abspath(inbigwig)
outbigwig = os.path.abspath(outbigwig)
if os.path.isfile(wigfile):
# wiggletools errors if the file already exists
os.remove(wigfile)
cmds = ["wiggletools", "write", wigfile, "scale", str(scale_factor), inbigwig]
try:
p = subprocess.Popen(
cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = p.communicate()
rc = p.returncode
if rc != 0:
raise RuntimeError(
"Error running wiggletools.\nstdout : {} \n stderr : {}".format(
stdout, stderr
)
)
except FileNotFoundError:
raise FileNotFoundError(
"wiggletool not found on the path." "Use `conda install wiggletools`"
)
cmds = ["wigToBigWig", wigfile, chrom_sizes, outbigwig]
try:
p = subprocess.Popen(
cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = p.communicate()
rc = p.returncode
if rc != 0:
raise RuntimeError(
"Error running wigToBigWig.\nstdout : {} \n stderr : {}".format(
stdout, stderr
)
)
os.remove(wigfile)
except FileNotFoundError:
raise FileNotFoundError(
"wigToBigwig not found on the path. This is an external "
"tool from UCSC which can be downloaded from "
"http://hgdownload.soe.ucsc.edu/admin/exe/. Alternatatively, use "
"`conda install ucsc-wigtobigwig`"
)
def get_region_sizes(region_bed):
"""Get summed up size of a CDS/UTR region from bed file
Parameters
----------
region_bed: string
Input bed file
Returns
-------
region_sizes: pd.Series
Series with region name as index and size as key
"""
if isinstance(region_bed, six.string_types):
region_bed = pybedtools.BedTool(region_bed).to_dataframe()
region_bed_grouped = region_bed.groupby("name")
region_sizes = {}
for gene_name, gene_group in region_bed_grouped:
## Get rid of trailing dots
gene_name = re.sub(r"\.[0-9]+", "", gene_name)
# Collect all intervals at once
intervals = list(
zip(
gene_group["chrom"],
gene_group["start"],
gene_group["end"],
gene_group["strand"],
)
)
for interval in intervals:
if gene_name not in region_sizes:
# End is always 1-based so does not require +1
region_sizes[gene_name] = interval[2] - interval[1]
else:
region_sizes[gene_name] += interval[2] - interval[1]
return pd.Series(region_sizes)
def htseq_to_tpm(htseq_f, outfile, cds_bed_f):
"""Convert htseq-counts file to tpm
Parameters
----------
htseq_f: string
Path to htseq-count output
outfile: string
Path to output file with tpm values
cds_bed_f: string
Path to CDS/genesize bed file
"""
cds_bed = pybedtools.BedTool(cds_bed_f).to_dataframe()
cds_bed_sizes = get_region_sizes(cds_bed)
htseq = pd.read_table(htseq_f, names=["name", "counts"]).set_index("name")
htseq = htseq.iloc[:-5]
if htseq.shape[0] <= 10:
print("Empty dataframe for : {}\n".format(htseq_f))
return None
rate = np.log(htseq["counts"]).subtract(np.log(cds_bed_sizes))
denom = np.log(np.sum(np.exp(rate)))
tpm = np.exp(rate - denom + np.log(1e6))
tpm = pd.DataFrame(tpm, columns=["tpm"])
tpm = tpm.sort_values(by=["tpm"], ascending=False)
tpm.to_csv(outfile, sep="\t", index=True, header=False)
def counts_to_tpm(counts, sizes):
"""Counts to TPM
Parameters
----------
counts: array like
Series/array of counts
sizes: array like
Series/array of region sizes
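    Notes
    -----
    Equivalent (up to floating point) to
    tpm_i = 1e6 * (counts_i / sizes_i) / sum_j(counts_j / sizes_j),
    computed in log space for numerical stability.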
"""
rate = np.log(counts).subtract(np.log(sizes))
denom = np.log(np.sum(np.exp(rate)))
tpm = np.exp(rate - denom + np.log(1e6))
return tpm
def featurecounts_to_tpm(fc_f, outfile):
"""Convert htseq-counts file to tpm
Parameters
----------
fc_f: string
Path to htseq-count output
outfile: string
Path to output file with tpm values
"""
feature_counts = pd.read_csv(fc_f, sep="\t")
feature_counts = feature_counts.set_index("Geneid")
feature_counts = feature_counts.drop(columns=["Chr", "Start", "End", "Strand"])
lengths = feature_counts["Length"]
feature_counts = feature_counts.drop(columns=["Length"])
tpm = feature_counts.apply(lambda x: counts_to_tpm(x, lengths), axis=0)
tpm.columns = [
col.replace("bams_unique/", "").replace(".bam", "") for col in tpm.columns
]
tpm.to_csv(outfile, sep="\t", index=True, header=True)
def read_htseq(htseq_f):
"""Read HTSeq file.
Parameters
----------
htseq_f: str
Path to htseq counts file
Returns
-------
htseq_df: dataframe
HTseq counts as in a dataframe
"""
htseq = | pd.read_table(htseq_f, names=["name", "counts"]) | pandas.read_table |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
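    """Return a DataFrame exercising the common dtypes (unsigned and signed
    ints, floats, bool, datetime64[ms], strings with and without nulls),
    intended as a fixture for roundtrip tests in this module."""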
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int8),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
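    """Convert ``df`` to a Table (or RecordBatch when ``as_batch`` is True),
    convert it back to pandas and assert equality with ``expected`` (``df``
    by default), optionally checking the resulting Arrow schema."""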
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
# without the pandas generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
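    """Roundtrip a Series through ``pa.array`` and back, checking the Arrow
    type and value equality (re-localizing tz-aware timestamps)."""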
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
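    """Roundtrip ``values`` (optionally with a null ``mask``) through
    ``pa.array`` and back, checking the null count and value equality."""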
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
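    """Roundtrip a numpy array through ``pa.array`` and assert elementwise
    equality of the result."""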
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
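# Illustrative sketch (not an actual test) of the roundtrip the helpers above
# assert on:
#     df = pd.DataFrame({'a': [1, 2, 3]})
#     table = pa.Table.from_pandas(df)
#     tm.assert_frame_equal(table.to_pandas(), df)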
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
        df = pd.DataFrame({"a": [1.0, 2.0, np.nan]})
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
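# Shared price fixtures: a 5-bar close series, a 3-column tiled copy, and larger random frames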
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
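    """Assert that two tuples are equal element-wise, treating NaN values as equal."""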
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
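    # The jitted nb.build_call_seq_nb should match the non-jitted nb.build_call_seq for every CallSeqType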
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
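# Convenience wrappers around Portfolio.from_orders, each fixing the direction argument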
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
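        # size=np.inf should invest all available cash; size=-np.inf should sell/short as much as possible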
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
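        # val_price=np.inf should resolve to the current close and -np.inf to the previous one (ffilled unless ffill_val_price=False)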
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
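        # lock_cash=True should cap order sizes so that (group) free cash never drops below zero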
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
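        # log=True should produce one log record per order evaluation, including ignored and rejected ones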
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
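# Rotational sizing: each bar closes out one column (-inf) and opens another (inf), which only
# works when sells are executed before buys, i.e. call_seq='auto' together with cash sharing.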
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
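# With a single column per group there is nothing to revalue between calls, so update_value has
# no effect here; the grouped, cash-shared cases below do differ.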
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
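# call_seq='auto' sorts calls by order value so that selling columns free up cash before buying
# columns spend it, letting each bar hit its target holding value exactly.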
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
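# Default signal fixtures: entries fire on the first three bars and exits on the last three,
# overlapping on the middle bar.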
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
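# Direction-specific wrappers around Portfolio.from_signals; the from_ls_* variants route the
# same signals through the separate long/short entry and exit slots instead of a direction argument.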
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
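# Reference portfolio built from explicit long/short signal arrays; the custom signal_func_nb
# above must produce identical orders.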
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
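# Percent sizing cannot be used to reverse a position, so the 'both' direction raises unless
# opposite entries merely close the position (upon_opposite_entry='close').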
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
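# val_price=np.inf values the position at the current close, -np.inf at the latest prior close;
# ffill_val_price controls whether NaN closes are forward-filled first.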
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
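# Falling close (5 -> 1) with OHLC bands around it: long stop-losses trigger on the way down,
# while short positions never hit their stop.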
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1, sl_trail=True)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
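# Close rises to 5 before falling, so the trailing stop ratchets up with the peak and triggers
# on the subsequent decline.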
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(tp_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
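# Falling close: long take-profit targets are never reached, while short positions take profit
# as the price drops.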
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
        close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
#
# Copyright (c) 2016, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by <NAME>, June 2016
import random
import pandas as pd
import numpy as np
from collections import defaultdict, Counter
import re
from sklearn.feature_extraction import DictVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.externals.joblib import Parallel, delayed
from rdkit import Chem
from rdkit.Chem import AllChem, rdqueries, BRICS
from ChemTopicModel import utilsFP
#### FRAGMENT GENERATION ####################
def _prepBRICSSmiles(m):
smi = Chem.MolToSmiles(m,isomericSmiles=True, allHsExplicit=True, allBondsExplicit=True)
# delete the connection ids
smi = re.sub(r"\[\d+\*\]", "[*]", smi)
order = eval(m.GetProp("_smilesAtomOutputOrder"))
# make the smiles more descriptive, add properties
return utilsFP.writePropsToSmiles(m,smi,order)
def _generateFPs(mol,fragmentMethod='Morgan'):
aBits={}
fp=None
    # circular Morgan fingerprint fragmentation; we use a simpler invariant than usual here
if fragmentMethod=='Morgan':
tmp={}
fp = AllChem.GetMorganFingerprint(mol,radius=2,invariants=utilsFP.generateAtomInvariant(mol),bitInfo=tmp)
aBits = utilsFP.getMorganEnvironment(mol, tmp, fp=fp, minRad=2)
fp = fp.GetNonzeroElements()
# path-based RDKit fingerprint fragmentation
elif fragmentMethod=='RDK':
fp = AllChem.UnfoldedRDKFingerprintCountBased(mol,maxPath=5,minPath=3,bitInfo=aBits)
fp = fp.GetNonzeroElements()
# get the final BRICS fragmentation (= smallest possible BRICS fragments of a molecule)
elif fragmentMethod=='Brics':
fragMol=BRICS.BreakBRICSBonds(mol)
propSmi = _prepBRICSSmiles(fragMol)
fp=Counter(propSmi.split('.'))
else:
print("Unknown fragment method")
return fp, aBits
# this function is not part of the class due to parallelisation
# generate the fragments of a molecule, return a map with moleculeID and fragment dict
def _generateMolFrags(datachunk, vocabulary, fragmentMethod, fragIdx=None):
if fragIdx is None and fragmentMethod == 'Brics':
return
result={}
for idx, smi in datachunk:
mol = Chem.MolFromSmiles(str(smi))
        if mol is None:
continue
fp,_=_generateFPs(mol,fragmentMethod=fragmentMethod)
if fp is None:
continue
tmp={}
for k,v in fp.items():
if k not in vocabulary:
continue
# save memory: for BRICS use index instead of long complicated SMILES
if fragmentMethod == 'Brics':
tmp[fragIdx[k]]=v
else:
tmp[k]=v
result[idx]=tmp
return result
########### chemical topic modeling class ###################
class ChemTopicModel:
# initialisation chemical topic model
def __init__(self, fragmentMethod = 'Morgan', randomState=42, sizeSampleDataSet=0.1, rareThres=0.001,
commonThres=0.1, verbose=0, n_jobs=1, chunksize=1000, learningMethod='batch'):
self.fragmentMethod = fragmentMethod
self.seed = randomState
self.sizeSampleDataSet = sizeSampleDataSet
self.rareThres = rareThres
self.commonThres = commonThres
self.verbose = verbose
self.n_jobs = n_jobs
self.chunksize = chunksize
self.learningMethod = learningMethod
# generate the fragments used for the model, exclude rare and common fragments depending on a threshold
def _generateFragmentVocabulary(self,molSample):
fps=defaultdict(int)
# collect fragments from a sample of the dataset
for smi in molSample:
mol = Chem.MolFromSmiles(str(smi))
if mol is None:
continue
fp,_=_generateFPs(mol,fragmentMethod=self.fragmentMethod)
if fp is None:
continue
for bit in fp.keys():
fps[bit]+=1
# filter rare and common fragments
fragOcc = np.array(list(fps.values()))
normFragOcc = fragOcc/float(len(molSample))
ids = normFragOcc > self.commonThres
normFragOcc[ids] = 0
ids = normFragOcc < self.rareThres
normFragOcc[ids] = 0
keys = list(fps.keys())
self.vocabulary = sorted(n for n,i in zip(keys,normFragOcc) if i != 0)
self.fragIdx=dict((i,j) for j,i in enumerate(self.vocabulary))
if self.verbose:
print('Created vocabulary, size: {0}, used sample size: {1}'.format(len(self.vocabulary),len(molSample)))
# generate the fragment templates important for the visualisation of the topics later
def _generateFragmentTemplates(self,molSample):
fragTemplateDict=defaultdict(list)
voc=set(self.vocabulary)
if not len(self.vocabulary):
print('Please generate your vocabulary first')
return
sizeVocabulary=len(self.vocabulary)
for n,smi in enumerate(molSample):
mol = Chem.MolFromSmiles(str(smi))
            if mol is None:
continue
fp,aBits=_generateFPs(mol,fragmentMethod=self.fragmentMethod)
if fp is None:
continue
for k,v in fp.items():
if k not in voc or k in fragTemplateDict:
continue
# save memory: for brics use index instead of long complicated smarts
if self.fragmentMethod in ['Brics','BricsAll']:
fragTemplateDict[self.fragIdx[k]]=['', []]
else:
fragTemplateDict[k]=[smi, aBits[k][0]]
if len(fragTemplateDict) == sizeVocabulary:
break
tmp = [[k,v[0],v[1]] for k,v in fragTemplateDict.items()]
        self.fragmentTemplates = pd.DataFrame(tmp,columns=['bitIdx','templateMol','bitPathTemplateMol'])
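# Minimal usage sketch for the fragment generator above (a sketch, not part of
# the original module; it assumes RDKit is installed and uses only names
# defined earlier in this file).
if __name__ == '__main__':
    example_mol = Chem.MolFromSmiles('c1ccccc1O')  # phenol
    example_fp, example_bits = _generateFPs(example_mol, fragmentMethod='Morgan')
    # example_fp maps Morgan fragment ids to counts; example_bits maps each
    # fragment id to the atom environments used later for topic visualisation.
    print(len(example_fp), 'distinct Morgan fragments')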
from pandas import Series,DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn import preprocessing
import os
from sklearn import datasets
import sklearn.metrics as sm
from sklearn.preprocessing import LabelEncoder
#calling in libraries
data= pd.read_csv(r"C:\Users\mudit\Google Drive\docs\bank\Book1.csv") #reading csv file\
#dataset https://archive.ics.uci.edu/ml/datasets/Bank+Marketing
data_clean=data.dropna()
#delete observations with missing data
cluster=data_clean[['age','job','marital','education','default',
'balance', 'housing' ,'loan','contact',
'day', 'month', 'duration', 'campaign',
'pdays', 'previous', 'poutcome','y','p']]
# variables chosen for clustering
#cluster.describe()
number =LabelEncoder()
clustervar=cluster.copy()
clustervar['job']=number.fit_transform(clustervar['job'].astype('str'))
clustervar['job']=preprocessing.scale(clustervar['job'].astype('float64'))
clustervar['marital']=number.fit_transform(clustervar['marital'].astype('str'))
clustervar['marital']=preprocessing.scale(clustervar['marital'].astype('float64'))
clustervar['education']=number.fit_transform(clustervar['education'].astype('str'))
clustervar['education']=preprocessing.scale(clustervar['education'].astype('float64'))
clustervar['housing']=number.fit_transform(clustervar['housing'].astype('str'))
clustervar['housing']=preprocessing.scale(clustervar['housing'].astype('float64'))
clustervar['loan']=number.fit_transform(clustervar['loan'].astype('str'))
clustervar['loan']=preprocessing.scale(clustervar['loan'].astype('float64'))
clustervar['default']=number.fit_transform(clustervar['default'].astype('str'))
clustervar['default']=preprocessing.scale(clustervar['default'].astype('float64'))
clustervar['contact']=number.fit_transform(clustervar['contact'].astype('str'))
clustervar['contact']=preprocessing.scale(clustervar['contact'].astype('float64'))
clustervar['poutcome']=number.fit_transform(clustervar['poutcome'].astype('str'))
clustervar['poutcome']=preprocessing.scale(clustervar['poutcome'].astype('float64'))
clustervar['y']=number.fit_transform(clustervar['y'].astype('str'))
clustervar['y']=preprocessing.scale(clustervar['y'].astype('float64'))
clustervar['month']=number.fit_transform(clustervar['month'].astype('str'))
clustervar['month']=preprocessing.scale(clustervar['month'].astype('float64'))
clustervar['p']=number.fit_transform(clustervar['p'].astype('str'))
clustervar['p']=preprocessing.scale(clustervar['p'].astype('float64'))
#standardize clustering variables to have mean=0 and sd(standard deviation)=1
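# The repeated encode-and-scale blocks above could equivalently be written as a
# loop (shown only as a commented sketch -- running it in addition to the code
# above would transform the columns twice):
# for col in ['job', 'marital', 'education', 'housing', 'loan', 'default',
#             'contact', 'poutcome', 'y', 'month', 'p']:
#     clustervar[col] = number.fit_transform(clustervar[col].astype('str'))
#     clustervar[col] = preprocessing.scale(clustervar[col].astype('float64'))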
#split data into train and test sets
clus_train, clus_test = train_test_split(clustervar,test_size=.3,random_state=123)
# k-means cluster analysis for 1-9 clusters
from scipy.spatial.distance import cdist
#clusters=np.array(range(1,10))
clusters=range(1,10)
meandist=[]
for k in clusters:
model=KMeans(n_clusters=k)
model.fit(clus_train)
clusassign=model.predict(clus_train)
meandist.append(sum(np.min(cdist(clus_train,model.cluster_centers_,'euclidean'),axis=1))
/ clus_train.shape[0])
# plotting average distance of observations from the cluster
# centroid to use the elbow method to identify the number of clusters to choose
plt.plot(clusters,meandist)
plt.xlabel("number of clusters")
plt.ylabel("average distance")
plt.title("selecting k with elbow method")
plt.show()
#now k means for 3 clusters
model3=KMeans(n_clusters=3)
model3.fit(clus_train)
clusassign=model3.predict(clus_train)
#plot of clusters
model3.labels_
from sklearn.decomposition import PCA
pca_2=PCA(2)
plot_columns=pca_2.fit_transform(clus_train)
plt.scatter(x=plot_columns[:,0], y=plot_columns[:,1], c=model3.labels_,)
plt.xlabel('canonical variable 1')
plt.ylabel('canonical variable 2')
plt.title('Scatterplot of canonical variable for 3 clusters')
plt.show()
#BEGIN multiple steps to merge cluster assignment with clustering variables to examine
#cluster variable means by cluster
#examining cluster variables
# create a unique identifier variable from the index for the
# cluster training data to merge with the cluster assignment variable
clus_train.reset_index(level=0, inplace=True)
# create a list that has the new index variable
cluslist=list(clus_train['index'])
# create a list of cluster assignments
labels=list(model3.labels_)
# combine index variable list with cluster assignment list into a dictionary
newlist=dict(zip(cluslist, labels))
newlist
# convert newlist dictionary to a dataframe
newclus = DataFrame.from_dict(newlist, orient='index')
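# Possible continuation (a sketch, not from the original script): attach the
# cluster labels back onto the training data and inspect per-cluster means.
newclus.columns = ['cluster']
newclus.reset_index(level=0, inplace=True)
merged_train = pd.merge(clus_train, newclus, on='index')
print(merged_train.groupby('cluster').mean())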
import argparse
import os
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.model_selection import StratifiedShuffleSplit
from torch import optim
class DevNet(nn.Module):
"""docstring for ClassName."""
def __init__(self,
feature_dim,
num_class,
ae_hidden_neurons=[512, 256, 128],
cl_hidden_neurons=[64, 32, 10],
drop_rate=0.2,
batch_norm=True,
hidden_activation='relu',
output_activation='sigmoid'):
super(DevNet, self).__init__()
self.feature_dim = feature_dim
self.num_class = num_class
self.layers_neurons_encoder = [self.feature_dim, *ae_hidden_neurons]
self.layers_neurons_decoder = self.layers_neurons_encoder[::-1]
self.cl_hidden_neurons = [ae_hidden_neurons[-1], *cl_hidden_neurons, 1]
self.drop_rate = drop_rate
self.batch_norm = batch_norm
self.hidden_activation = nn.ReLU()
self.output_activation = nn.Sigmoid()
self.encoder = nn.Sequential()
self.decoder = nn.Sequential()
self.classifier = nn.Sequential()
# create encoder model
for idx, layer in enumerate(self.layers_neurons_encoder[:-1]):
self.encoder.add_module(
'linear' + str(idx),
nn.Linear(self.layers_neurons_encoder[idx],
self.layers_neurons_encoder[idx + 1]))
self.encoder.add_module(
'batch_norm' + str(idx),
nn.BatchNorm1d(self.layers_neurons_encoder[idx + 1]))
self.encoder.add_module('dropout' + str(idx),
nn.Dropout(self.drop_rate))
self.encoder.add_module(hidden_activation + str(idx),
self.hidden_activation)
# create decoder model
for idx, layer in enumerate(self.layers_neurons_decoder[:-1]):
self.decoder.add_module(
'linear' + str(idx),
nn.Linear(self.layers_neurons_decoder[idx],
self.layers_neurons_decoder[idx + 1]))
self.decoder.add_module(
'batch_norm' + str(idx),
nn.BatchNorm1d(self.layers_neurons_decoder[idx + 1]))
self.decoder.add_module('dropout' + str(idx),
nn.Dropout(self.drop_rate))
if idx == len(self.layers_neurons_decoder) - 2:
self.decoder.add_module(output_activation + str(idx),
self.output_activation)
else:
self.decoder.add_module(hidden_activation + str(idx),
self.hidden_activation)
# create classifier
for idx, layer in enumerate(self.cl_hidden_neurons[:-2]):
self.classifier.add_module(
'linear' + str(idx),
nn.Linear(self.cl_hidden_neurons[idx],
self.cl_hidden_neurons[idx + 1]))
self.classifier.add_module(
'batch_norm' + str(idx),
nn.BatchNorm1d(self.cl_hidden_neurons[idx + 1]))
self.classifier.add_module('dropout' + str(idx),
nn.Dropout(self.drop_rate))
self.classifier.add_module(hidden_activation + str(idx),
self.hidden_activation)
idx += 1
self.classifier.add_module(
'linear' + str(idx),
nn.Linear(self.cl_hidden_neurons[idx],
self.cl_hidden_neurons[idx + 1]))
def forward(self, x):
feature_vector = self.encoder(x)
ae_output = self.decoder(feature_vector)
cls_output = self.classifier(feature_vector)
return ae_output, cls_output
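# Minimal usage sketch (an assumption, not part of the original file): the
# feature dimension of 122 is arbitrary and chosen only to illustrate shapes.
if __name__ == '__main__':
    _model = DevNet(feature_dim=122, num_class=2)
    _dummy = torch.randn(16, 122)
    _recon, _score = _model(_dummy)
    print(_recon.shape, _score.shape)  # torch.Size([16, 122]) torch.Size([16, 1])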
class Task(object):
def __init__(self, data_name, data_path, label_name, saved=False):
super(Task, self).__init__()
self.data_name = data_name
self.data_path = data_path
self.label_name = label_name
self.saved = saved
def data_split(self):
"""split dataset to train set and test set."""
        data_all = pd.read_csv(self.data_path)
"""Ibis expression API definitions."""
from __future__ import annotations
import collections
import datetime
import numbers
from typing import Iterable, Literal, Mapping, Sequence, TypeVar
import dateutil.parser
import pandas as pd
import ibis.common.exceptions as com
import ibis.expr.builders as bl
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis.expr.random import random # noqa
from ibis.expr.schema import Schema
from ibis.expr.types import ( # noqa
ArrayColumn,
ArrayScalar,
ArrayValue,
BooleanColumn,
BooleanScalar,
BooleanValue,
CategoryScalar,
CategoryValue,
ColumnExpr,
DateColumn,
DateScalar,
DateValue,
DecimalColumn,
DecimalScalar,
DecimalValue,
DestructColumn,
DestructScalar,
DestructValue,
Expr,
FloatingColumn,
FloatingScalar,
FloatingValue,
GeoSpatialColumn,
GeoSpatialScalar,
GeoSpatialValue,
IntegerColumn,
IntegerScalar,
IntegerValue,
IntervalColumn,
IntervalScalar,
IntervalValue,
LineStringColumn,
LineStringScalar,
LineStringValue,
MapColumn,
MapScalar,
MapValue,
MultiLineStringColumn,
MultiLineStringScalar,
MultiLineStringValue,
MultiPointColumn,
MultiPointScalar,
MultiPointValue,
MultiPolygonColumn,
MultiPolygonScalar,
MultiPolygonValue,
NullColumn,
NullScalar,
NullValue,
NumericColumn,
NumericScalar,
NumericValue,
PointColumn,
PointScalar,
PointValue,
PolygonColumn,
PolygonScalar,
PolygonValue,
ScalarExpr,
StringColumn,
StringScalar,
StringValue,
StructColumn,
StructScalar,
StructValue,
TableExpr,
TimeColumn,
TimeScalar,
TimestampColumn,
TimestampScalar,
TimestampValue,
TimeValue,
ValueExpr,
array,
literal,
map,
null,
struct,
)
from ibis.expr.types.groupby import GroupedTableExpr # noqa
from ibis.expr.window import (
cumulative_window,
range_window,
rows_with_max_lookback,
trailing_range_window,
trailing_window,
window,
)
__all__ = (
'aggregate',
'array',
'case',
'coalesce',
'cross_join',
'cumulative_window',
'date',
'desc',
'asc',
'Expr',
'geo_area',
'geo_as_binary',
'geo_as_ewkb',
'geo_as_ewkt',
'geo_as_text',
'geo_azimuth',
'geo_buffer',
'geo_centroid',
'geo_contains',
'geo_contains_properly',
'geo_covers',
'geo_covered_by',
'geo_crosses',
'geo_d_fully_within',
'geo_disjoint',
'geo_difference',
'geo_d_within',
'geo_envelope',
'geo_equals',
'geo_geometry_n',
'geo_geometry_type',
'geo_intersection',
'geo_intersects',
'geo_is_valid',
'geo_line_locate_point',
'geo_line_merge',
'geo_line_substring',
'geo_ordering_equals',
'geo_overlaps',
'geo_touches',
'geo_distance',
'geo_end_point',
'geo_length',
'geo_max_distance',
'geo_n_points',
'geo_n_rings',
'geo_perimeter',
'geo_point',
'geo_point_n',
'geo_simplify',
'geo_srid',
'geo_start_point',
'geo_transform',
'geo_unary_union',
'geo_union',
'geo_within',
'geo_x',
'geo_x_max',
'geo_x_min',
'geo_y',
'geo_y_max',
'geo_y_min',
'greatest',
'ifelse',
'infer_dtype',
'infer_schema',
'interval',
'join',
'least',
'literal',
'map',
'NA',
'negate',
'now',
'null',
'param',
'pi',
'prevent_rewrite',
'random',
'range_window',
'row_number',
'rows_with_max_lookback',
'schema',
'Schema',
'sequence',
'struct',
'table',
'time',
'timestamp',
'trailing_range_window',
'trailing_window',
'where',
'window',
)
infer_dtype = dt.infer
infer_schema = sch.infer
NA = null()
T = TypeVar("T")
def param(type: dt.DataType) -> ir.ScalarExpr:
"""Create a deferred parameter of a given type.
Parameters
----------
type
The type of the unbound parameter, e.g., double, int64, date, etc.
Returns
-------
ScalarExpr
        A scalar expression backed by a parameter
Examples
--------
>>> import ibis
>>> import ibis.expr.datatypes as dt
>>> start = ibis.param(dt.date)
>>> end = ibis.param(dt.date)
>>> schema = [('timestamp_col', 'timestamp'), ('value', 'double')]
>>> t = ibis.table(schema)
>>> predicates = [t.timestamp_col >= start, t.timestamp_col <= end]
>>> expr = t.filter(predicates).value.sum()
"""
return ops.ScalarParameter(dt.dtype(type)).to_expr()
def sequence(values: Sequence[T | None]) -> ir.ListExpr:
"""Wrap a list of Python values as an Ibis sequence type.
Parameters
----------
values
Should all be None or the same type
Returns
-------
ListExpr
A list expression
"""
return ops.ValueList(values).to_expr()
def schema(
pairs: Iterable[tuple[str, dt.DataType]]
| Mapping[str, dt.DataType]
| None = None,
names: Iterable[str] | None = None,
types: Iterable[str | dt.DataType] | None = None,
) -> sch.Schema:
"""Validate and return an Schema object.
Parameters
----------
pairs
List or dictionary of name, type pairs. Mutually exclusive with `names`
and `types`.
names
Field names. Mutually exclusive with `pairs`.
types
Field types. Mutually exclusive with `pairs`.
Examples
--------
>>> from ibis import schema
>>> sc = schema([('foo', 'string'),
... ('bar', 'int64'),
... ('baz', 'boolean')])
>>> sc2 = schema(names=['foo', 'bar', 'baz'],
... types=['string', 'int64', 'boolean'])
Returns
-------
Schema
An ibis schema
""" # noqa: E501
if pairs is not None:
return Schema.from_dict(dict(pairs))
else:
return Schema(names, types)
_schema = schema
def table(schema: sch.Schema, name: str | None = None) -> ir.TableExpr:
"""Create an unbound table for build expressions without data.
Parameters
----------
schema
A schema for the table
name
Name for the table
Returns
-------
TableExpr
An unbound table expression
"""
if not isinstance(schema, Schema):
schema = _schema(pairs=schema)
node = ops.UnboundTable(schema, name=name)
return node.to_expr()
def desc(expr: ir.ColumnExpr | str) -> ir.SortExpr | ops.DeferredSortKey:
"""Create a descending sort key from `expr` or column name.
Parameters
----------
expr
The expression or column name to use for sorting
Examples
--------
>>> import ibis
>>> t = ibis.table([('g', 'string')])
>>> result = t.group_by('g').size('count').sort_by(ibis.desc('count'))
Returns
-------
ops.DeferredSortKey
A deferred sort key
"""
if not isinstance(expr, Expr):
return ops.DeferredSortKey(expr, ascending=False)
else:
return ops.SortKey(expr, ascending=False).to_expr()
def asc(expr: ir.ColumnExpr | str) -> ir.SortExpr | ops.DeferredSortKey:
"""Create a ascending sort key from `asc` or column name.
Parameters
----------
expr
The expression or column name to use for sorting
Examples
--------
>>> import ibis
>>> t = ibis.table([('g', 'string')])
>>> result = t.group_by('g').size('count').sort_by(ibis.asc('count'))
Returns
-------
ops.DeferredSortKey
A deferred sort key
"""
if not isinstance(expr, Expr):
return ops.DeferredSortKey(expr)
else:
return ops.SortKey(expr).to_expr()
def timestamp(
value: str | numbers.Integral,
timezone: str | None = None,
) -> ir.TimestampScalar:
"""Construct a timestamp literal if `value` is coercible to a timestamp.
Parameters
----------
value
The value to use for constructing the timestamp
timezone
The timezone of the timestamp
Returns
-------
TimestampScalar
A timestamp expression
"""
if isinstance(value, str):
try:
            value = pd.Timestamp(value, tz=timezone)
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import operator
import string
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_110
from cudf.testing._utils import (
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
@pytest.fixture
def pd_str_cat():
categories = list("abc")
codes = [0, 0, 1, 0, 1, 2, 0, 1, 1, 2]
return pd.Categorical.from_codes(codes, categories=categories)
def test_categorical_basic():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
cudf_cat = cudf.Index(cat)
pdsr = pd.Series(cat, index=["p", "q", "r", "s", "t"])
sr = cudf.Series(cat, index=["p", "q", "r", "s", "t"])
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
string = str(sr)
expect_str = """
p a
q a
r b
s c
t a
"""
assert all(x == y for x, y in zip(string.split(), expect_str.split()))
assert_eq(cat.codes, cudf_cat.codes.to_array())
def test_categorical_integer():
if not PANDAS_GE_110:
pytest.xfail(reason="pandas >=1.1 required")
cat = pd.Categorical(["a", "_", "_", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(
cat.codes, sr.cat.codes.astype(cat.codes.dtype).fillna(-1).to_array()
)
assert sr.null_count == 2
np.testing.assert_array_equal(
pdsr.cat.codes.values,
sr.cat.codes.astype(pdsr.cat.codes.dtype).fillna(-1).to_array(),
)
string = str(sr)
expect_str = """
0 a
1 <NA>
2 <NA>
3 c
4 a
dtype: category
Categories (3, object): ['a', 'b', 'c']
"""
assert string.split() == expect_str.split()
def test_categorical_compare_unordered():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# test equal
out = sr == sr
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr == pdsr)
# test inequality
out = sr != sr
assert not np.any(out.to_array())
assert not np.any(pdsr != pdsr)
assert not pdsr.cat.ordered
assert not sr.cat.ordered
# test using ordered operators
assert_exceptions_equal(
lfunc=operator.lt,
rfunc=operator.lt,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
)
def test_categorical_compare_ordered():
cat1 = pd.Categorical(
["a", "a", "b", "c", "a"], categories=["a", "b", "c"], ordered=True
)
pdsr1 = pd.Series(cat1)
sr1 = cudf.Series(cat1)
cat2 = pd.Categorical(
["a", "b", "a", "c", "b"], categories=["a", "b", "c"], ordered=True
)
pdsr2 = pd.Series(cat2)
sr2 = cudf.Series(cat2)
# test equal
out = sr1 == sr1
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr1 == pdsr1)
# test inequality
out = sr1 != sr1
assert not np.any(out.to_array())
assert not np.any(pdsr1 != pdsr1)
assert pdsr1.cat.ordered
assert sr1.cat.ordered
# test using ordered operators
np.testing.assert_array_equal(pdsr1 < pdsr2, (sr1 < sr2).to_array())
np.testing.assert_array_equal(pdsr1 > pdsr2, (sr1 > sr2).to_array())
def test_categorical_binary_add():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
expected_error_message="Series of dtype `category` cannot perform "
"the operation: add",
)
def test_categorical_unary_ceil():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=getattr,
rfunc=sr.ceil,
lfunc_args_and_kwargs=([pdsr, "ceil"],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: ceil",
)
def test_categorical_element_indexing():
"""
Element indexing to a cat column must give the underlying object
not the numerical index.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_eq(pdsr, sr)
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
def test_categorical_masking():
"""
    Test the common operation of getting all rows that match a certain
    category.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# check scalar comparison
expect_matches = pdsr == "a"
got_matches = sr == "a"
np.testing.assert_array_equal(
expect_matches.values, got_matches.to_array()
)
# mask series
expect_masked = pdsr[expect_matches]
got_masked = sr[got_matches]
assert len(expect_masked) == len(got_masked)
assert len(expect_masked) == got_masked.valid_count
assert_eq(got_masked, expect_masked)
def test_df_cat_set_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a")
pddf = df.to_pandas(nullable_pd_dtype=False)
expect = pddf.set_index("a")
assert_eq(got, expect)
def test_df_cat_sort_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a").sort_index()
expect = df.to_pandas(nullable_pd_dtype=False).set_index("a").sort_index()
assert_eq(got, expect)
def test_cat_series_binop_error():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
dfa = df["a"]
dfb = df["b"]
# lhs is a categorical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfa, dfb],),
rfunc_args_and_kwargs=([dfa, dfb],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: add",
)
# if lhs is a numerical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfb, dfa],),
rfunc_args_and_kwargs=([dfb, dfa],),
check_exception_type=False,
expected_error_message="'add' operator not supported",
)
@pytest.mark.parametrize("num_elements", [10, 100, 1000])
def test_categorical_unique(num_elements):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), num_elements
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_sorted = np.sort(gdf["a"].unique().to_pandas())
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique_sorted = np.sort(pdf["a"].unique())
# verify
np.testing.assert_array_equal(pdf_unique_sorted, gdf_unique_sorted)
@pytest.mark.parametrize("nelem", [20, 50, 100])
def test_categorical_unique_count(nelem):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), nelem
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_count = gdf["a"].nunique()
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique = pdf["a"].unique()
# verify
assert gdf_unique_count == len(pdf_unique)
def test_categorical_empty():
cat = pd.Categorical([])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(cat.codes, sr.cat.codes.to_array())
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
def test_categorical_set_categories():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
    psr = pd.Series(cat)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pandas study notes
pandas as a dict-like form of numpy
"""
from __future__ import print_function
import numpy as np
import pandas as pd
# concatenating
# ignore index
df1 = pd.DataFrame(np.ones((3,4))*0, columns=['a','b','c','d'])
df2 = pd.DataFrame(np.ones((3,4))*1, columns=['a','b','c','d'])
df3 = pd.DataFrame(np.ones((3,4))*2, columns=['a','b','c','d'])
# print(df1)
# print(df2)
# print(df3)
res = pd.concat([df1, df2, df3], axis=0, ignore_index=True)
# print(res)
# join, ('inner', 'outer')
df1 = pd.DataFrame(np.ones((3,4))*0, columns=['a','b','c','d'], index=[1,2,3])
df2 = pd.DataFrame(np.ones((3,4))*1, columns=['b','c','d', 'e'], index=[2,3,4])
res = pd.concat([df1, df2], axis=1, join='outer')
# print(res)
res = pd.concat([df1, df2], axis=1, join='inner')
# print(res)
# # join_axes
res = pd.concat([df1, df2], axis=1, join_axes=[df1.index])
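# Note: the join_axes argument was deprecated in pandas 0.25 and removed in 1.0;
# a roughly equivalent modern spelling (added here as an illustration) is:
res = pd.concat([df1, df2], axis=1).reindex(df1.index)
# print(res)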
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
        result = pd.unique(Series([Timestamp('20160101')
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv( | StringIO('A,B') | pandas.compat.StringIO |
import pandas as pd
import pdb
import pickle
import matplotlib.pyplot as plt
from Analysis_Utils import preprocessing_df as preprocessing
import DataStream_Vis_Utils as utils
from moviepy.editor import *
import skvideo
import cv2
import imageio
import numpy as np
from scipy import ndimage
from errno import EEXIST, ENOENT
ffm_path = 'C:/Users/bassp/OneDrive/Desktop/ffmpeg/bin/'
skvideo.setFFmpegPath(ffm_path)
import skvideo.io
# Public Functions
def loop_over_rats_and_extract_reaches(prediction_dataframe, e_dataframe, dummy_video_path, rat):
global r_mask, reaching, bout
save_path = '/Users/bassp/OneDrive/Desktop/Classification Project/reach_thresholds_RM15/'
# Get rat, date, session for each block we need to process.
k_dataframe = | pd.read_pickle(prediction_dataframe) | pandas.read_pickle |
# -*- coding: utf-8 -*-
import json
import pandas as pd
import tweepy
N_TWEETS_PER_REQUEST = 200
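# NOTE: `authenticate` (used by `from_twitter` below) is not defined in this
# snippet. A minimal sketch is given here, assuming the standard tweepy 3.x
# OAuth flow; the real helper may differ.
def authenticate(consumer_key=None, consumer_secret=None,
                 access_token=None, access_token_secret=None):
    # Build an OAuth handler from the app credentials, attach the user token,
    # and return a ready-to-use tweepy API client.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    return tweepy.API(auth)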
def from_twitter(profile, access_token=None, access_token_secret=None,
consumer_key=None, consumer_secret=None, as_file=None):
"""
    Downloads the timeline of a Twitter profile and turns it into a dataframe.
    Args:
        profile: the Twitter screen name whose timeline is downloaded.
        access_token: the access token as provided by Twitter.
        access_token_secret: the access token secret as provided by Twitter.
        consumer_key: the consumer key as provided by Twitter.
        consumer_secret: the consumer secret as provided by Twitter.
as_file: boolean.
Returns:
        A dataframe with one row per Tweet.
"""
api = authenticate(
consumer_key=consumer_key, consumer_secret=consumer_secret,
access_token=access_token, access_token_secret=access_token_secret)
if not api.verify_credentials():
print("Credentials are invalid.")
return None
pages = fetch_timeline_pages(
api, profile, n_tweets_per_request=N_TWEETS_PER_REQUEST)
pages = [pd.DataFrame(page) for page in pages]
df = | pd.concat(pages) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'].fillna(2, inplace=True)
df_2012_2013['prcab'].fillna(2, inplace=True)
df_2014_2015['prcab'].fillna(2, inplace=True)
df_2016_2017['prcab'].fillna(2, inplace=True)
df_2018_2019['prcab'].fillna(2, inplace=True)
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
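# The mask-based year split above can be expressed more compactly by stacking
# the two-year files and grouping on 'surgyear'; a minimal sketch (hypothetical
# helper, equivalent result as a dict mapping year -> sub-frame):
def split_by_year(frames):
    combined = pd.concat(frames, ignore_index=True)
    return {year: group for year, group in combined.groupby('surgyear')}
# e.g. years = split_by_year([df_2010_2011, df_2012_2013, df_2014_2015,
#                             df_2016_2017, df_2018_2019]); df_2010 = years[2010]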
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
def groupby_siteid():
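    # Count every operation per siteid for each year 2010-2019, outer-join the
    # yearly counts, then derive each site's total volume, number of active
    # years, and average volume per active year; results are written to CSV.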
df2010 = df_2010.groupby('siteid')['siteid'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('siteid')['siteid'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('siteid')['siteid'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('siteid')['siteid'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('siteid')['siteid'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('siteid')['siteid'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('siteid')['siteid'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('siteid')['siteid'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('siteid')['siteid'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('siteid')['siteid'].count().reset_index(name='2019_total')
df1 =pd.merge(df2010, df2011, on='siteid', how='outer')
df2 =pd.merge(df1, df2012, on='siteid', how='outer')
df3 =pd.merge(df2, df2013, on='siteid', how='outer')
df4 =pd.merge(df3, df2014, on='siteid', how='outer')
df5 =pd.merge(df4, df2015, on='siteid', how='outer')
df6 =pd.merge(df5, df2016, on='siteid', how='outer')
df7 =pd.merge(df6, df2017, on='siteid', how='outer')
df8 =pd.merge(df7, df2018, on='siteid', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='siteid', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_sum'] = df_sum_all_Years['Year_sum']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_siteid['num_of_years'] = df_sum_all_Years['Distinct_years']
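# The ten pairwise pd.merge calls used in groupby_siteid (and repeated below)
# could be collapsed with functools.reduce; a small sketch of the same
# outer-join pattern (hypothetical helper, not part of the original script):
def merge_yearly_counts(frames, key='siteid'):
    from functools import reduce
    # Outer-join an arbitrary list of per-year count frames on `key`.
    return reduce(lambda left, right: pd.merge(left, right, on=key, how='outer'),
                  frames)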
def groupby_siteid_prcab():
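    # Per-site reoperation (prcab == 1) and first-operation (prcab == 2)
    # counts for each year, their averages across active years, and derived
    # percentage columns; intermediate tables are written to CSV.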
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 =pd.merge(df2010, df2011, on='siteid', how='outer')
df2 =pd.merge(df1, df2012, on='siteid', how='outer')
df3 =pd.merge(df2, df2013, on='siteid', how='outer')
df4 =pd.merge(df3, df2014, on='siteid', how='outer')
df5 =pd.merge(df4, df2015, on='siteid', how='outer')
df6 =pd.merge(df5, df2016, on='siteid', how='outer')
df7 =pd.merge(df6, df2017, on='siteid', how='outer')
df8 =pd.merge(df7, df2018, on='siteid', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='siteid', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_siteid['num_of_years']
df_sum_all_Years.to_csv("sum all years siteid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop siteid.csv")
print("num of sites with less years reop : ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2019_Firstop')
d1 = pd.merge(df_10, df_11, on='siteid', how='outer')
d2 = pd.merge(d1, df_12, on='siteid', how='outer')
d3 = pd.merge(d2, df_13, on='siteid', how='outer')
d4 = pd.merge(d3, df_14, on='siteid', how='outer')
d5 = pd.merge(d4, df_15, on='siteid', how='outer')
d6 = pd.merge(d5, df_16, on='siteid', how='outer')
d7 = pd.merge(d6, df_17, on='siteid', how='outer')
d8 = pd.merge(d7, df_18, on='siteid', how='outer')
df_sum_all_Years_total = pd.merge(d8, df_19, on='siteid', how='outer')
df_sum_all_Years_total.fillna(0, inplace=True)
cols = df_sum_all_Years_total.columns.difference(['siteid'])
df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years_total.columns.difference(['siteid', 'Distinct_years'])
df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / avg_siteid['num_of_years']
df_sum_all_Years_total.to_csv("First op sum all years siteid.csv")
less = df_sum_all_Years_total[df_sum_all_Years_total['Distinct_years'] != 10]
less.to_csv("First op less 10 years siteid.csv")
print("First op num of sites with less years: ", len(less))
x = np.array(less['Distinct_years'])
print(np.unique(x))
temp_first = pd.DataFrame()
temp_first['siteid'] = df_sum_all_Years_total['siteid']
temp_first['Year_sum_Firstop'] = df_sum_all_Years_total['Year_sum']
temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
temp_reop = pd.DataFrame()
temp_reop['siteid'] = df_sum_all_Years['siteid']
temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
temp_reop['Year_sum_reop'] = df_sum_all_Years['Year_sum_reop']
df20 = pd.merge(avg_siteid, temp_first, on='siteid', how='outer')
total_avg_site_id = pd.merge(df20, temp_reop, on='siteid', how='outer')
total_avg_site_id['firstop/total'] = (total_avg_site_id['Year_avg_Firstop'] / total_avg_site_id['num_of_years']) * 100
total_avg_site_id['reop/total'] = (total_avg_site_id['Year_avg_reop'] / total_avg_site_id['num_of_years']) * 100
total_avg_site_id.fillna(0,inplace=True)
total_avg_site_id.to_csv('total_avg_site_id.csv')
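# The repeated groupby/apply-per-year pattern above can also be written as a
# single pd.crosstab over a combined frame; a sketch, assuming `df_all` holds
# all years with 'siteid', 'surgyear' and 'prcab' columns:
def reop_counts_by_year(df_all):
    reops = df_all[df_all['prcab'] == 1]
    # Rows: siteid, columns: surgyear, cells: number of reoperations.
    return pd.crosstab(reops['siteid'], reops['surgyear'])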
def groupby_surgid():
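    # Same yearly count / outer-join / average pipeline as groupby_siteid,
    # keyed on surgid (surgeon) instead of siteid (site).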
df2010 = df_2010.groupby('surgid')['surgid'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('surgid')['surgid'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('surgid')['surgid'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('surgid')['surgid'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('surgid')['surgid'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('surgid')['surgid'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('surgid')['surgid'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('surgid')['surgid'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('surgid')['surgid'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('surgid')['surgid'].count().reset_index(name='2019_total')
df1 = pd.merge(df2010, df2011, on='surgid', how='outer')
df2 = pd.merge(df1, df2012, on='surgid', how='outer')
df3 = pd.merge(df2, df2013, on='surgid', how='outer')
df4 = pd.merge(df3, df2014, on='surgid', how='outer')
df5 = pd.merge(df4, df2015, on='surgid', how='outer')
df6 = pd.merge(df5, df2016, on='surgid', how='outer')
df7 = pd.merge(df6, df2017, on='surgid', how='outer')
df8 = pd.merge(df7, df2018, on='surgid', how='outer')
df_sum_all_Years = pd.merge(df8, df2019, on='surgid', how='outer')
df_sum_all_Years.fillna(0, inplace=True)
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid', 'Distinct_years'])
df_sum_all_Years['Year_sum'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum'] / df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years surgid.csv")
print("details on surg id dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years'] != 10]
less_8.to_csv("total op less 10 years surgid.csv")
print("num of surgid with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
# avg_surgid['surgid'] = df_sum_all_Years['surgid']
# avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_surgid['total_year_count'] = df_sum_all_Years['Year_sum']
avg_surgid['num_of_years'] = df_sum_all_Years['Distinct_years']
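# Self-contained toy example of the Distinct_years / Year_avg idiom used in
# the groupby_* functions (made-up numbers, not study data):
def _distinct_years_demo():
    toy = pd.DataFrame({'surgid': [1, 2],
                        '2010_total': [5, 0],
                        '2011_total': [3, 4]})
    year_cols = toy.columns.difference(['surgid'])
    toy['Distinct_years'] = toy[year_cols].gt(0).sum(axis=1)   # -> 2, 1
    toy['Year_avg'] = toy[year_cols].sum(axis=1) / toy['Distinct_years']  # -> 4.0, 4.0
    return toy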
def groupby_surgid_prcab():
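    # Per-surgeon reoperation (prcab == 1) and first-operation (prcab == 2)
    # counts per year, mirroring groupby_siteid_prcab but keyed on surgid.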
df2010 = df_2010.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='surgid', how='outer')
df2 = pd.merge(df1, df2012, on='surgid', how='outer')
df3 = pd.merge(df2, df2013, on='surgid', how='outer')
df4 = pd.merge(df3, df2014, on='surgid', how='outer')
df5 = pd.merge(df4, df2015, on='surgid', how='outer')
df6 = pd.merge(df5, df2016, on='surgid', how='outer')
df7 = pd.merge(df6, df2017, on='surgid', how='outer')
df8 = pd.merge(df7, df2018, on='surgid', how='outer')
df_sum_all_Years = pd.merge(df8, df2019, on='surgid', how='outer')
df_sum_all_Years.fillna(0, inplace=True)
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_surgid['num_of_years']
df_sum_all_Years.to_csv("sum all years surgid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop surgid.csv")
print("num of surgid with less years reop : ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2019_Firstop')
d1 = pd.merge(df_10, df_11, on='surgid', how='outer')
d2 = pd.merge(d1, df_12, on='surgid', how='outer')
d3 = pd.merge(d2, df_13, on='surgid', how='outer')
d4 = pd.merge(d3, df_14, on='surgid', how='outer')
d5 = pd.merge(d4, df_15, on='surgid', how='outer')
d6 = pd.merge(d5, df_16, on='surgid', how='outer')
d7 = pd.merge(d6, df_17, on='surgid', how='outer')
d8 = | pd.merge(d7, df_18, on='surgid', how='outer') | pandas.merge |
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
        zip_filename (str): Filename of zip file containing the aECG XML file.
If '', then xml file is not stored in a zip file.
aecg_doc (etree._ElementTree): XML document of the aECG XML file.
aecgannset (AecgAnnotationSet): Annotation set to which append found
annotations.
path_prefix (str): Prefix of xml path from which start searching for
annotations.
annsset_xmlnode_path (str): Path to xml node of the annotation set
containing the annotations.
valgroup (str, optional): Indicates whether to search annotations in
rhythm or derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
anngrpid = anngrpid + 1
beatnum = beatnum + 1
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {beatnum} annotated beats and {anngrpid} '
f'annotations groups found')
anngrpid_from_beats = anngrpid
# Annotations stored without an associated beat
for codetype_path in ["/component/annotation/code["
"(contains(@code, \"MDC_ECG_\") and"
" not (@code=\'MDC_ECG_BEAT\'))]"]:
annsnodes = aecg_doc.xpath(
(path_prefix + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotations code
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
subannsnodes = annsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
for subannsnode in subannsnodes:
subsubannsnodes = subannsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
tmpnodes = [subannsnode]
if len(subsubannsnodes) > 0:
tmpnodes = tmpnodes + subsubannsnodes
for subsubannsnode in tmpnodes:
ann["wavecomponent"] = ""
ann["wavecomponent2"] = ""
ann["timecode"] = ""
ann["value"] = ""
ann["value_unit"] = ""
ann["low"] = ""
ann["low_unit"] = ""
ann["high"] = ""
ann["high_unit"] = ""
roi_base = "../support/supportingROI/component/boundary"
rel_path3 = roi_base + "/value"
valrow2 = validate_xpath(
subsubannsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/code"
if valrow2["VALIOUT"] == "PASSED":
if not ann["codetype"].endswith("WAVE"):
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations type
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
# if ann["wavecomponent"] == "":
# ann["wavecomponent"] = valrow2["VALUE"]
# else:
# ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value as attribute
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
else:
roi_base = "../component/annotation/support/"\
"supportingROI/component/boundary"
# Annotations type
valrow2 = validate_xpath(subsubannsnode,
"../component/annotation/"
"value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + \
"../component/annotation/value"
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotation values
if n != "":
rp = roi_base + "/value/" + n
else:
rp = roi_base + "/value"
valrow3 = validate_xpath(subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT"
"_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used by
# value and supporting ROI
for rel_path4 in ["../support/supportingROI/component/"
"boundary",
"../component/annotation/support/"
"supportingROI/component/boundary"]:
roinodes = subsubannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
"./code",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
anngrpid = anngrpid + 1
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups'
f' without an associated beat found')
return aecgannset, valpd
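# `aecgannset.anns` is filled above with flat dictionaries, so the extracted
# annotations can be inspected as a table; a small sketch (hypothetical
# convenience helper, not part of the package API shown here):
def annotations_to_dataframe(aecgannset: AecgAnnotationSet) -> pd.DataFrame:
    # One row per annotation: anngrpid, beatnum, code, codetype, wavecomponent,
    # timecode, value/value_unit, low/high bounds and lead.
    return pd.DataFrame(aecgannset.anns)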
def parse_generalinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts general information
    This function parses the `aecg_doc` xml document searching for general
    information, which it stores in the returned `Aecg`: the unique identifier
    (UUID), the ECG date and time of collection (EGDTC), and device information.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# UUID
# =======================================
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"root",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID found: {valrow["VALUE"]}')
aecg.UUID = valrow["VALUE"]
else:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID not found')
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"extension",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension found: {valrow["VALUE"]}')
aecg.UUID += valrow["VALUE"]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID updated to: {aecg.UUID}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension not found')
# =======================================
# EGDTC
# =======================================
valpd = pd.DataFrame()
egdtc_found = False
for n in ["low", "center", "high"]:
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"effectiveTime\"]/"
"*[local-name() = \"" + n + "\"]",
"",
"value",
new_validation_row(aecg.filename, "GENERAL",
"EGDTC_" + n),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
egdtc_found = True
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC {n} found: {valrow["VALUE"]}')
aecg.EGDTC[n] = valrow["VALUE"]
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if not egdtc_found:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# DEVICE
# =======================================
# DEVICE = {"manufacturer": "", "model": "", "software": ""}
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturerOrganization/name",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_manufacturer"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer found: {tmp}')
aecg.DEVICE["manufacturer"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"manufacturerModelName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_model"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model found: {tmp}')
aecg.DEVICE["model"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"softwareName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_software"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software found: {tmp}')
aecg.DEVICE["software"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
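# Usage sketch (illustrative only): parse_generalinfo expects a parsed aECG XML
# document and an Aecg instance to populate. The no-argument Aecg() call and the
# "example.aecg.xml" path below are assumptions, not part of this module's API.
#
#     aecg_doc = etree.parse("example.aecg.xml")
#     aecg_obj = Aecg()
#     aecg_obj.filename = "example.aecg.xml"
#     aecg_obj = parse_generalinfo(aecg_doc, aecg_obj, log_validation=True)
#     print(aecg_obj.UUID, aecg_obj.EGDTC, aecg_obj.DEVICE)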
def parse_subjectinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
    This function parses the `aecg_doc` xml document searching for subject
    information and stores it in the returned `Aecg`: subject unique identifier
    (USUBJID), gender, birthtime, and race.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# USUBJID
# =======================================
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"SUBJECTINFO",
"USUBJID_" + n))
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} found: {valrow["VALUE"]}')
aecg.USUBJID[n] = valrow["VALUE"]
else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DM.USUBJID ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""):
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID cannot be established.')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# SEX / GENDER
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/"
"administrativeGenderCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"SEX"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX found: {valrow["VALUE"]}')
aecg.SEX = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# BIRTHTIME
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/birthTime",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "SUBJECTINFO",
"BIRTHTIME"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME found.')
aecg.BIRTHTIME = valrow["VALUE"]
# age_in_years = aecg.subject_age_in_years()
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# RACE
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/raceCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"RACE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE found: {valrow["VALUE"]}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE not found')
aecg.RACE = valrow["VALUE"]
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
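# Usage sketch (illustrative only): assuming `aecg_doc` and `aecg_obj` were prepared
# as in the sketch after parse_generalinfo, the subject fields extracted here can be
# read back directly from the returned object.
#
#     aecg_obj = parse_subjectinfo(aecg_doc, aecg_obj, log_validation=True)
#     print(aecg_obj.USUBJID, aecg_obj.SEX, aecg_obj.BIRTHTIME, aecg_obj.RACE)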
def parse_trtainfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
                   log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts treatment information
    This function parses the `aecg_doc` xml document searching for treatment
    information and stores it in the returned `Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/definition/"
"treatmentGroupAssignment/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TRTA"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information found: {valrow["VALUE"]}')
aecg.TRTA = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    return aecg
import numpy
import matplotlib.pyplot as plt
import pandas
from scipy.stats import pearsonr
import seaborn as sns
import plotting
import torch
#Load experimental data
viabilitydata = pandas.read_csv('viabilityModel/viability.tsv', sep='\t', low_memory=False, index_col=0)
drugData = pandas.read_csv('viabilityModel/drug.tsv', sep='\t', low_memory=False, index_col=0)
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
    # test matching the SOFR leg maturity to the LIBOR leg and flipping legs to get the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
    assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import random
import warnings
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import structure_generator
from deap import base
from deap import creator
from deap import tools
from gtm import GTM
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
from rdkit.ML.Descriptors import MoleculeDescriptors
warnings.filterwarnings('ignore')
target_y_value = [1, -60, 30] # y-target for inverse analysis
dataset = pd.read_csv('molecules_with_multi_y.csv', index_col=0) # load the dataset with SMILES
target_ranges = pd.read_csv('settings_of_target_ys.csv', index_col=0) # load the target range of each y
file_name_of_main_fragments = 'sample_main_fragments_logS.smi' # file with the main-scaffold fragments for 'r_group'; 'sample_main_fragments.smi' is provided as a sample
file_name_of_sub_fragments = 'sample_sub_fragments.smi' # file with the side-chain fragments for 'r_group'; 'sample_main_fragments.smi' is provided as a sample
deleting_descriptor_names = ['Ipc', 'Kappa3']
# deleting_descriptor_names = []
number_of_iteration_of_ga = 10 # number of times to repeat GA-based structure generation; (number_of_iteration_of_ga × number_of_population) chemical structures are obtained
shape_of_map = [30, 30]
shape_of_rbf_centers = [8, 8]
variance_of_rbfs = 0.03125
lambda_in_em_algorithm = 0.5
number_of_iterations = 300
display_flag = 1
number_of_population = 30 # GA population size
number_of_generation = 50 # number of GA generations
probability_of_crossover = 0.5
probability_of_mutation = 0.2
threshold_of_variable_selection = 0.5
minimum_number = -10 ** 30
smiles = dataset.iloc[:, 0] # SMILES of the molecules
y = dataset.iloc[:, 1:] # target variables such as physical properties and activities
numbers_of_y = np.arange(y.shape[1])
# get the names of the descriptors to calculate
descriptor_names = []
for descriptor_information in Descriptors.descList:
descriptor_names.append(descriptor_information[0])
print('number of descriptors to calculate :', len(descriptor_names))
# calculate the descriptors
descriptor_calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptor_names)
descriptors = [] # computed descriptor values are appended here
print('number of molecules :', len(smiles))
for index, smiles_i in enumerate(smiles):
print(index + 1, '/', len(smiles))
molecule = Chem.MolFromSmiles(smiles_i)
descriptors.append(descriptor_calculator.CalcDescriptors(molecule))
original_x = pd.DataFrame(descriptors, index=dataset.index, columns=descriptor_names)
if deleting_descriptor_names is not None:
original_x = original_x.drop(deleting_descriptor_names, axis=1)
original_x = original_x.replace(np.inf, np.nan).fillna(np.nan) # replace inf with NaN
nan_variable_flags = original_x.isnull().any() # variables containing NaN
original_x = original_x.drop(original_x.columns[nan_variable_flags], axis=1) # drop variables containing NaN
# drop descriptors with zero standard deviation
std_0_variable_flags = original_x.std() == 0
x = original_x.drop(original_x.columns[std_0_variable_flags], axis=1)
variables = pd.concat([y, x], axis=1)
numbers_of_x = np.arange(numbers_of_y[-1] + 1, variables.shape[1])
# standardize x and y
autoscaled_variables = (variables - variables.mean(axis=0)) / variables.std(axis=0, ddof=1)
autoscaled_target_y_value = (target_y_value - variables.mean(axis=0)[numbers_of_y]) / variables.std(axis=0, ddof=1)[
numbers_of_y]
# construct GTMR model
model = GTM(shape_of_map, shape_of_rbf_centers, variance_of_rbfs, lambda_in_em_algorithm, number_of_iterations,
display_flag)
model.fit(autoscaled_variables)
if model.success_flag:
# calculate of responsibilities
responsibilities = model.responsibility(autoscaled_variables)
means = responsibilities.dot(model.map_grids)
modes = model.map_grids[responsibilities.argmax(axis=1), :]
mean_of_estimated_mean_of_y, mode_of_estimated_mean_of_y, responsibilities_y, py = \
model.gtmr_predict(autoscaled_variables.iloc[:, numbers_of_x], numbers_of_x, numbers_of_y)
plt.rcParams['font.size'] = 18
for index, y_number in enumerate(numbers_of_y):
predicted_y_test = mode_of_estimated_mean_of_y[:, index] * variables.iloc[:, y_number].std() + variables.iloc[:,
y_number].mean()
# yy-plot
plt.figure(figsize=figure.figaspect(1))
plt.scatter(variables.iloc[:, y_number], predicted_y_test)
y_max = np.max(np.array([np.array(variables.iloc[:, y_number]), predicted_y_test]))
y_min = np.min(np.array([np.array(variables.iloc[:, y_number]), predicted_y_test]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Estimated Y')
plt.show()
# r2, RMSE, MAE
print(
'r2: {0}'.format(float(1 - sum((variables.iloc[:, y_number] - predicted_y_test) ** 2) / sum(
(variables.iloc[:, y_number] - variables.iloc[:, y_number].mean()) ** 2))))
print('RMSE: {0}'.format(float((sum((variables.iloc[:, y_number] - predicted_y_test) ** 2) / len(
variables.iloc[:, y_number])) ** 0.5)))
print('MAE: {0}'.format(float(sum(abs(variables.iloc[:, y_number] - predicted_y_test)) / len(
variables.iloc[:, y_number]))))
# plot the mean of responsibilities
plt.scatter(means[:, 0], means[:, 1], c=variables.iloc[:, y_number])
plt.colorbar()
plt.ylim(-1.1, 1.1)
plt.xlim(-1.1, 1.1)
plt.xlabel('z1 (mean)')
plt.ylabel('z2 (mean)')
plt.show()
# plot the mode of responsibilities
plt.scatter(modes[:, 0], modes[:, 1], c=variables.iloc[:, y_number])
plt.colorbar()
plt.ylim(-1.1, 1.1)
plt.xlim(-1.1, 1.1)
plt.xlabel('z1 (mode)')
plt.ylabel('z2 (mode)')
plt.show()
# GTMR prediction for inverse analysis
autoscaled_mean_of_estimated_mean_of_x, autoscaled_mode_of_estimated_mean_of_x, responsibilities_y, py = \
model.gtmr_predict(autoscaled_target_y_value, numbers_of_y, numbers_of_x)
# Check results of inverse analysis
print('Results of inverse analysis')
mean_of_estimated_mean_of_x = pd.DataFrame(autoscaled_mean_of_estimated_mean_of_x, columns=x.columns) * x.std(
axis=0, ddof=1) + x.mean(axis=0)
mode_of_estimated_mean_of_x = pd.DataFrame(autoscaled_mode_of_estimated_mean_of_x, columns=x.columns) * x.std(
axis=0, ddof=1) + x.mean(axis=0)
print('estimated x-mode: {0}'.format(mode_of_estimated_mean_of_x))
estimated_x_mean_on_map = responsibilities_y.dot(model.map_grids)
estimated_x_mode_on_map = model.map_grids[np.argmax(responsibilities_y), :]
print('estimated x-mode on map: {0}'.format(estimated_x_mode_on_map))
plt.scatter(modes[:, 0], modes[:, 1], c='blue')
plt.scatter(estimated_x_mode_on_map[0], estimated_x_mode_on_map[1], c='red', marker='x', s=100)
plt.ylim(-1.1, 1.1)
plt.xlim(-1.1, 1.1)
plt.xlabel('z1 (mode)')
plt.ylabel('z2 (mode)')
plt.show()
    # structure generation
main_molecules = [molecule for molecule in Chem.SmilesMolSupplier(file_name_of_main_fragments,
delimiter='\t', titleLine=False)
if molecule is not None]
fragment_molecules = [molecule for molecule in Chem.SmilesMolSupplier(file_name_of_sub_fragments,
delimiter='\t', titleLine=False)
if molecule is not None]
creator.create('FitnessMax', base.Fitness, weights=(1.0,)) # for minimization, set weights as (-1.0,)
creator.create('Individual', list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
min_boundary = np.zeros(len(fragment_molecules) + 1)
max_boundary = np.ones(len(fragment_molecules) + 1) * 1.0
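# Each GA individual is a real-valued vector of length len(fragment_molecules) + 1 in [0, 1);
# structure_generator_based_on_r_group below presumably decodes it into a choice of main
# fragment and R-group fragments when building the SMILES string.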
def create_ind_uniform(min_boundary, max_boundary):
index = []
for min, max in zip(min_boundary, max_boundary):
index.append(random.uniform(min, max))
return index
toolbox.register('create_ind', create_ind_uniform, min_boundary, max_boundary)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.create_ind)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
def evalOneMax(individual):
individual_array = np.array(individual)
generated_smiles = structure_generator.structure_generator_based_on_r_group(main_molecules, fragment_molecules,
individual_array)
generated_molecule = Chem.MolFromSmiles(generated_smiles)
if generated_molecule is not None:
AllChem.Compute2DCoords(generated_molecule)
descriptors_of_generated_molecule = descriptor_calculator.CalcDescriptors(generated_molecule)
descriptors_of_generated_molecule = pd.DataFrame(descriptors_of_generated_molecule, index=descriptor_names)
descriptors_of_generated_molecule = descriptors_of_generated_molecule.T
if deleting_descriptor_names is not None:
descriptors_of_generated_molecule = descriptors_of_generated_molecule.drop(deleting_descriptor_names,
axis=1)
descriptors_of_generated_molecule = descriptors_of_generated_molecule.drop(
            descriptors_of_generated_molecule.columns[nan_variable_flags], axis=1)  # drop variables containing NaN
descriptors_of_generated_molecule = descriptors_of_generated_molecule.drop(
descriptors_of_generated_molecule.columns[std_0_variable_flags], axis=1)
descriptors_of_generated_molecule = descriptors_of_generated_molecule.replace(np.inf, np.nan).fillna(
            np.nan)  # replace inf with NaN
if descriptors_of_generated_molecule.isnull().sum(axis=1)[0] > 0:
value = minimum_number
else:
            # autoscaling
autoscaled_x_prediction = (descriptors_of_generated_molecule - x.mean()) / x.std()
distance = (((autoscaled_mode_of_estimated_mean_of_x[0, :] - autoscaled_x_prediction.values[0,
:]) ** 2).sum()) ** 0.5
value = 1 / distance
else:
value = minimum_number
return value,
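# The fitness returned above is the inverse Euclidean distance, in autoscaled descriptor space,
# between the generated molecule's descriptors and the x values estimated by the GTMR inverse
# analysis, so the GA favors structures whose descriptors approach that target; invalid or
# NaN-producing molecules fall back to the sentinel minimum_number.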
toolbox.register('evaluate', evalOneMax)
toolbox.register('mate', tools.cxTwoPoint)
toolbox.register('mutate', tools.mutFlipBit, indpb=0.05)
toolbox.register('select', tools.selTournament, tournsize=3)
generated_smiles_all = []
estimated_y_all = []
for iteration_number in range(number_of_iteration_of_ga):
print(iteration_number + 1, '/', number_of_iteration_of_ga)
# random.seed(100)
random.seed()
pop = toolbox.population(n=number_of_population)
print('Start of evolution')
fitnesses = list(map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
print(' Evaluated %i individuals' % len(pop))
for generation in range(number_of_generation):
print('-- Generation {0} --'.format(generation + 1))
offspring = toolbox.select(pop, len(pop))
offspring = list(map(toolbox.clone, offspring))
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < probability_of_crossover:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < probability_of_mutation:
toolbox.mutate(mutant)
del mutant.fitness.values
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
print(' Evaluated %i individuals' % len(invalid_ind))
pop[:] = offspring
fits = [ind.fitness.values[0] for ind in pop]
length = len(pop)
mean = sum(fits) / length
sum2 = sum(x * x for x in fits)
std = abs(sum2 / length - mean ** 2) ** 0.5
print(' Min %s' % min(fits))
print(' Max %s' % max(fits))
print(' Avg %s' % mean)
print(' Std %s' % std)
print('-- End of (successful) evolution --')
for each_pop in pop:
if each_pop.fitness.values[0] > minimum_number / 2:
estimated_y_all.append(each_pop.fitness.values[0])
each_pop_array = np.array(each_pop)
smiles = structure_generator.structure_generator_based_on_r_group(main_molecules, fragment_molecules,
each_pop_array)
generated_smiles_all.append(smiles)
estimated_y_all = | pd.DataFrame(estimated_y_all, index=generated_smiles_all, columns=['estimated_y']) | pandas.DataFrame |
"""Tests Anomaly Controller Functions."""
from dataclasses import dataclass
from datetime import datetime
import pandas as pd
from pandas.core.base import NoNewAttributesMixin
import pytest
from _pytest.monkeypatch import MonkeyPatch
from pandas.testing import assert_frame_equal
from chaos_genius.core.anomaly.controller import AnomalyDetectionController
from chaos_genius.databases.models.data_source_model import DataSource
def load_input_data(file_name):
"""Load the test data."""
df = | pd.read_csv(file_name) | pandas.read_csv |
"""
Author: <NAME>
Python 3.8.3
"""
import argparse
import os
from pathlib import Path
import sys
from pyfaidx import Fasta
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly
# Set working directory to current directory
sys.path.insert(0, os.path.realpath(os.path.dirname(__file__)))
os.chdir(os.path.realpath(os.path.dirname(__file__)))
def phased_haplotype_analysis():
"""
This script compares the TrioCanu output's from a reference cross
(bio-parent x bio-parent) to a replacement cross of one or both bio-parents.
It provides a breakdown of the number of reads sorted to the same bins in both
crosses (correctly sorted) and the number of reads that were sorted differently
(incorrectly sorted) from the reference cross. It also further breaks down
the incorrectly sorted reads into bins describing where the read was
incorrectly sorted to in the replacement cross.
"""
# ---------------- Argparse inputs ----------------
parser = argparse.ArgumentParser(description="Phased Haplotype Analysis (PHA)")
# Required input arguments
parser.add_argument('--bio_mom', type=str, required=True,
action='store', help='Pathway to biological mothers fasta file')
parser.add_argument('--bio_dad', type=str, required=True,
action='store', help='Pathway to biological fathers fasta file')
parser.add_argument('--bio_unknown', type=str, required=True,
action='store', help='Pathway to biological unknown fasta file')
parser.add_argument('--nonbio_mom', type=str, required=True,
action='store', help='Pathway to non-biological mothers fasta file')
parser.add_argument('--nonbio_dad', type=str, required=True,
action='store', help='Pathway to non-biological mothers fasta file')
parser.add_argument('--nonbio_unknown', type=str, required=True,
action='store', help='Pathway to non-biological unknown fasta file')
parser.add_argument('--cross', type=str, required=True, action='store',
help="Name of replacement cross (i.e. LilBubxPbe14).")
    parser.add_argument('--biocross', type=str, required=True, action='store',
                        help="Name of biological (reference) cross (i.e. LilBubxPbe14).")
parser.add_argument('--excel', type=str, required=True, action='store',
help="Excel file with TrioCanu output read counts and coverages.")
# Optional - defaults to False
    parser.add_argument('--show_graph', action='store_true',
default=False, help='Automatically load graph into browser')
args = parser.parse_args()
# input variables
bio_mom = args.bio_mom
bio_dad = args.bio_dad
bio_unknown = args.bio_unknown
non_bio_mom = args.nonbio_mom
non_bio_dad = args.nonbio_dad
non_bio_unknown = args.nonbio_unknown
cross_used = args.cross
bio_parents = args.biocross
auto_open = args.show_graph
excel_file_pathway = args.excel
# ---------------- Helper Functions ----------------
def output_directory_setup():
# Create cross used output directory
Path(f"./read_names/{cross_used}").mkdir(parents=True, exist_ok=True)
# Create data output directory
isr_pathway = f"./data_output/{cross_used}/incorrectly_sorted_reads/"
Path(isr_pathway).mkdir(parents=True, exist_ok=True)
return
def self_check(df, individual):
"""
This function will perform a duplicate drop test on each file to see if a particular individual contains duplicate reads.
"""
df_copy = df.copy()
df_copy.sort_values(by='names', inplace=True)
before_drop_length = len(df_copy)
df_copy.drop_duplicates(subset='names', keep=False, inplace=True)
if before_drop_length - len(df_copy) == 0:
print("--------------------------------------------")
print("\t-- Duplicate Self Check --")
print(
"|-- {} input file **DOES NOT** contain any duplicate reads".format(individual))
return True
else:
print(
"|-- {} input file **DOES** contain any duplicate reads".format(individual))
return False
def read_names_to_csv(file_pathway, sample_name):
"""
This function takes in the fasta files and pulls the read names and then outputs them into csv files.
:param file_pathway: Pathway to Fasta files
:param sample_name: Name of current sample being loaded (i.e. bio_mom)
:return: The output filename
"""
output_filename = f"./read_names/{cross_used}/{sample_name}.csv"
output_df = pd.DataFrame()
print(f"-- Loading {sample_name} Fasta file --")
file_read = Fasta(file_pathway)
print(f"-- Loaded {sample_name} Fasta file --")
file_read_names_list = [name for name in file_read.keys()]
file_read_names_list.sort()
output_df['names'] = file_read_names_list
print(f"-- Output {sample_name} read names to {sample_name}.csv --")
output_df.to_csv(output_filename, sep=',', index=False)
print(f'-- {sample_name} finished --')
return output_filename
def mom_check(bio_mom, nonbio_mom):
"""
This function takes in the bio and non-bio mom reads and...
        1. Merges the two lists together
2. Drops any duplicate (correctly sorted reads)
3. Separates the remaining reads back into bio and non-bio lists.
The remaining bio-mom reads are the reads lost in the sorting.
The remaining nonbio-mom reads are the reads that were gained in the sorting.
"""
# Run self check to see if any individuals contain duplicate reads
self_check(bio_mom, 'bio_mom')
self_check(nonbio_mom, 'nonbio_mom')
concat_df = pd.concat([bio_mom, nonbio_mom], sort=True)
concat_df = concat_df.reset_index(drop=True)
concat_df.sort_values(by='names', inplace=True)
before_length = len(concat_df)
print("--------------------------------------------")
print('|--')
print("|-- Maternal read count before drop:", len(concat_df))
concat_df.drop_duplicates(subset='names', keep=False, inplace=True)
print("|-- Maternal read count after drop:", len(concat_df))
print("|-- Maternal read count difference:",
(before_length - len(concat_df)))
print('|--')
num_correctly_sorted = (before_length - len(concat_df)) / 2
nonbio_mom_tally = []
[nonbio_mom_tally.append(
1) for relationship in concat_df["parental_relationships"] if relationship == 'nonbio_mom']
bio_mom_tally = []
[bio_mom_tally.append(
1) for relationship in concat_df["parental_relationships"] if relationship == 'bio_mom']
lost_mom_reads = concat_df[concat_df.isin(
{'bio_mom'}).any(1)] # removed ['names']
gained_mom_reads = concat_df[concat_df.isin(
{'nonbio_mom'}).any(1)] # removed ['names']
lost_mom_reads.reset_index(inplace=True, drop=True)
gained_mom_reads.reset_index(inplace=True, drop=True)
print("--------------------------------------------")
print('|--')
print('|-- Total maternal reads correctly sorted:', num_correctly_sorted)
print('|-- Total maternal reads gained:', sum(nonbio_mom_tally))
print('|-- Total maternal reads lost:', sum(bio_mom_tally))
print('|-- Total gained+lost:',
sum(bio_mom_tally) + sum(nonbio_mom_tally))
print('|--')
print("--------------------------------------------")
print("------ Maternal Read Sorting Complete ------")
print("--------------------------------------------")
print()
print("\t-- Moving to Paternal Reads --\t")
return lost_mom_reads, gained_mom_reads, num_correctly_sorted
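    # NOTE: dad_check() and unknown_check() below repeat this concat/drop-duplicates logic with
    # different labels; a single generic helper (e.g. a hypothetical parent_check(bio_df,
    # nonbio_df, label)) could replace all three, but the per-parent versions are kept as written.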
def dad_check(bio_dad, nonbio_dad):
"""
This function takes in the bio and non-bio dad reads and...
        1. Merges the two lists together
2. Drops any duplicate (correctly sorted reads)
3. Separates the remaining reads back into bio and non-bio lists.
The remaining bio-dad reads are the reads lost in the sorting.
The remaining nonbio-dad reads are the reads that were gained in the sorting.
"""
# Run self check to see if any individuals contain duplicate reads
self_check(bio_dad, 'bio_dad')
self_check(nonbio_dad, 'nonbio_dad')
concat_df = pd.concat([bio_dad, nonbio_dad], sort=True)
concat_df = concat_df.reset_index(drop=True)
concat_df.sort_values(by='names', inplace=True)
before_length = len(concat_df)
print("--------------------------------------------")
print('|--')
print("|-- Paternal before drop count:", len(concat_df))
concat_df.drop_duplicates(subset='names', keep=False, inplace=True)
print("|-- Paternal after drop count:", len(concat_df))
print("|-- Paternal read count difference:",
(before_length - len(concat_df)))
print('|--')
# Divide by two because we dropped two copies of each read
num_correctly_sorted = (before_length - len(concat_df)) / 2
nonbio_dad_tally = []
[nonbio_dad_tally.append(
1) for relationship in concat_df["parental_relationships"] if relationship == 'nonbio_dad']
bio_dad_tally = []
[bio_dad_tally.append(
1) for relationship in concat_df["parental_relationships"] if relationship == 'bio_dad']
lost_dad_reads = concat_df[concat_df.isin({'bio_dad'}).any(1)]
gained_dad_reads = concat_df[concat_df.isin({'nonbio_dad'}).any(1)]
lost_dad_reads.reset_index(inplace=True, drop=True)
gained_dad_reads.reset_index(inplace=True, drop=True)
print("--------------------------------------------")
print('|--')
print('|-- Total paternal reads correctly sorted:', num_correctly_sorted)
print('|-- Total paternal reads gained:', sum(nonbio_dad_tally))
print('|-- Total paternal reads lost:', sum(bio_dad_tally))
print('|-- Total gained+lost:',
sum(bio_dad_tally) + sum(nonbio_dad_tally))
print('|--')
print("--------------------------------------------")
print("------ Paternal Read Sorting Complete ------")
print("--------------------------------------------")
print()
print("\t-- Moving to Unknown Reads --\t")
return lost_dad_reads, gained_dad_reads, num_correctly_sorted
def unknown_check(bio_unknown, nonbio_unknown):
"""
This function takes in the bio and non-bio unknown reads and...
        1. Merges the two lists together
2. Drops any duplicate (correctly sorted reads)
3. Separates the remaining reads back into bio and non-bio lists.
The remaining bio-unknown reads are the reads lost in the sorting.
The remaining nonbio-unknown reads are the reads that were gained in the sorting.
"""
# Run self check to see if any individuals contain duplicate reads
self_check(bio_unknown, 'bio_unknown')
self_check(nonbio_unknown, 'nonbio_unknown')
concat_df = pd.concat([bio_unknown, nonbio_unknown], sort=True)
concat_df = concat_df.reset_index(drop=True)
concat_df.sort_values(by='names', inplace=True)
before_length = len(concat_df)
print("--------------------------------------------")
print('|--')
print("|-- Unknown before drop count:", len(concat_df))
concat_df.drop_duplicates(subset='names', keep=False, inplace=True)
print("|-- Unknown after drop count:", len(concat_df))
print("|-- Unknown read count difference:",
(before_length - len(concat_df)))
print('|--')
num_correctly_sorted = (before_length - len(concat_df)) / 2
nonbio_unknown_tally = []
[nonbio_unknown_tally.append(
1) for relationship in concat_df["parental_relationships"] if relationship == 'nonbio_unknown']
bio_unknown_tally = []
[bio_unknown_tally.append(
1) for relationship in concat_df["parental_relationships"] if relationship == 'bio_unknown']
lost_unknown_reads = concat_df[concat_df.isin({'bio_unknown'}).any(1)]
gained_unknown_reads = concat_df[concat_df.isin(
{'nonbio_unknown'}).any(1)]
lost_unknown_reads.reset_index(inplace=True, drop=True)
gained_unknown_reads.reset_index(inplace=True, drop=True)
print("--------------------------------------------")
print('|--')
print('|-- Total unknown reads correctly sorted:', num_correctly_sorted)
print('|-- Total unknown reads gained:', sum(nonbio_unknown_tally))
print('|-- Total unknown reads lost:', sum(bio_unknown_tally))
print('|-- Total gained+lost:',
sum(bio_unknown_tally) + sum(nonbio_unknown_tally))
print('|--')
print("--------------------------------------------")
print("------ Unknown Read Sorting Complete ------")
print("--------------------------------------------")
print()
print("\t*** -- Initiating Phase 2 -- ***\t\n")
return lost_unknown_reads, gained_unknown_reads, num_correctly_sorted
def maternal_load_n_run(bio_mom, non_bio_mom):
bio_mom_read_to_csv = read_names_to_csv(bio_mom, 'bio_mom')
bio_mom_read_names = pd.read_csv(
bio_mom_read_to_csv, dtype={'names': str})
bm_df = pd.DataFrame(
[value for value in bio_mom_read_names['names']], columns=['names'])
bm_df['parental_relationships'] = [
'bio_mom' for _ in range(len(bm_df.index))]
bio_mom_read_count = len(bm_df)
stats_collecting_df.at["biomom_initial_read_count", str(
cross_used)] = bio_mom_read_count
non_bio_mom_read_to_csv = read_names_to_csv(non_bio_mom, 'nonbio_mom')
non_bio_mom_read_names = pd.read_csv(
non_bio_mom_read_to_csv, dtype={'names': str})
nbm_df = pd.DataFrame(
[value for value in non_bio_mom_read_names['names']], columns=['names'])
nbm_df['parental_relationships'] = [
'nonbio_mom' for _ in range(len(nbm_df.index))]
print("--------------------------------------------")
print("-- Maternal gained/lost read information --")
print("--------------------------------------------")
print('|--')
print('|-- Number of bio mom reads:', len(bm_df))
print('|-- Number of nonbio mom reads:', len(nbm_df))
print('|--')
mom_check_output = mom_check(bm_df, nbm_df)
return mom_check_output, bio_mom_read_count
def paternal_load_n_run(bio_dad, non_bio_dad):
bio_dad_read_to_csv = read_names_to_csv(bio_dad, 'bio_dad')
bio_dad_read_names = pd.read_csv(
bio_dad_read_to_csv, dtype={'names': str})
bd_df = pd.DataFrame(
[value for value in bio_dad_read_names['names']], columns=['names'])
bd_df['parental_relationships'] = [
'bio_dad' for _ in range(len(bd_df.index))]
bio_dad_read_count = len(bd_df)
stats_collecting_df.at["biodad_initial_read_count", str(
cross_used)] = bio_dad_read_count
non_bio_dad_read_to_csv = read_names_to_csv(non_bio_dad, 'nonbio_dad')
non_bio_dad_read_names = pd.read_csv(
non_bio_dad_read_to_csv, dtype={'names': str})
nbd_df = pd.DataFrame(
[value for value in non_bio_dad_read_names['names']], columns=['names'])
nbd_df['parental_relationships'] = [
'nonbio_dad' for _ in range(len(nbd_df.index))]
print()
print("--------------------------------------------")
print("-- Paternal gained/lost read information --")
print("--------------------------------------------")
print('|--')
print('|-- Number of bio dad reads:', len(bd_df))
print('|-- Number of nonbio dad reads:', len(nbd_df))
print('|--')
dad_check_output = dad_check(bd_df, nbd_df)
return dad_check_output, bio_dad_read_count
def unknown_load_n_run(bio_unknown, non_bio_unknown):
        # Load bio unknown reads and pull read names to csv
bio_unknown_read_to_csv = read_names_to_csv(bio_unknown, 'bio_unknown')
bio_unknown_read_names = pd.read_csv(
bio_unknown_read_to_csv, dtype={'names': str})
bu_df = pd.DataFrame(
[value for value in bio_unknown_read_names['names']], columns=['names'])
bu_df['parental_relationships'] = [
'bio_unknown' for _ in range(len(bu_df.index))]
bio_unknown_read_count = len(bu_df)
stats_collecting_df.at["biounknown_initial_read_count", str(
cross_used)] = bio_unknown_read_count
non_bio_unknown_read_to_csv = read_names_to_csv(
non_bio_unknown, 'nonbio_unknown')
non_bio_unknown_read_names = pd.read_csv(
non_bio_unknown_read_to_csv, dtype={'names': str})
nbu_df = pd.DataFrame(
[value for value in non_bio_unknown_read_names['names']], columns=['names'])
nbu_df['parental_relationships'] = [
'nonbio_unknown' for _ in range(len(nbu_df.index))]
print()
print("--------------------------------------------")
print("----Unknown gained/lost read information----")
print("--------------------------------------------")
print('|--')
print('|-- Number of bio unknown reads:', len(bu_df))
print('|-- Number of nonbio unknown reads:', len(nbu_df))
unknown_check_output = unknown_check(bu_df, nbu_df)
return unknown_check_output, bio_unknown_read_count
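    # Phase 2 (below) cross-references the reads lost from each biological bin against the reads
    # gained by the other bins in the replacement cross, attributing each incorrectly sorted read
    # to a destination (e.g. mom-to-dad or mom-to-unknown) and writing those read names to csv.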
def phase_2(maternal_lists, paternal_lists, unknown_lists):
"""Indentify number and direction of incorrect sorting"""
# Lists are ordered [lost reads (bio), gained reads (nonbio)]
maternal_lost_reads = maternal_lists[0]
maternal_gained_reads = maternal_lists[1]
paternal_lost_reads = paternal_lists[0]
paternal_gained_reads = paternal_lists[1]
unknown_lost_reads = unknown_lists[0]
unknown_gained_reads = unknown_lists[1]
def maternal_to_other_stats(maternal_lost, paternal_gained, unknown_gained):
def paternal_gain_check(maternal_lost, paternal_gain_list):
concat = pd.concat([maternal_lost, paternal_gain_list])
before_length = len(concat)
before_drop_df = concat.copy()
before_drop_df.drop_duplicates(
subset='names', keep='first', inplace=True)
concat.drop_duplicates(
subset='names', keep=False, inplace=True)
difference = before_length - len(concat)
number_of_reads = int(difference / 2)
duplicate_df = pd.concat([concat, before_drop_df])
duplicate_df.sort_values(by='names', inplace=True)
duplicate_df.drop_duplicates(
subset='names', keep=False, inplace=True)
duplicate_df['names'].to_csv(
"./data_output/{}/incorrectly_sorted_reads/mom_to_dad_reads.csv".format(cross_used), sep=',', index=False)
return number_of_reads
def unknown_gain_check(maternal_lost, unknown_gain_list):
concat = pd.concat([maternal_lost, unknown_gain_list])
before_length = len(concat)
before_drop_df = concat.copy()
before_drop_df.drop_duplicates(
subset='names', keep='first', inplace=True)
concat.drop_duplicates(
subset='names', keep=False, inplace=True)
difference = before_length - len(concat)
number_of_reads = int(difference / 2)
duplicate_df = pd.concat([concat, before_drop_df])
duplicate_df.sort_values(by='names', inplace=True)
duplicate_df.drop_duplicates(
subset='names', keep=False, inplace=True)
duplicate_df['names'].to_csv(
"./data_output/{}/incorrectly_sorted_reads/mom_to_unknown_reads.csv".format(cross_used), sep=',', index=False)
return number_of_reads
count_to_dad = paternal_gain_check(maternal_lost, paternal_gained)
count_to_unknown = unknown_gain_check(
maternal_lost, unknown_gained)
count = count_to_dad + count_to_unknown
print("--------------------------------------------")
print('\t-- Maternal Check Complete --')
print('|-- Num. Reads 2 Dad = {}'. format(count_to_dad))
print('|-- Num. Reads 2 Unknown = {}'. format(count_to_unknown))
print('|-- Total Moved Reads = {}'. format(count))
print("--------------------------------------------")
return count_to_dad, count_to_unknown
def paternal_to_other_stats(paternal_lost, maternal_gained, unknown_gained):
def maternal_gain_check(paternal_lost, maternal_gained):
concat = pd.concat([paternal_lost, maternal_gained])
before_length = len(concat)
before_drop_df = concat.copy()
before_drop_df.drop_duplicates(
subset='names', keep='first', inplace=True)
concat.drop_duplicates(
subset='names', keep=False, inplace=True)
difference = before_length - len(concat)
number_of_reads = int(difference / 2)
duplicate_df = pd.concat([concat, before_drop_df])
duplicate_df.sort_values(by='names', inplace=True)
duplicate_df.drop_duplicates(
subset='names', keep=False, inplace=True)
duplicate_df['names'].to_csv(
"./data_output/{}/incorrectly_sorted_reads/dad_to_mom_reads.csv".format(cross_used), sep=',', index=False)
return number_of_reads
def unknown_gain_check(paternal_lost, unknown_gained):
concat = pd.concat([paternal_lost, unknown_gained])
before_length = len(concat)
before_drop_df = concat.copy()
before_drop_df.drop_duplicates(
subset='names', keep='first', inplace=True)
concat.drop_duplicates(
subset='names', keep=False, inplace=True)
difference = before_length - len(concat)
number_of_reads = int(difference / 2)
duplicate_df = | pd.concat([concat, before_drop_df]) | pandas.concat |
from __future__ import division, print_function
from datetime import timedelta
from jitterbug import *
from supervised_models import TM,SVM,RF,DT,NB,LR
from pdb import set_trace
import matplotlib.pyplot as plt
from os import listdir
from collections import Counter
import pandas as pd
from demos import cmd
try:
import cPickle as pickle
except:
import pickle
import warnings
warnings.filterwarnings('ignore')
def parse(path = "../data/"):
for file in listdir(path):
df = pd.read_csv("../data/"+file)
df.rename(columns={'commenttext':'Abstract'}, inplace=True)
df['label'] = ["no" if type=="WITHOUT_CLASSIFICATION" else "yes" for type in df["classification"]]
df['ID'] = range(len(df))
df = df[["ID","projectname","classification","Abstract","label",]]
df.to_csv("../new_data/original/"+file, line_terminator="\r\n", index=False)
def find_patterns(target='apache-ant-1.7.0'):
data=load_csv(path="../new_data/original/")
jitterbug = Jitterbug(data,target)
patterns = jitterbug.find_patterns()
print("Patterns:")
print(patterns)
print("Precisions on training set:")
print({p: jitterbug.easy.precs[i] for i,p in enumerate(patterns)})
def validate_ground_truth(target='apache-ant-1.7.0'):
data=load_csv(path="../new_data/original/")
jitterbug = Jitterbug(data,target)
patterns = jitterbug.find_patterns()
jitterbug.easy_code(patterns)
jitterbug.output_conflicts(output="../new_data/conflicts/")
def summarize_validate(input = "../new_data/validate/",output="../results/"):
data=load_csv(input)
columns = ["Double Check"]+list(data.keys())
result = {}
result["Double Check"] = ["yes (Easy)","no (GT)"]
for project in data:
count = Counter(data[project]["validate"])
result[project]=[count["yes"],count["no"]]
df = pd.DataFrame(data=result,columns=columns)
df.to_csv(output+"validate_sum.csv", line_terminator="\r\n", index=False)
def correct_ground_truth(validated="../new_data/validate/", output="../new_data/corrected/"):
data = load_csv(path="../new_data/original/")
data_validated = load_csv(path=validated)
for project in data:
for id in data_validated[project][data_validated[project]["validate"]=="yes"]["ID"]:
data[project]["label"][id]="yes"
data[project].to_csv(output+project+".csv", line_terminator="\r\n", index=False)
stats = Counter(data_validated[project]["validate"])
ratio = float(stats["yes"])/(stats["yes"]+stats["no"])
print(project)
print(ratio)
def Easy_results(source="corrected",output="../results/"):
input = "../new_data/"+source+"/"
data=load_csv(path=input)
results = {"Metrics":["Precision","Recall","F1"]}
for target in data:
jitterbug = Jitterbug(data,target)
patterns = jitterbug.find_patterns()
print(patterns)
print(jitterbug.easy.precs)
stats = jitterbug.test_patterns(output=True)
stats["t"] = len(data[target][data[target]["label"]=="yes"])
prec = float(stats['tp'])/stats['p']
rec = float(stats['tp'])/stats['t']
f1 = 2*prec*rec/(prec+rec)
results[target]=[prec,rec,f1]
df = pd.DataFrame(data=results,columns=["Metrics"]+list(data.keys()))
df.to_csv(output+"step1_Easy_"+source+".csv", line_terminator="\r\n", index=False)
def MAT_results(source="corrected",output="../results/"):
input = "../new_data/"+source+"/"
data=load_csv(path=input)
results = {"Metrics":["Precision","Recall","F1"]}
for target in data:
mat = MAT(data,target)
mat.preprocess()
mat.find_patterns()
stats = mat.test_patterns()
stats["t"] = len(data[target][data[target]["label"]=="yes"])
prec = float(stats['tp'])/stats['p']
rec = float(stats['tp'])/stats['t']
f1 = 2*prec*rec/(prec+rec)
results[target]=[prec,rec,f1]
df = pd.DataFrame(data=results,columns=["Metrics"]+list(data.keys()))
df.to_csv(output+"step1_MAT_"+source+".csv", line_terminator="\r\n", index=False)
def fitness_pattern(pattern='xxx'):
data=load_csv(path="../new_data/original/")
fitness = {}
for target in data:
jitterbug = Jitterbug(data,target)
p_id = list(jitterbug.easy.voc).index(pattern)
poses = np.where(np.array(jitterbug.easy.y_label)== "yes")[0]
count_tp = np.array(np.sum(jitterbug.easy.test_data[poses], axis=0))[0][p_id]
count_p = np.array(np.sum(jitterbug.easy.test_data, axis=0))[0][p_id]
fitness[target] = np.nan_to_num(count_tp * (count_tp / count_p) ** 3)
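        # count_tp * (count_tp / count_p) ** 3 is the pattern's true-positive count weighted by the
        # cube of its precision, so patterns that fire precisely outrank ones that merely fire often;
        # nan_to_num guards the 0/0 case when the pattern never appears in the target project.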
print(fitness)
def rest_results(seed=0,input="../new_data/rest/",output="../results/"):
treatments = ["LR","DT","RF","SVM","NB","TM"]
data=load_csv(path=input)
columns = ["Treatment"]+list(data.keys())
# Supervised Learning Results
result = {target: [supervised_model(data,target,model=model,seed=seed) for model in treatments] for target in data}
result["Treatment"] = treatments
to_dump = {key: {"RF": result[key][2], "TM": result[key][5]} for key in data}
# Output results to tables
metrics = result[columns[-1]][0].keys()
for metric in metrics:
df = {key: (result[key] if key=="Treatment" else [dict[metric] for dict in result[key]]) for key in result}
pd.DataFrame(df,columns=columns).to_csv(output+"rest_"+metric+".csv", line_terminator="\r\n", index=False)
# Hard Results (continuous learning)
APFD_result = {}
AUC_result = {}
for target in data:
APFD_result[target] = []
AUC_result[target] = []
for model in treatments[:-1]:
jitterbug = Jitterbug_hard(data,target,model=model,seed=seed)
stats = jitterbug.eval()
APFD_result[target].append(stats['APFD'])
AUC_result[target].append(stats['AUC'])
if model=="RF":
to_dump[target]["Hard"] = stats
with open("../dump/rest_result.pickle","wb") as f:
pickle.dump(to_dump,f)
APFD_result["Treatment"] = treatments[:-1]
AUC_result["Treatment"] = treatments[:-1]
pd.DataFrame(APFD_result,columns=columns).to_csv(output+"rest_APFD_Hard.csv", line_terminator="\r\n", index=False)
pd.DataFrame(AUC_result,columns=columns).to_csv(output+"rest_AUC_Hard.csv", line_terminator="\r\n", index=False)
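# APFD (average percentage of faults detected, borrowed from test-case prioritization) and AUC
# both summarize how early the true "yes" items are surfaced in each treatment's ranking, so the
# incremental Hard learners can be compared against the one-shot supervised models per project.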
def estimate_results(seed=0,T_rec=0.90,model="RF",input="../new_data/rest/"):
data=load_csv(path=input)
# Hard Results
for target in data:
jitterbug = Jitterbug_hard(data,target,est=True,T_rec=T_rec,model=model,seed=seed)
jitterbug.hard.plot(T_rec=T_rec)
def overall_results(seed=0,input="../new_data/corrected/",output="../results/"):
data=load_csv(path=input)
columns = ["Treatment"] + list(data.keys())
APFDs = {"Treatment":["Jitterbug","Easy+RF","Hard","MAT+RF","TM","RF"]}
AUCs = {"Treatment":["Jitterbug","Easy+RF","Hard","MAT+RF","TM","RF"]}
results = {}
for target in data:
results[target]={}
APFDs[target] = []
AUCs[target] = []
print(target)
start = time.time()
stats = two_step_Jitterbug(data,target,seed=seed)
APFDs[target].append(stats["APFD"])
AUCs[target].append(stats["AUC"])
results[target]["Jitterbug"] = stats
end = time.time()
print("Jitterbug")
print(str(timedelta(seconds=end-start)))
start = end
stats = two_step_Easy(data,target,seed=seed)
APFDs[target].append(stats["APFD"])
AUCs[target].append(stats["AUC"])
results[target]["Easy+RF"] = stats
end = time.time()
print("Easy+RF")
print(str(timedelta(seconds=end - start)))
start = end
stats = Jitterbug_hard(data,target,est=False,seed=seed).eval()
APFDs[target].append(stats["APFD"])
AUCs[target].append(stats["AUC"])
results[target]["Hard"] = stats
end = time.time()
print("Hard")
print(str(timedelta(seconds=end - start)))
start = end
stats = two_step_MAT(data,target,seed=seed)
APFDs[target].append(stats["APFD"])
AUCs[target].append(stats["AUC"])
results[target]["MAT+RF"] = stats
end = time.time()
print("MAT+RF")
print(str(timedelta(seconds=end - start)))
start = end
stats = supervised_model(data,target, model = "RF",seed=seed)
APFDs[target].append(stats["APFD"])
AUCs[target].append(stats["AUC"])
results[target]["RF"] = stats
end = time.time()
print("RF")
print(str(timedelta(seconds=end - start)))
start = end
stats = supervised_model(data,target, model = "TM",seed=seed)
APFDs[target].append(stats["APFD"])
AUCs[target].append(stats["AUC"])
results[target]["TM"] = stats
end = time.time()
print("TM")
print(str(timedelta(seconds=end - start)))
with open("../dump/overall_result.pickle","wb") as f:
pickle.dump(results,f)
pd.DataFrame(APFDs,columns=columns).to_csv(output+"overall_APFD.csv", line_terminator="\r\n", index=False)
pd.DataFrame(AUCs,columns=columns).to_csv(output+"overall_AUC.csv", line_terminator="\r\n", index=False)
def stopping_results(which="corrected",seed=0,input="../new_data/",output="../results/"):
data=load_csv(path=input+which+"/")
columns = ["Metrics"] + list(data.keys())
result = {"Metrics":["Precision","Recall","F1","Cost"]}
for target in data:
result[target] = []
stats = two_step_Jitterbug(data,target, est = True, T_rec=0.9, seed=seed)
for metric in result["Metrics"]:
result[target].append(stats[metric])
| pd.DataFrame(result,columns=columns) | pandas.DataFrame |
"""
Plot the evolution of validation/test set scores on fit_with_prune model refits.
1. For each seed,
1a. Initialize model and run fit_with_prune to obtain a list of models.
1b. On each model, run score to obtain its test set performance.
2. For each fit, compute average validation and test set scores.
3. Plot two sequences.
"""
from autogluon.core.data import LabelCleaner
from autogluon.core.utils import infer_problem_type
from autogluon.core.models import BaggedEnsembleModel
from autogluon.tabular.models.rf.rf_model import RFModel
from autogluon.tabular.models import KNNModel, RFModel, CatBoostModel, NNFastAiTabularModel, LGBModel
from autogluon.tabular import TabularDataset
from autogluon.features.generators import AutoMLPipelineFeatureGenerator
from sklearn.model_selection import train_test_split
import argparse
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', help='directory to save results', type=str, default='plots/score/UNNAMED')
parser.add_argument('-f', '--train_path', help='path to train dataset CSV', type=str, default=None)
parser.add_argument('-g', '--test_path', help='path to test dataset CSV', type=str, default=None)
parser.add_argument('-l', '--label', help='label column name', type=str, default='class')
parser.add_argument('-s', '--seeds', help='number of seeds to use', type=int, default=2)
parser.add_argument('-m', '--max_num_fit', help='maximum number of times the model will be fitted', type=int, default=50)
parser.add_argument('-d', '--stop_threshold', help='if score does not improve for this many iterations, stop feature pruning', type=int, default=3)
parser.add_argument('-p', '--prune_ratio', help='prune at most this amount of features at once per pruning iteration', type=float, default=0.05)
parser.add_argument('-r', '--resource', help='number of shuffles to evaluate per model fit iteration', type=int, default=None)
parser.add_argument('-t', '--strategy', help='which strategy to evaluate', type=str, default='uniform', choices=['uniform', 'backwardsearch'])
parser.add_argument('-u', '--subsample_size', help='how many subsamples to use per shuffle', type=int, default=5000)
parser.add_argument('-z', '--mode', help='which model to use', type=str, default='catboost', choices=['randomforest', 'catboost', 'fastai', 'knn', 'lightgbm'])
parser.add_argument('-b', '--bagged', help='whether to bag models. 0 for false and 1 for true.', type=int, default=0, choices=[0, 1])
args = parser.parse_args()
os.makedirs(args.name, exist_ok=True)
RESULT_DIR = args.name
# Load Data
if args.train_path is None:
fit_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv')
else:
fit_data = pd.read_csv(args.train_path)
if args.test_path is None:
test_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/test.csv')
else:
test_data = pd.read_csv(args.test_path)
# import pdb; pdb.set_trace() ## NOTE: Try fitting with only noised data
# fit_data = fit_data[[feature for feature in fit_data.columns if 'noise_' in feature or args.label == feature]]
# test_data = test_data[[feature for feature in test_data.columns if 'noise_' in feature or args.label == feature]]
# On multiple seeds, fit model and evaluate accuracy
# fit_data = fit_data.head(10000) # subsample for faster demo
X_all, y_all = fit_data.drop(columns=[args.label]), fit_data[args.label]
X_test, y_test = test_data.drop(columns=[args.label]), test_data[args.label]
val_trajectories, test_trajectories = [], []
num_original_features, num_noised_features = [], []
for seed in range(args.seeds):
auto_ml_pipeline_feature_generator = AutoMLPipelineFeatureGenerator()
if args.mode == 'autogluon':
# TODO: Enable this with full autogluon run
pass
else:
# call fit_with_prune, return all models, and do stuff there
if args.mode == 'randomforest':
model = RFModel()
elif args.mode == 'fastai':
model = NNFastAiTabularModel()
elif args.mode == 'knn':
model = KNNModel()
elif args.mode == 'lightgbm':
model = LGBModel()
else:
model = CatBoostModel()
if args.strategy == 'uniform':
fi_strategy = 'uniform'
fp_strategy = 'percentage'
else:
fi_strategy = 'backwardsearch'
fp_strategy = 'single'
# clean data and call fit_with_prune
if args.bagged:
model = BaggedEnsembleModel(model, random_state=seed)
problem_type = infer_problem_type(y=y_all)
label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=y_all)
X_all_new = auto_ml_pipeline_feature_generator.fit_transform(X=X_all)
y_all_new = label_cleaner.transform(y_all)
best_model, all_model_info = model.fit_with_prune(X=X_all_new, y=y_all_new, X_val=None, y_val=None, max_num_fit=args.max_num_fit,
stop_threshold=args.stop_threshold, prune_ratio=0.1, num_resource=args.resource,
fi_strategy=fi_strategy, fp_strategy=fp_strategy, subsample_size=args.subsample_size,
prune_threshold=0.001)
else:
X, X_val, y, y_val = train_test_split(X_all, y_all, test_size=int(0.2*len(fit_data)), random_state=seed)
problem_type = infer_problem_type(y=y)
label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=y)
X = auto_ml_pipeline_feature_generator.fit_transform(X=X)
y = label_cleaner.transform(y)
X_val = auto_ml_pipeline_feature_generator.transform(X=X_val)
y_val = label_cleaner.transform(y_val)
best_model, all_model_info = model.fit_with_prune(X=X, y=y, X_val=X_val, y_val=y_val, max_num_fit=args.max_num_fit,
stop_threshold=args.stop_threshold, prune_ratio=0.1, num_resource=args.resource,
fi_strategy=fi_strategy, fp_strategy=fp_strategy, subsample_size=args.subsample_size,
prune_threshold=0.001)
X_test_new = auto_ml_pipeline_feature_generator.transform(X=X_test)
y_test_new = label_cleaner.transform(y_test)
val_trajectory, test_trajectory = [], []
num_original_feature, num_noised_feature = [], []
best_val_score, best_val_score_test = -float('inf'), -float('inf')
# plot evolution of test score of model with best validation loss
for i, model_info in enumerate(all_model_info):
val_score, model = model_info
if val_score > best_val_score:
best_val_score = val_score
best_val_score_test = model.score(X_test_new, y_test_new)
best_num_original_feature = len([feature for feature in model.features if 'noise_' not in feature])
best_num_noised_feature = len([feature for feature in model.features if 'noise_' in feature])
val_trajectory.append(best_val_score)
test_trajectory.append(best_val_score_test)
num_original_feature.append(best_num_original_feature)
num_noised_feature.append(best_num_noised_feature)
val_trajectories.append(val_trajectory)
test_trajectories.append(test_trajectory)
num_original_features.append(num_original_feature)
num_noised_features.append(num_noised_feature)
# pad trajectories so they are all of equal length
max_trajectory_len = max(list(map(lambda trajectory: len(trajectory), val_trajectories)))
for i in range(len(val_trajectories)):
val_trajectory, test_trajectory = val_trajectories[i], test_trajectories[i]
fill_len = max_trajectory_len - len(val_trajectory)
val_trajectories[i] = val_trajectory + fill_len*[val_trajectory[-1]]
test_trajectories[i] = test_trajectory + fill_len*[test_trajectory[-1]]
num_original_feature, num_noised_feature = num_original_features[i], num_noised_features[i]
num_original_features[i] = num_original_feature + fill_len*[num_original_feature[-1]]
num_noised_features[i] = num_noised_feature + fill_len*[num_noised_feature[-1]]
# plot
result_val = np.asarray(val_trajectories)
result_test = np.asarray(test_trajectories)
mean_val = result_val.mean(axis=0)
mean_test = result_test.mean(axis=0)
std_val = 1.96 * np.std(result_val, axis=0)
std_test = 1.96 * np.std(result_test, axis=0)
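# 1.96 * std gives an approximate 95% confidence band across seeds (normal approximation); the
# feature-count bands further below use a plain one-sigma band instead.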
result_orig_feat = np.asarray(num_original_features)
result_noised_feat = np.asarray(num_noised_features)
mean_orig_feat = result_orig_feat.mean(axis=0)
mean_noised_feat = result_noised_feat.mean(axis=0)
std_orig_feat = np.std(result_orig_feat, axis=0)
std_noised_feat = np.std(result_noised_feat, axis=0)
x = [i+1 for i in range(max_trajectory_len)]
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].plot(x, mean_val, color='r')
ax[0].fill_between(x, mean_val-std_val, mean_val+std_val, color='r', alpha=.1)
ax[0].set_title(f"Validation and Test Set Scores")
ax[0].set_xlabel("Number of Model Fits")
ax[0].set_ylabel("Accuracy")
ax[0].plot(x, mean_test, color='b')
ax[0].fill_between(x, mean_test-std_test, mean_test+std_test, color='b', alpha=.1)
ax[0].legend([f"Val ({round(mean_val[0],4)}=>{round(mean_val[-1],4)})", f"Test ({round(mean_test[0],4)}=>{round(mean_test[-1],4)})"])
ax[1].plot(x, mean_orig_feat, color='g')
ax[1].fill_between(x, mean_orig_feat-std_orig_feat, mean_orig_feat+std_orig_feat, color='g', alpha=.1)
ax[1].set_title("Number of Kept Features")
ax[1].set_xlabel("Number of Model Fits")
ax[1].set_ylabel("Number of Kept Features")
ax[1].plot(x, mean_noised_feat, color='y')
ax[1].fill_between(x, mean_noised_feat-std_noised_feat, mean_noised_feat+std_noised_feat, color='y', alpha=.1)
ax[1].legend([f"# Original Features", f"# Synthetic Features"])
fig.suptitle(f"{'Bagged ' if args.bagged else ''}{args.mode.upper()} Stats From Strategy: {args.strategy}")
fig.tight_layout()
fig.savefig(f'{RESULT_DIR}/evolution.png')
# save trajectories
result_val_df = pd.DataFrame(val_trajectories)
result_test_df = | pd.DataFrame(test_trajectories) | pandas.DataFrame |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_no_items_raises(self):
with pytest.raises(ValueError, match="No objects to concatenate"):
concat([])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = | concat([df1, df2], axis=1) | pandas.concat |
# Run a classification experiment
# implements model fitting with equalized odds and demonstrates
# how to use equalized coverage for unbiased uncertainty estimation
# We rely on the nonconformist package and CQR package, available at
# https://github.com/donlnz/nonconformist
# https://github.com/yromano/cqr
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.getcwd() + '/others/third_party/fairness_aware_learning')))
sys.path.append(os.path.abspath(os.path.join(os.getcwd() + '/others/third_party/cqr')))
base_path = os.getcwd() + '/data/'
import torch
import random
import get_dataset
import numpy as np
import pandas as pd
from others import adv_debiasing
from others import continuous_fairness
from fair_dummies import utility_functions
from fair_dummies import fair_dummies_learning
from nonconformist.nc import ClassifierNc
from nonconformist.cp import IcpClassifier
from nonconformist.base import ClassifierAdapter
from nonconformist.nc import InverseProbabilityErrFunc
pd.set_option('precision', 4)
# Get length
def get_length(Y,predicted_C):
length = sum(predicted_C)
return length
# Get coverage
def get_coverage(Y,predicted_C):
coverage = int( predicted_C[int(Y)] == True )
return coverage
# Get coverage and length
def get_stat(Y,predicted_C):
coverage = int( predicted_C[int(Y)] == True )
length = sum(predicted_C)
return coverage, length
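# predicted_C is a boolean membership vector over classes: coverage is 1 when the true label falls
# inside the conformal prediction set, and length is the number of classes included in that set.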
def class_compute_coverage_len(y_test, y_set):
results = [get_stat(y_test[test],y_set[test]) for test in range(len(y_test))]
results = list(zip(*results))
coverage = pd.DataFrame([row for row in results[0]])
length = | pd.DataFrame([r for r in results[1]]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 12:21:39 2021
Generates demographics table of the ICU readmission dataset in eICU.
V2 has changes for readability, modularization, and efficiency, as well as a
few additional outcomes. Also adds Mann-Whitney and chi-squared testing.
@author: Kirby
"""
#%% Package setup
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.stats import mannwhitneyu
from scipy.stats import chisquare
from time import time
start = time()
file_path = Path(__file__)
eicu_path = file_path.parent.parent.parent.joinpath('eicu')
hist_path = file_path.parent.parent.joinpath('Features','History')
comorb_path = file_path.parent.parent.joinpath('Features','Comorbidity')
#%% Import data.
comp = pd.read_csv('ICU_readmissions_dataset.csv')
pat = pd.read_csv(eicu_path.joinpath('patient.csv'),
usecols=['patientunitstayid','gender', 'age',
'ethnicity','unitdischargeoffset',
'unitadmitsource','hospitaldischargestatus'])
apache = pd.read_csv(eicu_path.joinpath('apachepatientresult.csv'),
usecols=['patientunitstayid','apachescore',
'apacheversion'])
diag = pd.read_csv(eicu_path.joinpath('admissiondx.csv'),
usecols=['patientunitstayid','admitdxname'])
names = pd.read_csv('admissiondx_organ_system_paths.csv')
#%% Data cleaning.
#Get just patient stays we care about.
for data in [pat,apache,diag]:
data.drop(data[
~data['patientunitstayid'].isin(comp['patientunitstayid'])].index,
inplace=True)
#Just get organ systems from diag.
diag = diag[diag['admitdxname'].isin(names['admitdxname'])]
#Convert age to numeric.
def age_to_nums(age):
if age == '> 89':
return 90
else:
return float(age)
pat['age'] = pat['age'].apply(age_to_nums)
#Get just apacheiv scores, make -1s nans.
apache = apache[apache['apacheversion']=='IV']
apache = apache[['patientunitstayid','apachescore']]
apache = apache[~(apache['apachescore']==-1)]
apache = apache.merge(pat[['patientunitstayid']],on='patientunitstayid',
how='right')
#Make unitdischargeoffset into hours.
pat['unitdischargeoffset'] = pat['unitdischargeoffset']/60
# Combine rare admission sources into one "Other" category.
def other_adm_sources(admit_source):
if admit_source in ['Operating Room','Recovery Room','PACU','Floor',
'Emergency Department']:
return admit_source
else:
return 'Other'
pat['unitadmitsource'] = pat['unitadmitsource'].apply(other_adm_sources)
#%% Get separate groups.
#Which patients were postive? Which were negative?
readm_pos = comp[comp['bad_disch_plan']==1]['patientunitstayid']
readm_neg = comp[comp['bad_disch_plan']==0]['patientunitstayid']
#Merge all the data together.
pat = pat.merge(apache,on='patientunitstayid',how='left')
pat = pat.merge(diag,on='patientunitstayid',how='left')
#Get positive only versions of all extracted info.
pat_pos = pat[pat['patientunitstayid'].isin(readm_pos)]
#Get negative only versions of all extracted info.
pat_neg = pat[pat['patientunitstayid'].isin(readm_neg)]
#%% Get demographic data.
#Add proportion function. Takes a column of raw categorical data, gets counts,
#and adds proportion while converting to string.
def add_prop(col):
col = col.value_counts()
return (' ' + col.astype(str) + ' (' +
(np.round(col/(col.sum())*100,decimals=2)).astype(str) + '%)')
#Gets proportion and counts for each cohort, makes a dataframe of it.
def all_prop(col_name):
df = pd.DataFrame(columns=['No Readmit/Death','Readmit/Death','Total','p-Value'])
df['No Readmit/Death'] = add_prop(pat_neg[col_name])
df['Readmit/Death'] = add_prop(pat_pos[col_name])
df['Total'] = add_prop(pat[col_name])
    #Chi-squared testing:
    #find whether the difference between the no readmit/death and readmit/death groups is significant.
num_df = df[['No Readmit/Death','Readmit/Death']].copy()
#Convert the data to just proportions.
for col in num_df.columns:
#Replace nans with 0s.
num_df.replace(to_replace=np.nan,value='(0%',inplace=True)
#Parse string to get percentage.
num_df[col] = num_df[col].str.split('(',1)
num_df[col] = num_df[col].apply(lambda x: x[1])
num_df[col] = num_df[col].str.split('%',1)
num_df[col] = num_df[col].apply(lambda x: x[0])
num_df[col] = num_df[col].astype(float)
chisq,p = chisquare(num_df[['Readmit/Death']],num_df[['No Readmit/Death']])
df.iloc[0,3] = np.round(p[0],decimals=3)
return df
#Gender
gender = all_prop('gender')
#Ethnicity
ethn = all_prop('ethnicity')
#Hospital Mortality
mort = all_prop('hospitaldischargestatus')
#Unit Admit Source
source = all_prop('unitadmitsource')
#Primary AdmissionDx Group
groups = all_prop('admitdxname')
#Takes in raw data column, outputs string of median and IQR.
def get_med_iqr(col):
return (str(np.round(col.median(),decimals=2)) + ' [' +
str(np.round(col.quantile(q=0.25),decimals=2)) + '-' +
str(np.round(col.quantile(q=0.75),decimals=2)) + ']')
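#Added illustration (not in the original script): get_med_iqr formats a numeric
#column as "median [Q1-Q3]" using toy data.
def _example_get_med_iqr():
    example_col = pd.Series([1, 2, 3, 4, 5])
    assert get_med_iqr(example_col) == '3.0 [2.0-4.0]'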
#Takes in raw data column,a title and a unit, makes IQR dataframe out of it
#for each cohort.
def all_med_iqr(col_name,title,units):
neg_col = pat_neg[col_name]
pos_col = pat_pos[col_name]
df = | pd.DataFrame(columns=['No Readmit/Death','Readmit/Death','Total','p-Value']) | pandas.DataFrame |
from datetime import datetime
from datetime import timedelta
from time import time
from datetime import date
import pandas as pd
import numpy as np
import yfinance as yf
from bs4 import BeautifulSoup
import requests
from . import db
from tqdm import tqdm
headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36' }
stocks = db.stockDB()
sectors = db.sectors
periods = db.periods
def nameForUrl(stockName):
name = [v for k, v in stocks.items() if k == stockName][0]
return name
def parseData(url, timeFrame):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find_all('table', class_=['historical_data_table table'])
time = [results[0] if timeFrame == "A" else results[1]]
history = time[0].find_all('td')
histList = [i.get_text() for i in history]
yearsQuarters = [histList[i::2] for i in range(2)]
change = [yearsQuarters[1][i].replace('$', '') for i in range(len(yearsQuarters[1]))]
change = [c if len(c) > 0 else '0' for c in change]
output = [int(change[i].replace(',', '')) for i in range(len(change))]
return dict(zip(yearsQuarters[0], output))
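# Added sketch (not in the original module): parseData pairs the scraped period
# labels with the cleaned integer values via zip; this toy example mirrors that
# final step without any network access.
def _example_parse_pairs():
    periods_scraped = ['2023', '2022']
    raw_values = ['1,234', '987']
    cleaned = [int(v.replace(',', '')) for v in raw_values]
    assert dict(zip(periods_scraped, cleaned)) == {'2023': 1234, '2022': 987}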
def parseNoTime(url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find_all('table', class_=['historical_data_table table'])
time = [results[0]]
history = time[0].find_all('td')
histList = [i.get_text() for i in history]
yearsQuarters = [histList[i::2] for i in range(2)]
change = [yearsQuarters[1][i].replace('$', '') for i in range(len(yearsQuarters[1]))]
change = [c if len(c) > 0 else '0' for c in change]
output = [int(change[i].replace(',', '')) for i in range(len(change))]
final = dict(zip(yearsQuarters[0], output))
return final
###### Income statement data
def getInventory(stockName,timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/inventory"
final = parseData(url, timeFrame)
return final
def getRevenue(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/revenue"
final = parseData(url, timeFrame)
return final
def getGrossProfit(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/gross-profit"
final = parseData(url, timeFrame)
return final
def getOperatingIncome(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/operating-income"
final = parseData(url, timeFrame)
return final
def getEbit(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/ebit"
final = parseData(url, timeFrame)
return final
def getEBITDA(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/ebitda"
final = parseData(url, timeFrame)
return final
def getInterest(ticker):
url = 'https://finance.yahoo.com/quote/' + ticker + '/financials'
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.text, 'html.parser')
tables = soup.find_all('span')
row = [t.get_text() for t in tables]
dates = [row.index("Breakdown")+num for num in range(1,6)]
date = [row[i] for i in dates]
int_tables = soup.find_all('span')
int_row = [t.get_text() for t in int_tables]
ie = [int_row.index('Interest Expense')+num for num in range(1,6)]
interests = [int_row[i] for i in ie]
change = [i if len(i) > 0 else '0' for i in interests]
interest = [c[:-3].replace(',', '') for c in change if "," in c]
final_int = [int(i) for i in interest]
return {date[i]: final_int[i] for i in range(len(final_int))}
def getNetIncome(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/net-income"
final = parseData(url, timeFrame)
return final
def getEPS(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/eps-earnings-per-share-diluted"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find_all('table', class_=['historical_data_table table'])
time = [results[0] if timeFrame == "A" else results[1]]
history = time[0].find_all('td')
histList = [i.get_text() for i in history]
yearsQuarters = [histList[i::2] for i in range(2)]
change = [yearsQuarters[1][i].replace('$', '') for i in range(len(yearsQuarters[1]))]
change = [c if len(c) > 0 else '0' for c in change]
output = [float(change[i].replace(',', '')) for i in range(len(change))]
return dict(zip(yearsQuarters[0], output))
def getSharesOutstanding(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/shares-outstanding"
final = parseData(url, timeFrame)
return final
##### Balance sheet items
def getTotalAssets(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/total-assets"
final = parseData(url, timeFrame)
return final
def getCashOnHand(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/cash-on-hand"
final = parseData(url, timeFrame)
return final
def getLongTermDebt(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/long-term-debt"
final = parseData(url, timeFrame)
return final
def getTotalLiab(ticker, timeFrame):
name = nameForUrl(ticker)
url = "https://www.macrotrends.net/stocks/charts/" + ticker + "/" + name + "/total-liabilities"
final = parseData(url, timeFrame)
return final
def getShareholderEquity(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/total-share-holder-equity"
final = parseData(url, timeFrame)
return final
def getEmployees(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/number-of-employees"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find_all('table', class_=['historical_data_table table'])
time = [results[0]]
history = time[0].find_all('td')
histList = [i.get_text() for i in history]
yearsQuarters = [histList[i::2] for i in range(2)]
change = [yearsQuarters[1][i].replace('$', '') for i in range(len(yearsQuarters[1]))]
output = [int(change[i].replace(',', '')) for i in range(len(change))]
final = dict(zip(yearsQuarters[0], output))
return final
def getCurrentAssets(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/total-current-assets"
final = parseData(url, timeFrame)
return final
def getCurrentLiab(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/total-current-liabilities"
final = parseData(url, timeFrame)
return final
def getPPE(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/net-property-plant-equipment"
final = parseData(url, timeFrame)
return final
def getRecievables(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/receivables-total"
final = parseData(url, timeFrame)
return final
def getPayables(ticker):
url = 'https://www.zacks.com/stock/quote/' + ticker + '/balance-sheet'
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.text, 'html.parser')
pay_tables = soup.find_all('td')
tables = soup.find_all('span')
row = [t.get_text() for t in tables]
payRow = [t.get_text() for t in pay_tables]
ap = [payRow.index('Accounts Payable')+num for num in range(1,6)]
dates = [row.index("Liabilities & Shareholders Equity")+num for num in range(1,6)]
payable = [payRow[i] for i in ap]
date = [row[i] for i in dates[1:]]
payables = [int(i.replace(',', '')) for i in payable[1:]]
return {date[i]: payables[i] for i in range(len(payables))}
def getCOGS(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/cost-goods-sold"
final = parseData(url, timeFrame)
return final
def getOperatingExpenses(stock, timeFrame):
operatingIncome = getOperatingIncome(stock, timeFrame)
grossProfit = getGrossProfit(stock, timeFrame)
operatingAnn = list(operatingIncome.values())
grossAnn = list(grossProfit.values())
opExpAnn = [grossAnn[i] - operatingAnn[i] for i in range(len(grossAnn))]
years = list(grossProfit.keys())
finalOpAnn = dict(zip(years, opExpAnn))
return finalOpAnn
def getOperatingCF(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/cash-flow-from-operating-activities"
final = parseNoTime(url)
return final
def getInvestingCF(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/cash-flow-from-investing-activities"
final = parseNoTime(url)
return final
def getFinancingCF(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/cash-flow-from-financial-activities"
final = parseNoTime(url)
return final
def getDepreciation(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/total-depreciation-amortization-cash-flow"
final = parseNoTime(url)
return final
def getNetPPEchange(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/net-change-in-property-plant-equipment"
final = parseNoTime(url)
return final
def getNetCurrentDebt(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/net-current-debt"
final = parseNoTime(url)
return final
def getNetDebt(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/debt-issuance-retirement-net-total"
final = parseNoTime(url)
return final
def getTotalDivsPaid(stockName):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/total-common-preferred-stock-dividends-paid"
final = parseNoTime(url)
return final
def getTaxes(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/total-provision-income-taxes"
final = parseData(url, timeFrame)
return final
def getPretaxIncome(stockName, timeFrame):
name = nameForUrl(stockName)
url = "https://www.macrotrends.net/stocks/charts/"+ stockName + "/" + name + "/pre-tax-income"
final = parseData(url, timeFrame)
return final
def getCapex(ticker):
ppe = list(getPPE(ticker, "A").values())
ppeChange = [ppe[i] - ppe[i+1] for i in range(len(ppe)-1)]
    dep = list(getDepreciation(ticker).values())
    years = list(getDepreciation(ticker).keys())
capex = [ppeChange[i] + dep[i] for i in range(len(ppeChange))]
return {years[i]: capex[i] for i in range(len(capex))}
def fcf(ticker):
ocf = list(getOperatingCF(ticker).values())
interest = list(getInterest(ticker).values())
taxes = list(getTaxes(ticker, "A").values())
pretax = list(getPretaxIncome(ticker, "A").values())
taxPerc = [taxes[i] / pretax[i] for i in range(len(taxes))]
intertax = [interest[i] * (1-taxPerc[i]) for i in range(len(interest))]
capex = list(getCapex(ticker).values())
fcf = [ocf[i] + intertax[i] - capex[i] for i in range(len(intertax))]
years = list(getOperatingCF(ticker).keys())
return {years[i]: round(fcf[i], 2) for i in range(len(fcf))}
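# Added worked example (toy numbers, not real financials): fcf computes
# OCF + interest * (1 - effective tax rate) - capex for each year.
def _example_fcf_formula():
    ocf, interest, taxes, pretax, capex = 100.0, 10.0, 20.0, 80.0, 30.0
    tax_rate = taxes / pretax                        # 0.25
    after_tax_interest = interest * (1 - tax_rate)   # 7.5
    assert ocf + after_tax_interest - capex == 77.5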
def price(stockName):
url = "https://finance.yahoo.com/quote/" + stockName
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
results = list(soup.find('span', class_=["Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)"]))
results = [results[0].replace(',', '')]
return float(results[0])
def marketCap(ticker):
p = price(ticker)
    shares = getSharesOutstanding(ticker, 'Q')
    # getSharesOutstanding returns a {period: value} dict (newest period first);
    # multiply by the most recent value instead of the whole dict
    return p * list(shares.values())[0]
def sectorTickers(sectorName):
name = [v for k, v in sectors.items() if k == sectorName][0]
url = "https://www.stockmonitor.com/sector/" + name
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find_all('a', href=True)
parsed = [r.get_text() for r in results]
front = parsed[0:10]
back = parsed[:-7]
tickers = back[10:]
return tickers
def prices(stock, start, end=None):
if end == None:
if start in periods.keys():
period = [v for k, v in periods.items() if start == k][0]
if period == 756:
prices = yf.Ticker(stock).history('max')['Close']
hist = prices[-period:]
return pd.DataFrame(hist)
else:
prices = yf.Ticker(stock).history(period)['Close']
return pd.DataFrame(prices)
else:
raise ValueError('invalid input: ' + start + '. Valid options are: 1d, 5d, 1m, 3m, 6m, 1y, 2y, 3y, 5y, 10y, ytd, max. Or add a finish date.')
else:
prices = list(yf.download(stock, start=start, end=end)['Adj Close'])
return prices
def volume(stock, start, end=None):
if end == None:
if start in periods.keys():
period = [v for k, v in periods.items() if start == k][0]
if period == 756:
volumes = yf.Ticker(stock).history('max')['Volume']
hist = volumes[-period:]
return pd.DataFrame(hist)
else:
volumes = yf.Ticker(stock).history(period)['Volume']
return pd.DataFrame(volumes)
else:
raise ValueError('invalid input: ' + start + '. Valid options are: 1d, 5d, 1m, 3m, 6m, 1y, 2y, 3y, 5y, 10y, ytd, max. Or add a finish date.')
else:
volumes = list(yf.download(stock, start=start, end=end)['Volume'])
return volumes
def sector(ticker):
url = 'https://finance.yahoo.com/quote/'+ ticker + '/profile'
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.text, 'html.parser')
find = soup.find_all('span', class_='Fw(600)')
results = [f.get_text() for f in find]
if len(results) == 0:
find = soup.find_all('span', class_='Fl(end)')
results = [f.get_text() for f in find]
#sec = [results[0] if results[0] != "N/A" else results[1]]
sec = results[5]
else:
sec = results[0]
return sec
def industry(ticker):
url = 'https://finance.yahoo.com/quote/'+ ticker + '/profile'
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.text, 'html.parser')
find = soup.find_all('span', class_='Fw(600)')
results = [f.get_text() for f in find]
if len(results) == 0:
find = soup.find_all('span', class_='Fl(end)')
results = [f.get_text() for f in find]
ind = [results[0] if results[0] != "N/A" else results[1]]
ind = ind[0]
else:
ind = results[1]
return ind
def optionExp(ticker):
url = "https://finance.yahoo.com/quote/" + ticker + "/options"
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, "html.parser")
tables = soup.find_all('option')
values = [tag['value'] for tag in tables if "value" in tag.attrs]
dates = [i.get_text() for i in tables]
reference = dict(zip(dates, values))
return reference
def optionExpry(ticker):
url = "https://finance.yahoo.com/quote/" + ticker + "/options"
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, "html.parser")
tables = soup.find_all('option')
dates = [i.get_text() for i in tables]
return dates
def callsPuts(ticker, date):
reference = optionExp(ticker)
table = [v for k, v in reference.items() if k == date][0]
tableUrl = 'https://finance.yahoo.com/quote/' + ticker + '/options?date=' + table
page = requests.get(tableUrl, headers=headers)
test = | pd.read_html(page.text) | pandas.read_html |
# 0.0 - Imports
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from boruta import BorutaPy
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
## 0.1 - Helper Functions
# These settings display a larger number of rows and columns
pd.options.display.max_columns = 200
pd.options.display.max_rows = 200
# These settings display larger figures and text
plt.rcParams['figure.figsize']= [30,20]
plt.rcParams['font.size'] = 30
# 1.0 - Data management
df_train_raw = pd.read_csv('C:/Users/<NAME>/Desktop/Projetos/ciencia de dados/House_prices/train.csv')
df_test_raw = pd.read_csv('C:/Users/<NAME>/Desktop/Projetos/ciencia de dados/House_prices/test.csv')
df_sample_submission_raw = | pd.read_csv('C:/Users/<NAME>/Desktop/Projetos/ciencia de dados/House_prices/sample_submission.csv') | pandas.read_csv |
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
import numpy as np
from pathlib import Path
import pandas as pd
plt.style.use('seaborn-white')
# sns.set()
font = {'family' : 'sans-serif',
'size' : 16}
matplotlib.rc('font', **font)
sns.set_palette('mako')
if __name__ == '__main__':
random = Path(__file__).resolve().parent / 'random/active_learning_1.npy'
random_history = np.load(random, allow_pickle=True).item()
n_data = [len(random_history[k]['train_index']) for k in random_history]
r2 = [random_history[k]['R2_test'] for k in random_history]
df = | pd.DataFrame({'n_data': n_data, 'R2_random': r2}) | pandas.DataFrame |
import os
from math import ceil
import numpy as np
from scipy.io import wavfile
from collections import defaultdict
import pandas as pd
# Ensure that the current file is a wave file
def is_wav_file(file_name):
return file_name.lower().endswith('.wav')
# Create a dictionary to match each class_name with its files
def get_dataset_files(dataset_path):
# Create a dictionary of lists
dataset_files = defaultdict(list)
for dir_path, _, file_names in os.walk(dataset_path):
for file_name in file_names:
if is_wav_file(file_name):
class_name = os.path.basename(dir_path)
dataset_files[class_name].append(file_name)
return dataset_files
# Create a list that contains all background noise files and their probability_distribution
def get_noise_files(bkg_noise_path, signal_sr=16000):
files_list = list()
for file_name in os.listdir(bkg_noise_path):
if is_wav_file(file_name):
files_list.append(file_name)
signals_length = list()
for file_name in files_list:
file_path = os.path.join(bkg_noise_path, file_name)
sampling_rate, signal = wavfile.read(file_path)
file_dir = os.path.join('background_noise', file_name)
# Ensure that the sampling rate of the current file is correct
assert sampling_rate == signal_sr, '{}'.format(file_dir)
signals_length.append(signal.shape[0])
signals_length = np.array(signals_length)
probability_distribution = signals_length / signals_length.sum()
return files_list, probability_distribution
# Group the list of files by person
def group_by_person(files_list):
person = defaultdict(list)
for file_name in files_list:
person[file_name[:8]].append(file_name)
return list(person.values())
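# Added usage sketch (hypothetical file names): recordings are grouped by the first
# 8 characters of the file name, treated here as the speaker identifier.
def _example_group_by_person():
    files = ['person01_a.wav', 'person01_b.wav', 'person02_a.wav']
    groups = group_by_person(files)
    assert sorted(len(g) for g in groups) == [1, 2]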
# Dataset splitting: training set, validation set, test set
def split(args, validation_part=0.2, test_part=0.2):
dataset_path = os.path.join(args.data_root, 'dataset')
dataset_files = get_dataset_files(dataset_path)
training_files, validation_files, test_files = [], [], []
permutation = None
for class_name, files_list in dataset_files.items():
files_lists = group_by_person(files_list)
if permutation is None:
permutation = np.random.permutation(len(files_lists))
num_test = ceil(test_part * len(files_lists))
num_validation = ceil(validation_part * len(files_lists))
for i in range(num_test):
idx = permutation[i]
for file_name in files_lists[idx]:
file_path = os.path.join('dataset', class_name, file_name).replace("\\", "/")
test_files.append((file_path, class_name))
for i in range(num_test, num_test + num_validation):
idx = permutation[i]
for file_name in files_lists[idx]:
file_path = os.path.join('dataset', class_name, file_name).replace("\\", "/")
validation_files.append((file_path, class_name))
for i in range(num_test + num_validation, len(files_lists)):
idx = permutation[i]
for file_name in files_lists[idx]:
file_path = os.path.join('dataset', class_name, file_name).replace("\\", "/")
training_files.append((file_path, class_name))
return {'train': training_files, 'val': validation_files, 'test': test_files}
def split_to_csv(dataset_splits):
d = defaultdict(lambda: defaultdict(list))
for set_name, set_data in dataset_splits.items():
for file_path, class_name in set_data:
d[set_name]['file'].append(file_path)
d[set_name]['class'].append(class_name)
for set_name, set_dict in d.items():
data_frame = | pd.DataFrame(set_dict) | pandas.DataFrame |
#python3
import argparse
import os
from Bio.PDB import *
import pandas as pd
# input parameters
ap = argparse.ArgumentParser(description="search for pdb files in the current directory, run DSSP, calculate the percentage of secondary structures for each pdb and export a txt file with the file names as rows and the secondary structure as columns")
ap.add_argument("-out", "--output", required=True, help="output txt file")
args = vars(ap.parse_args())
# retrieve pdb file names
file_list = []
for filename in sorted(os.listdir(str(os.getcwd()))):
if filename.endswith(".pdb"):
file_list.append(filename.split(".pdb")[0])
# main
df_list = [] # setup empty list
# retrieves each pdb file on the current directory and calculates the secondary structure percentage
for filename in sorted(os.listdir(str(os.getcwd()))):
if filename.endswith(".pdb"):
parser = PDBParser()
s = parser.get_structure("name", filename)
fill = s[0]
dssp = DSSP(fill, filename, dssp='mkdssp')
df = | pd.DataFrame(dssp) | pandas.DataFrame |
import sys, os
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score, auc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from matplotlib import rcParams
from scipy import interp
fontpath = '/usr/share/fonts/truetype/msttcorefonts/Times_New_Roman.ttf'
prop = font_manager.FontProperties(fname = fontpath)
rcParams['font.family'] = prop.get_name()
rcParams['font.serif'] = ['Times New Roman']
rcParams['text.usetex'] = True
EVAL_PAIRS_SCORE=False
AVERAGE_PER_COMPLEX=False
SAVE_FIG_NAME=None
SAVE_FIG_NAME="ROC-and-precision-recall_DImS_struct_differentScoring_BIPSPI.png"
#SAVE_FIG_NAME="ROC-and-precision-recall_DBv5_Pairs.png"
def loadResults( resultsPath, prefix, evalPairsScores):
if EVAL_PAIRS_SCORE:
fname= os.path.join(resultsPath, prefix+".res.tab")
scoresDf= pd.read_table(fname, comment="#", sep="\s+", dtype={"structResIdL":str,
"chainIdL":str, "structResIdR":str, "chainIdR":str})
else:
fname= os.path.join(resultsPath, prefix+".res.tab.rec")
scoresDf1= pd.read_table(fname, comment="#", sep="\s+", dtype={"resId":str, "chainId":str})
fname= os.path.join(resultsPath, prefix+".res.tab.lig")
scoresDf2= | pd.read_table(fname, comment="#", sep="\s+", dtype={"resId":str, "chainId":str}) | pandas.read_table |
import pandas as pd
import numpy as np
from scipy.stats import linregress as _linregress
from scipy.stats import rv_histogram as _rv_histogram
def Froude_number(v, h, g=9.80665):
"""
Calculate the Froude Number of the river, channel or duct flow,
to check subcritical flow assumption (if Fr <1).
Parameters
------------
v : int/float
Average velocity [m/s].
h : int/float
        Mean hydraulic depth [m].
g : int/float
Gravitational acceleration [m/s2].
Returns
---------
Fr : float
Froude Number of the river [unitless].
"""
assert isinstance(v, (int,float)), 'v must be of type int or float'
assert isinstance(h, (int,float)), 'h must be of type int or float'
assert isinstance(g, (int,float)), 'g must be of type int or float'
Fr = v / np.sqrt( g * h )
return Fr
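# Added usage sketch (not part of the original module): a 2 m/s flow over a 5 m
# deep channel gives Fr ~= 0.29, i.e. subcritical flow (Fr < 1).
def _example_froude_number():
    Fr = Froude_number(v=2.0, h=5.0)
    assert 0.28 < Fr < 0.30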
def exceedance_probability(D):
"""
Calculates the exceedance probability
Parameters
----------
D : pandas Series
Data indexed by time [datetime or s].
Returns
-------
F : pandas DataFrame
Exceedance probability [unitless] indexed by time [datetime or s]
"""
assert isinstance(D, (pd.DataFrame, pd.Series)), 'D must be of type pd.Series' # dataframe allowed for matlab
if isinstance(D, pd.DataFrame) and len(D.columns) == 1: # for matlab
D = D.squeeze().copy()
    # Calculate exceedance probability (F)
rank = D.rank(method='max', ascending=False)
F = 100* (rank / (len(D)+1) )
F = F.to_frame('F') # for matlab
return F
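# Added usage sketch (toy data): with 4 observations the largest value receives
# rank 1, giving an exceedance probability of 100 * 1 / (4 + 1) = 20%.
def _example_exceedance_probability():
    D = pd.Series([1.0, 2.0, 3.0, 4.0])
    F = exceedance_probability(D)
    assert abs(F.loc[3, 'F'] - 20.0) < 1e-9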
def polynomial_fit(x, y, n):
"""
Returns a polynomial fit for y given x of order n
with an R-squared score of the fit
Parameters
-----------
x : numpy array
x data for polynomial fit.
y : numpy array
y data for polynomial fit.
n : int
order of the polynomial fit.
Returns
----------
polynomial_coefficients : numpy polynomial
List of polynomial coefficients
R2 : float
        Polynomial fit coefficient of determination
"""
try:
x = np.array(x)
except:
pass
try:
y = np.array(y)
except:
pass
assert isinstance(x, np.ndarray), 'x must be of type np.ndarray'
assert isinstance(y, np.ndarray), 'y must be of type np.ndarray'
assert isinstance(n, int), 'n must be of type int'
    # Get coefficients of the polynomial of order n
    polynomial_coefficients = np.poly1d(np.polyfit(x, y, n))
    # Calculate the coefficient of determination
slope, intercept, r_value, p_value, std_err = _linregress(y, polynomial_coefficients(x))
R2 = r_value**2
return polynomial_coefficients, R2
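# Added usage sketch (toy data): points lying exactly on y = 2x + 1 are recovered
# by a first-order fit with a coefficient of determination of 1.
def _example_polynomial_fit():
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = 2.0 * x + 1.0
    poly, R2 = polynomial_fit(x, y, 1)
    assert abs(poly(4.0) - 9.0) < 1e-6 and abs(R2 - 1.0) < 1e-6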
def discharge_to_velocity(D, polynomial_coefficients):
"""
Calculates velocity given discharge data and the relationship between
discharge and velocity at an individual turbine
Parameters
------------
D : pandas Series
Discharge data [m3/s] indexed by time [datetime or s]
polynomial_coefficients : numpy polynomial
        List of polynomial coefficients that describe the relationship between
discharge and velocity at an individual turbine
Returns
------------
V: pandas DataFrame
Velocity [m/s] indexed by time [datetime or s]
"""
assert isinstance(D, (pd.DataFrame, pd.Series)), 'D must be of type pd.Series' # dataframe allowed for matlab
assert isinstance(polynomial_coefficients, np.poly1d), 'polynomial_coefficients must be of type np.poly1d'
if isinstance(D, pd.DataFrame) and len(D.columns) == 1: # for matlab
D = D.squeeze().copy()
# Calculate velocity using polynomial
vals = polynomial_coefficients(D)
V = | pd.Series(vals, index=D.index) | pandas.Series |
import numpy as np
import pandas as pd
import os
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, roc_auc_score
from sklearn import metrics
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import f1_score
from functools import reduce
import subprocess
def bash(command, mute = False):
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if not mute:
print(output)
def files(path):
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
yield file
def reduce_memory(df):
for col in df.columns:
if df[col].dtype != object:
# make variables for Int, max and min
IsInt = False
mx = df[col].max()
mn = df[col].min()
# Integer does not support NA, therefore, NA needs to be filled
if not np.isfinite(df[col]).all():
df[col].fillna(mn-1,inplace=True)
# test if column can be converted to an integer
asint = df[col].fillna(0).astype(np.int64)
result = (df[col] - asint)
result = result.sum()
if result > -0.01 and result < 0.01:
IsInt = True
# Make Integer/unsigned Integer datatypes
if IsInt:
if mn >= 0:
if mx < 255:
df[col] = df[col].astype(np.uint8)
elif mx < 65535:
df[col] = df[col].astype(np.uint16)
elif mx < 4294967295:
df[col] = df[col].astype(np.uint32)
else:
df[col] = df[col].astype(np.uint64)
else:
if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
# Make float datatypes 32 bit
else:
df[col] = df[col].astype(np.float32)
return df
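# Added usage sketch (toy frame): small non-negative integers are downcast to
# uint8 and floating-point columns to float32, shrinking memory use.
def _example_reduce_memory():
    toy = pd.DataFrame({'small_int': [0, 1, 2], 'val': [0.5, 1.5, 2.5]})
    toy = reduce_memory(toy)
    assert toy['small_int'].dtype == np.uint8
    assert toy['val'].dtype == np.float32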
def get_csv_dataset(DATAPATH, pattern, target = 'target', separate_x_y = True):
    data_dict = {'target': [], 'fips': [], 'record': []}
count = 0
for i, filepath in enumerate(files(DATAPATH)):
if pattern in filepath:
with open(os.path.join(DATAPATH, filepath), 'r') as file:
lines = file.readlines()
for record_line in lines:
count += 1
data_dict['target'].append(int(filepath[1:4] == "POS"))
data_dict['fips'].append(filepath[4:])
data_dict['record'].append(record_line)
df = pd.DataFrame(data_dict)
if not separate_x_y:
return df
else:
return df.drop(target, 1), df[target]
def get_csv_dataset(DATAPATH):
data_dict = {'gender': [], 'fips': [], 'record': [], 'target': []}
for filename in os.listdir(DATAPATH):
with open(DATAPATH + "/" + filename, 'r') as f:
ao = f.readlines()
for record in ao:
#data_dict['patient_id'].append(int(record[1]))
data_dict['gender'].append(int(filename[0] == "M"))
data_dict['target'].append(int(filename[1:4] == "POS"))
data_dict['fips'].append(filename[4:])
data_dict['record'].append(record)
df = pd.DataFrame(data_dict)
return df
def intc(i):
try:
return int(i)
except:
return 0
def powerset(iterable, minlen = 0):
"""
    Return the list of all subsets of `iterable` that contain more than `minlen` elements.
"""
xs = list(iterable)
    # note: materializes all qualifying subsets as a list of lists
    return [list(i) for i in itertools.chain.from_iterable(itertools.combinations(xs, n) for n in range(len(xs) + 1)) if len(i) > minlen]
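# Added usage sketch (not in the original module): with minlen=1 the single-element
# subsets are excluded, leaving the four subsets of {1, 2, 3} with at least two elements.
def _example_powerset():
    subsets = powerset([1, 2, 3], minlen=1)
    assert len(subsets) == 4 and [1, 2, 3] in subsets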
def get_test_data(PATH, FIRST_N_WEEKS, full = False):
if full:
#test = get_csv_dataset(PATH, 'M', separate_x_y = False)
test = get_csv_dataset(PATH)
else:
test = pd.read_csv('TEST_SET.csv')
test['sequence'] = [[intc(i) for i in record[:-2].split(' ')][:FIRST_N_WEEKS] for record in test.record] #intc(i) to get integer
X = pd.DataFrame(test.sequence.tolist(), columns=['SEQ_%i' % i for i in range(FIRST_N_WEEKS)])
X['fips'] = test.fips
X['target'] = test.target
X['gender'] = test.gender
#X['patient_id'] = test.patient_id
X.dropna(axis=0, inplace = True)
return X
def read_neighbor(sep=" "):
'''
Read neighbor fips file
'''
n_f = | pd.DataFrame() | pandas.DataFrame |
from Bio import SeqIO
import pandas as pd
import numpy as np
import math
import re
import os
def invert_new(pdDataFrame, No_genes, pd_column_index, gene_description):
pdDataFrame_dict = pdDataFrame.to_dict()
new_pdDataFrame_dict = {}
for i in No_genes.index:
new_pdDataFrame_dict[i] = list(pdDataFrame_dict[i].values())[:No_genes[i]]
complement = re.compile(r'strand_(.*)?\|\|')
pdDataFrame_dict_inverted = {}
for i in pd_column_index.columns:
for j in pd_column_index.index:
if 1 <= pd_column_index.at[j,i] < 2:
if complement.search(gene_description.at[j,i]).group(1) == '-1':
pdDataFrame_dict_inverted[i] = list(new_pdDataFrame_dict[i])[::-1]
else:
pdDataFrame_dict_inverted[i] = list(new_pdDataFrame_dict[i])
else:
continue
return pd.DataFrame.from_dict(pdDataFrame_dict_inverted, orient='index').T
def make_target_centered(list_AMR, temp):
AMR = list_AMR[0].copy()
for i in range(len(list_AMR))[::-1]:
AMR.update(list_AMR[i][list_AMR[i].astype(int) == list_AMR[i].astype(int).max().max()])
AMR.replace({0.01:np.nan}, inplace=True)
No_genes = (~(AMR.isnull())).sum()
gene_dict = {}
description = {}
gene_name = re.compile(r'\|\|\s(.*)?')
for i in No_genes.index:
each_gene = []
each_description = []
for j in range(No_genes[i]):
record = SeqIO.read(temp + '/each_fasta/' + i +'/' + str(j) + '.fasta','fasta')
each_description.append(record.description)
each_gene.append(gene_name.search(record.description).group(1))
gene_dict[i] = each_gene
description[i] = each_description
gene = pd.DataFrame.from_dict(gene_dict, orient='index').T
gene_description = pd.DataFrame.from_dict(description, orient='index').T
AMR_inverted = invert_new(AMR, No_genes, AMR, gene_description)
target_len = pd.DataFrame(columns=['complete sequence','position','len','seq'])
AMR_inverted_int = AMR_inverted.fillna(0).astype(int)
for i in AMR_inverted_int.T.index:
position = []
length = []
sequence = []
for j in AMR_inverted_int[i][AMR_inverted_int[i]==1].index:
record = SeqIO.read(temp + '/each_fasta/' + i +'/' + str(j) + '.fasta','fasta')
position.append(j)
length.append(len(record.seq))
sequence.append(str(record.seq))
target_len.loc[i,'position'] = position
target_len.loc[i,'len'] = length
target_len.loc[i,'seq'] = sequence
for i in No_genes.index:
record = SeqIO.read(temp + '/fasta/'+ i + '.fasta','fasta')
if re.compile(r'complete sequence').search(record.description) or re.compile(r'complete genome').search(record.description):
if not re.compile(r'cds').search(record.description):
target_len.loc[i,'complete sequence'] = 'yes'
    # Move the target gene to the front of each sequence
first = {}
for i in AMR_inverted.columns:
position = target_len.loc[i,'position'][0]
upper = list(AMR_inverted.T.loc[i,position:].dropna())
lower = list(AMR_inverted.T.loc[i,:position-1].dropna())
first[i] = upper + lower
target_first = pd.DataFrame.from_dict(first,orient='index')
gene_inverted = invert_new(gene, No_genes, AMR, gene_description)
    # Also move the target to the front for the gene name table
first_gene = {}
for i in AMR_inverted.columns:
position = target_len.loc[i,'position'][0]
upper = list(gene_inverted.T.loc[i,position:].dropna())
lower = list(gene_inverted.T.loc[i,:position-1].dropna())
first_gene[i] = upper + lower
gene_target_first = pd.DataFrame.from_dict(first_gene,orient='index')
    # Determine the longest lane (largest midpoint)
mid = pd.DataFrame(columns=['mid'])
for i in target_first.index:
mid.loc[i,'mid'] = math.ceil((len(target_first.loc[i,:]) - target_first.loc[i,:].isnull().sum())/2)
maximum = mid.max()
    # For now, split each plasmid in half so the target ends up roughly in the center
center = {}
for i in target_first.index:
Mid = int(mid.loc[i,'mid'])
center[i] = [None] * (int(maximum[0])-Mid) + list(target_first.loc[i,Mid+1:].dropna()) + list(target_first.loc[i,:Mid])
modified_AMR = pd.DataFrame.from_dict(center, orient='index').T
    # For now, split each plasmid in half so the target ends up roughly in the center
gene_center = {}
for i in gene_target_first.index:
Mid = int(mid.loc[i,'mid'])
gene_center[i] = [None] * (int(maximum[0])-Mid) + list(gene_target_first.loc[i,Mid+1:].dropna()) + list(gene_target_first.loc[i,:Mid])
modified_gene = pd.DataFrame.from_dict(gene_center, orient='index').T
    # Align so that the target is centered; this completes the layout
    # Data sets that are not complete sequences are inserted in their original order
if len(modified_AMR.index) > 10:
around_max_target10 = modified_AMR.loc[int(maximum[0])-5:int(maximum[0])+5,:].fillna(0).astype(int)
else:
around_max_target10 = modified_AMR.fillna(0).astype(int)
count_target = (around_max_target10==1).sum(axis='columns')
count_target_max = count_target.max()
for i in list(count_target[count_target == count_target_max].index):
target_max_row = i
    if count_target[target_max_row+1] > 0: # if the row one below also contains a 1, align to that row instead
target_max_row += 1
new_modified = {}
gene_new_modified = {}
for i in modified_AMR.columns:
if target_len.loc[i,'complete sequence'] == 'yes':
align_AMR = list(modified_AMR.T.loc[i,:])
align_gene = list(modified_gene.T.loc[i,:])
d = 0
while modified_AMR.fillna(0).astype(int).T.loc[i,target_max_row-d] != 1:
d += 1
new_modified[i] = [np.nan] * d + align_AMR
gene_new_modified[i] = [np.nan] * d + align_gene
else:
align_AMR = list(AMR_inverted.T.loc[i,:])
align_gene = list(gene_inverted.T.loc[i,:])
new_modified[i] = [np.nan] * (target_max_row - target_len.loc[i,'position'][0]) + align_AMR
gene_new_modified[i] = [np.nan] * (target_max_row - target_len.loc[i,'position'][0]) + align_gene
pd_new_modified = | pd.DataFrame.from_dict(new_modified, orient='index') | pandas.DataFrame.from_dict |
import datetime
import pathlib
import pickle
from io import BytesIO
from unittest.mock import MagicMock, patch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import yaml
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.pipeline import Pipeline
from ml_tooling import Model
from ml_tooling.data import Dataset
from ml_tooling.logging import Log
from ml_tooling.metrics import Metrics, Metric
from ml_tooling.result import Result
from ml_tooling.search import Real
from ml_tooling.storage import FileStorage
from ml_tooling.transformers import DFStandardScaler, DFFeatureUnion
from ml_tooling.utils import MLToolingError, DatasetError
plt.switch_backend("agg")
class TestBaseClass:
def test_is_properties_works(
self, classifier: Model, regression: Model, pipeline_linear: Pipeline
):
assert classifier.is_regressor is False
assert classifier.is_classifier is True
assert regression.is_regressor is True
assert regression.is_classifier is False
assert classifier.is_pipeline is False
assert regression.is_pipeline is False
pipeline = Model(pipeline_linear)
assert pipeline.is_pipeline is True
def test_instantiate_model_with_non_estimator_pipeline_fails(self):
example_pipe = Pipeline([("scale", DFStandardScaler)])
with pytest.raises(
MLToolingError,
match="You passed a Pipeline without an estimator as the last step",
):
Model(example_pipe)
def test_instantiate_model_with_feature_pipeline_sets_estimator_correctly(self):
example_pipe = Pipeline([("scale", DFStandardScaler)])
clf = LinearRegression()
model = Model(clf, feature_pipeline=example_pipe)
expected = Pipeline([("features", example_pipe), ("estimator", clf)])
assert model.estimator.steps == expected.steps
def test_instantiate_model_with_other_object_fails(self):
with pytest.raises(
MLToolingError,
match="Expected a Pipeline or Estimator - got <class 'dict'>",
):
Model({})
def test_default_metric_getter_works_as_expected_classifier(self):
rf = Model(RandomForestClassifier(n_estimators=10))
assert rf.config.CLASSIFIER_METRIC == "accuracy"
assert rf.config.REGRESSION_METRIC == "r2"
assert rf.default_metric == "accuracy"
rf.default_metric = "fowlkes_mallows_score"
assert rf.config.CLASSIFIER_METRIC == "fowlkes_mallows_score"
assert rf.config.REGRESSION_METRIC == "r2"
assert rf.default_metric == "fowlkes_mallows_score"
rf.config.reset_config()
def test_default_metric_getter_works_as_expected_regressor(self):
linreg = Model(LinearRegression())
assert linreg.config.CLASSIFIER_METRIC == "accuracy"
assert linreg.config.REGRESSION_METRIC == "r2"
assert linreg.default_metric == "r2"
linreg.default_metric = "neg_mean_squared_error"
assert linreg.config.CLASSIFIER_METRIC == "accuracy"
assert linreg.config.REGRESSION_METRIC == "neg_mean_squared_error"
assert linreg.default_metric == "neg_mean_squared_error"
linreg.config.reset_config()
def test_default_metric_works_as_expected_without_pipeline(self):
rf = Model(RandomForestClassifier(n_estimators=10))
linreg = Model(LinearRegression())
assert "accuracy" == rf.default_metric
assert "r2" == linreg.default_metric
rf.config.CLASSIFIER_METRIC = "fowlkes_mallows_score"
linreg.config.REGRESSION_METRIC = "neg_mean_squared_error"
assert "fowlkes_mallows_score" == rf.default_metric
assert "neg_mean_squared_error" == linreg.default_metric
rf.config.reset_config()
linreg.config.reset_config()
def test_default_metric_works_as_expected_with_pipeline(
self, pipeline_logistic: Pipeline, pipeline_linear: Pipeline
):
logreg = Model(pipeline_logistic)
linreg = Model(pipeline_linear)
assert "accuracy" == logreg.default_metric
assert "r2" == linreg.default_metric
logreg.config.CLASSIFIER_METRIC = "fowlkes_mallows_score"
linreg.config.REGRESSION_METRIC = "neg_mean_squared_error"
assert "fowlkes_mallows_score" == logreg.default_metric
assert "neg_mean_squared_error" == linreg.default_metric
logreg.config.reset_config()
linreg.config.reset_config()
def test_regression_model_can_be_saved(
self, classifier: Model, tmp_path: pathlib.Path, train_iris_dataset
):
classifier.score_estimator(train_iris_dataset)
load_storage = FileStorage(tmp_path)
storage = FileStorage(tmp_path)
saved_model_path = classifier.save_estimator(storage)
assert saved_model_path.exists()
loaded_model = classifier.load_estimator(saved_model_path, storage=load_storage)
assert loaded_model.estimator.get_params() == classifier.estimator.get_params()
def test_regression_model_filename_is_generated_correctly(
self, classifier: Model, tmp_path: pathlib.Path, train_iris_dataset
):
storage = FileStorage(tmp_path)
saved_model_path = classifier.save_estimator(storage)
assert saved_model_path.exists()
assert datetime.datetime.strptime(
saved_model_path.stem, f"{classifier.estimator_name}_%Y_%m_%d_%H_%M_%S_%f"
)
def test_save_model_saves_pipeline_correctly(
self, pipeline_logistic: Pipeline, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(pipeline_logistic)
model.train_estimator(train_iris_dataset)
saved_model_path = model.save_estimator(FileStorage(tmp_path))
assert saved_model_path.exists()
@patch("ml_tooling.logging.log_estimator.get_git_hash")
def test_save_estimator_saves_logging_dir_correctly(
self, mock_hash: MagicMock, classifier: Model, tmp_path: pathlib.Path
):
mock_hash.return_value = "1234"
with classifier.log(str(tmp_path)):
expected_file = classifier.save_estimator(FileStorage(tmp_path))
assert expected_file.exists()
assert (
"LogisticRegression" in [str(file) for file in tmp_path.rglob("*.yaml")][0]
)
mock_hash.assert_called_once()
def test_save_estimator_with_prod_flag_saves_correctly(self, classifier: Model):
mock_storage = MagicMock()
classifier.save_estimator(mock_storage, prod=True)
mock_storage.save.assert_called_once_with(
classifier.estimator, "production_model.pkl", prod=True
)
def test_save_estimator_uses_default_storage_if_no_storage_is_passed(
self, tmp_path: pathlib.Path, classifier: Model
):
classifier.config.ESTIMATOR_DIR = tmp_path
classifier.save_estimator()
models = classifier.config.default_storage.get_list()
assert len(models) == 1
new_classifier = Model.load_estimator(models[0])
assert (
classifier.estimator.get_params() == new_classifier.estimator.get_params()
)
@patch("ml_tooling.baseclass.import_path")
def test_can_load_production_estimator(
self, mock_path: MagicMock, classifier: Model
):
buffer = BytesIO()
pickle.dump(classifier.estimator, buffer)
buffer.seek(0)
mock_path.return_value.__enter__.return_value = buffer
model = Model.load_production_estimator("test")
assert isinstance(model, Model)
assert isinstance(model.estimator, BaseEstimator)
def test_log_context_manager_works_as_expected(self, regression: Model):
assert regression.config.LOG is False
assert "runs" == regression.config.RUN_DIR.name
with regression.log("test"):
assert regression.config.LOG is True
assert "test" == regression.config.RUN_DIR.name
assert "runs" == regression.config.RUN_DIR.parent.name
assert regression.config.LOG is False
assert "runs" == regression.config.RUN_DIR.name
assert "test" not in regression.config.RUN_DIR.parts
def test_log_context_manager_logs_when_scoring_model(
self, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(LinearRegression())
runs = tmp_path / "runs"
with model.log(str(runs)):
result = model.score_estimator(train_iris_dataset)
for file in runs.rglob("LinearRegression_*"):
with file.open() as f:
log_result = yaml.safe_load(f)
assert result.metrics.score == log_result["metrics"]["r2"]
assert result.model.estimator_name == log_result["estimator_name"]
def test_test_models_logs_when_given_dir(
self, tmp_path: pathlib.Path, train_iris_dataset
):
test_models_log = tmp_path / "test_estimators"
Model.test_estimators(
train_iris_dataset,
[
RandomForestClassifier(n_estimators=10),
DummyClassifier(strategy="prior"),
],
log_dir=str(test_models_log),
metrics="accuracy",
)
for file in test_models_log.rglob("*.yaml"):
with file.open() as f:
result = yaml.safe_load(f)
model_name = result["model_name"]
assert model_name in {
"IrisData_RandomForestClassifier",
"IrisData_DummyClassifier",
}
def test_dump_serializes_correctly_without_pipeline(self, regression: Model):
serialized_model = regression.to_dict()
expected = [
{
"module": "sklearn.linear_model._base",
"classname": "LinearRegression",
"params": {
"copy_X": True,
"fit_intercept": True,
"n_jobs": None,
"normalize": False,
},
}
]
assert serialized_model == expected
def test_dump_serializes_correctly_with_pipeline(self, pipeline_linear: Pipeline):
serialized_model = Model(pipeline_linear).to_dict()
expected = [
{
"name": "scale",
"module": "sklearn.preprocessing._data",
"classname": "StandardScaler",
"params": {"copy": True, "with_mean": True, "with_std": True},
},
{
"name": "estimator",
"module": "sklearn.linear_model._base",
"classname": "LinearRegression",
"params": {
"copy_X": True,
"fit_intercept": True,
"n_jobs": None,
"normalize": False,
},
},
]
assert serialized_model == expected
def test_to_dict_serializes_correctly_with_feature_union(
self, feature_union_classifier: DFFeatureUnion
):
model = Model(feature_union_classifier)
result = model.to_dict()
assert len(result) == 2
union = result[0]
assert union["name"] == "features"
assert len(union["params"]) == 2
pipe1 = union["params"][0]
pipe2 = union["params"][1]
assert pipe1["name"] == "pipe1"
select1 = pipe1["params"][0]
scale1 = pipe1["params"][1]
assert select1["name"] == "select"
assert select1["classname"] == "Select"
assert select1["params"] == {
"columns": ["sepal length (cm)", "sepal width (cm)"]
}
assert scale1["name"] == "scale"
assert scale1["classname"] == "DFStandardScaler"
assert scale1["params"] == {"copy": True, "with_mean": True, "with_std": True}
assert pipe2["name"] == "pipe2"
select2 = pipe2["params"][0]
scale2 = pipe2["params"][1]
assert select2["name"] == "select"
assert select2["classname"] == "Select"
assert select2["params"] == {
"columns": ["petal length (cm)", "petal width (cm)"]
}
assert scale2["name"] == "scale"
assert scale2["classname"] == "DFStandardScaler"
assert scale2["params"] == {"copy": True, "with_mean": True, "with_std": True}
def test_from_yaml_serializes_correctly_with_feature_union(
self, feature_union_classifier: DFFeatureUnion, tmp_path: pathlib.Path
):
model = Model(feature_union_classifier)
result = model.to_dict()
log = Log(
name="test", metrics=Metrics.from_list(["accuracy"]), estimator=result
)
log.save_log(tmp_path)
new_model = Model.from_yaml(log.output_path)
assert len(new_model.estimator.steps[0][1].transformer_list) == 2
new_steps = new_model.estimator.steps
old_steps = model.estimator.steps
assert new_steps[0][0] == old_steps[0][0]
assert isinstance(new_steps[0][1], type(old_steps[0][1]))
new_union = new_steps[0][1].transformer_list
old_union = old_steps[0][1].transformer_list
assert len(new_union) == len(old_union)
for new_transform, old_transform in zip(new_union, old_union):
assert new_transform[1].steps[0][0] == old_transform[1].steps[0][0]
assert (
new_transform[1].steps[0][1].get_params()
== old_transform[1].steps[0][1].get_params()
)
def test_can_load_serialized_model_from_pipeline(
self, pipeline_linear: Pipeline, tmp_path: pathlib.Path
):
model = Model(pipeline_linear)
log = Log(
name="test",
estimator=model.to_dict(),
metrics=Metrics([Metric("accuracy", score=1.0)]),
)
log.save_log(tmp_path)
model2 = Model.from_yaml(log.output_path)
for model1, model2 in zip(model.estimator.steps, model2.estimator.steps):
assert model1[0] == model2[0]
assert model1[1].get_params() == model2[1].get_params()
def test_can_load_serialized_model_from_estimator(
self, classifier: Model, tmp_path: pathlib.Path
):
log = Log(
name="test",
estimator=classifier.to_dict(),
metrics=Metrics([Metric("accuracy", score=1.0)]),
)
log.save_log(tmp_path)
model2 = Model.from_yaml(log.output_path)
assert model2.estimator.get_params() == classifier.estimator.get_params()
class TestTrainEstimator:
def test_train_model_sets_result_to_none(
self, regression: Model, train_iris_dataset
):
assert regression.result is not None
regression.train_estimator(train_iris_dataset)
assert regression.result is None
def test_train_model_followed_by_score_model_returns_correctly(
self, pipeline_logistic: Pipeline, train_iris_dataset
):
model = Model(pipeline_logistic)
model.train_estimator(train_iris_dataset)
model.score_estimator(train_iris_dataset)
assert isinstance(model.result, Result)
def test_train_model_errors_correctly_when_not_scored(
self, pipeline_logistic: Pipeline, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(pipeline_logistic)
with pytest.raises(MLToolingError, match="You haven't scored the estimator"):
with model.log(str(tmp_path)):
model.train_estimator(train_iris_dataset)
model.save_estimator(FileStorage(tmp_path))
def test_can_score_estimator_with_no_y_value(self):
class DummyEstimator(BaseEstimator, RegressorMixin):
def __init__(self):
self.average = None
def fit(self, x, y=None):
self.average = np.mean(x, axis=0)
return self
def predict(self, x):
return self.average
class DummyData(Dataset):
def load_training_data(self):
return pd.DataFrame({"col1": [1, 2, 3, 4], "col2": [4, 5, 6, 7]}), None
def load_prediction_data(self, *args, **kwargs):
return pd.DataFrame({"col1": [1, 2, 3, 4], "col2": [4, 5, 6, 7]})
model = Model(DummyEstimator())
data = DummyData()
model.train_estimator(data)
assert np.all(np.isclose(model.estimator.average, np.array([2.5, 5.5])))
with pytest.raises(DatasetError, match="The dataset does not define a y value"):
data.create_train_test()
class TestScoreEstimator:
def test_score_estimator_creates_train_test_data(
self, boston_dataset, train_boston_dataset
):
model = Model(LinearRegression())
data = boston_dataset()
model.score_estimator(data)
test = train_boston_dataset
| pd.testing.assert_frame_equal(data.test_x, test.test_x) | pandas.testing.assert_frame_equal |
# Copyright (c) Gradient Institute. All rights reserved.
# Licensed under the Apache 2.0 License.
"""`hypothesis` strategies to generate test data."""
import logging
from typing import Callable, Optional, Tuple, Union
import hypothesis as hyp
import hypothesis.extra as hxt
import hypothesis.strategies as hst
import numpy as np
import pandas as pd
from hypothesis.extra import numpy as hnp
logger = logging.getLogger()
@hst.composite
def Xy_np(
draw: Callable, n_rows: Optional[Union[int, hst.SearchStrategy[int]]] = None
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate sklearn data as numpy arrays.
A `hypothesis` strategy.
By construction, constructed arrays:
- are 2d
- have no infinite values
- X and y arrays have at least 1 entry # TODO: weaken assumption on y?
- X and y arrays have same number of rows
Parameters
----------
draw : Callable
Should be of type hst.SearchStrategy[A] -> A
Passed in by hst.composite decorator to construct a composite strategy
n_rows : Optional[Union[int, hst.SearchStrategy[int]]], optional
Number of data rows. If strategy, draw from it. If None, draw from default
strategy; integer between 1 and 10. By default None
Returns
-------
(X, y) : Tuple[np.ndarray, np.ndarray]
Input, output test data
"""
if n_rows is None:
n_rows_ = draw(hst.integers(min_value=1, max_value=10))
elif not isinstance(n_rows, int):
n_rows_ = draw(n_rows)
else:
n_rows_ = n_rows
n_X_cols = draw(hst.integers(min_value=1, max_value=10))
n_y_cols = draw(hst.integers(min_value=1, max_value=2))
X_shape = (n_rows_, n_X_cols)
y_shape = (n_rows_, n_y_cols)
# logger.info(f"{X_shape}, {y_shape}")
dtype_strategy = hst.one_of(
hnp.floating_dtypes(endianness="<"),
# TODO: re-introduce other types
# hxt.numpy.boolean_dtypes(),
# hxt.numpy.integer_dtypes(endianness="<"), # scipy expects little-endian
# hxt.numpy.unsigned_integer_dtypes(),
# hxt.numpy.complex_number_dtypes()
)
X_strategy = hxt.numpy.arrays(dtype=dtype_strategy, shape=X_shape)
y_strategy = hxt.numpy.arrays(dtype=dtype_strategy, shape=y_shape)
X = draw(X_strategy)
y = draw(y_strategy)
# filter infinities (TODO: this could be made more efficient)
hyp.assume(np.all(np.isfinite(X)))
hyp.assume(np.all(np.isfinite(y)))
return X, y
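# Added usage sketch (not part of the original module): the strategy can drive a
# property-based check, e.g. that X and y always share the same number of rows.
@hyp.given(Xy_np())
def check_xy_row_counts_match(data: Tuple[np.ndarray, np.ndarray]) -> None:
    X, y = data
    assert X.shape[0] == y.shape[0]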
@hst.composite
def Xy_pd(
draw: Callable, n_rows: Optional[Union[int, hst.SearchStrategy[int]]] = None
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Generate sklearn data as numeric pandas arrays.
A light wrapper around Xy_np.
Parameters
----------
draw : Callable
Should be of type hst.SearchStrategy[A] -> A
Passed in by hst.composite decorator to construct a composite strategy
n_rows : Optional[Union[int, hst.SearchStrategy[int]]], optional
Number of data rows. If strategy, draw from it. If None, draw from default
strategy; integer between 1 and 10. By default None
Returns
-------
(X, y) : Tuple[pd.DataFrame, pd.DataFrame]
Input, output test data
"""
n_rows_ = hst.integers(min_value=1, max_value=10) if n_rows is None else n_rows
X, y = draw(Xy_np(n_rows=n_rows_))
X_pd = | pd.DataFrame(X) | pandas.DataFrame |
# Project: fuelmeter-tools
# Created by: # Created on: 5/7/2020
from pandas.tseries.offsets import MonthEnd
from puma.Report import Report
import pandas as pd
import numpy as np
import puma.plot as pplot
import puma.tex as ptex
import datetime
import os
class MultiMonthReport(Report):
def __init__(self,start,end,title,nc,houses,monthly_fuel_price):
super(MultiMonthReport, self).__init__(start,end,title,nc,houses,monthly_fuel_price)
def getAveCostPerDay(self):
        '''calculates the average cost of fuel per day. If the attribute gpd_hdd
        is available it will be used to calculate costs; otherwise the attribute
        fuel_by_day is used.'''
if 'gpd_hdd' not in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
else:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
return self.cost_per_day.mean()
def getCostPerDay(self,fuel_by_day):
'''calculate cost for each day based on a fuel price for each day and fuel consumption for each day'''
self.fuel_price.name = 'fuel_price'
df = pd.concat([fuel_by_day, self.fuel_price.groupby(pd.Grouper(freq='D')).mean()], axis=1)
df.fuel_price = df.fuel_price.ffill() # filled for days that did not match
return df.fuel_consumption * df.fuel_price
# def getEstimatedTotalGallons(self):
# '''calculates the total gallons used each month and sets the attribute gallons_by_month
# :return float total gallons for the entire report period'''
# self.estimated_gallons_by_month = self.calculateTotalGallonsByMonth()
# return self.gallons_by_month.sum()
def getCostPerMonth(self):
'''calculates the total cost of consumed fuel per month by summing cost per day for every day within a month'''
        if self.cost_per_day is None:
if 'gpd_hdd' in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
else:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
self.cost_per_month = self.cost_per_day.groupby(pd.Grouper(freq="M")).sum()
return
def getTotalCost(self):
'''uses hdd corrected estimate of fuel consumption to estimate cost per day and aggregate to the entire report period.'''
costPerDay = self.getCostPerDay(self.gpd_hdd)
return costPerDay.sum()
def calculateMeanDailyGallonsPerMonth(self):
        '''Calculates the mean daily gallons consumed for each month and stores it in the meanDailyByMonth attribute; months with fewer than 15 days of data are dropped.'''
#actual measured total by day We use a count of 5 records as our cutoff for producing a legit average
groupedDaily = self.filtered_df['fuel_consumption'].groupby(pd.Grouper(freq="D")).sum(min_count=5) #total gallons each day
#total days needing estimates
self.meanDailyByMonth = groupedDaily.groupby(pd.Grouper(freq='M')).agg(['mean','count']) #total daily gallons averaged over month
        self.meanDailyByMonth = self.meanDailyByMonth.loc[self.meanDailyByMonth['count'] >= 15, 'mean'] #drop months with fewer than 15 days of data
#estimatedTotalByMonth = self.meanDailyByMonth * self.meanDailyByMonth.index.days_in_month #use the average to calculate a total amount for the month
return
def calculateMeanGallonsPerMonth(self):
'''get the average gallons consumed for all months in the reporting period'''
tgpm = self.calculateTotalGallonsByMonth()
return tgpm.mean()
def getGallonsPerFt(self):
'''get the total gallons used in the report period per house area (square feet).
        sets the aveGPFByYear attribute: the average daily gallons per square foot for each year in the report period.
:return float total gallons per house square footage for the report period'''
totalGPF = super().getGallonsPerFt()
AveDailyByYear = self.filtered_df['fuel_consumption'].groupby(pd.Grouper(freq='A')).mean()
self.aveGPFByYear = AveDailyByYear/self.area
return totalGPF
def makePlots(self):
'''produces pngs of plots specific to this report'''
os.chdir(self.name)
outDoor = self.ave_MonthlyoutdoorT['ave']
        pplot.plotActualvsEstimated(self.actualGallonsByMonth, self.estimatedGallonsByMonth['total_fuel'])
pplot.plot_multiyear_bar_progress_with_temperature(self.actualAverageGallonsPerHDDByMonth['average_gphdd'], outDoor[self.start:self.end],
'monthly_track_your_progress.png')
you = self.getMeanGallonsPerMonthPerAreaByYear()
you.name = 'you'
df = pd.concat([you, self.yearly_neighbor_ave_monthly_usage_per_area], join='inner', axis=1)
pplot.plot_annual_fuel_usage(df, 'fuel_usage.png')
gph = pd.DataFrame(self.gph,index = self.gph.index)
gph['season'] = 0
gph.loc[(gph.index.month >= 1) & (gph.index.month <= 3), 'season'] = 1
gph.loc[(gph.index.month >= 4) & (gph.index.month <= 6), 'season'] = 2
gph.loc[(gph.index.month >= 7) & (gph.index.month <= 9), 'season'] = 3
gph.loc[(gph.index.month >= 10) & (gph.index.month <= 12), 'season'] = 4
ave_gal_by_hour_by_season = gph.groupby([gph.season, gph.index.hour]).mean()
pplot.seasonal_polar_flow_plot(ave_gal_by_hour_by_season,
'seasonal_polar_plot.png')
os.chdir("..")
return
def getAveCostPerYear(self):
'''calculate the average cost per year based on the average daily cost for the report period'''
return self.ave_cost_per_day * 365
def getMeanGallonsPerMonthPerAreaByYear(self):
gpmpa = self.estimatedGallonsByMonth/self.area
gpmpa = pd.concat([gpmpa,self.actualGallonsByMonth], axis=1)
gpmpa = gpmpa[pd.notnull(gpmpa.iloc[:,1])] #estimate is only produced for months with at least 15 days of actual data
AverageGPMPerArea = gpmpa['total_fuel'].groupby(pd.Grouper(freq='A')).mean()
return AverageGPMPerArea
def getYearlyNeigborhoodUsagePerArea(self):
return self.neighborhood.getMeanMonthlyGPFByYear(self.houses)
def getNeighborhoodUsagePerArea(self):
return self.neighborhood.getUsageTable([])
def compare2Neighbors(self):
'''generate neighborhood metrics'''
super().compare2Neighbors()
self.yearly_neighbor_ave_monthly_usage_per_area, self.yearly_neighbor_usage_std_per_area =self.getYearlyNeigborhoodUsagePerArea()
self.neighborhoodUsage = self.getNeighborhoodUsagePerArea()
return
def generateSummaryTable(self,cost):
'''create a summary table of fuel usage, costs and temperatures by month'''
combinedData = pd.concat([np.round(self.estimatedGallonsByMonth['total_fuel'],2), self.estimatedGallonsByMonth['sample_size'],np.round(self.meanDailyByMonth,4), np.round(self.ave_MonthlyindoorT['ave'], 0),
np.round(self.ave_MonthlyoutdoorT['ave'], 0)], axis=1)
combinedData = combinedData[:self.estimatedGallonsByMonth.index[-1]]
combinedData.columns = ['total_gal_by_month','sample_size','ave_daily_by_month','ave_indoor_t_by_month','ave_outdoor_t_by_month']
combinedData.loc[pd.isnull(combinedData['ave_indoor_t_by_month']), 'ave_daily_by_month'] = np.nan
combinedData['ave_daily_cost_by_month'] = np.round(combinedData['ave_daily_by_month'] * cost,2)
combinedData['total_cost_by_month'] = np.round(combinedData['total_gal_by_month'] * cost,2)
#self.estimatedCostByMonth = combinedData['total_cost_by_month']
combinedData['month_year'] = [datetime.datetime.strftime(pd.to_datetime(i),format="%b %y") for i in combinedData.index]
        combinedData['total_cost_by_month'] = combinedData['total_cost_by_month'].map(r'\${:,.2f}'.format)
        combinedData['ave_daily_cost_by_month'] = combinedData['ave_daily_cost_by_month'].map(r'\${:,.2f}'.format)
combinedData = combinedData[self.reportRange[0]:self.reportRange[-1]]
combinedData = combinedData.astype(str)
#combinedData = combinedData.astype(dtype=pd.StringDtype())
combinedData.loc[combinedData['ave_daily_by_month'] != 'nan', 'ave_daily_by_month'] = combinedData.loc[combinedData['ave_daily_by_month'] != 'nan', 'ave_daily_by_month'] + " (" + combinedData.loc[combinedData['ave_daily_by_month'] != 'nan', 'sample_size'] + ")"
subset = combinedData[['month_year','ave_daily_by_month','ave_daily_cost_by_month','total_gal_by_month', 'total_cost_by_month','ave_indoor_t_by_month','ave_outdoor_t_by_month']]
myTable = [tuple(x) for x in subset.to_numpy()]
return myTable
def generateHighMonths(self):
        '''calculate which months are in the 90th percentile for fuel consumption for the entire report period based on the estimatedGallonsByMonth attribute
        :return list of string month names'''
        highValue = np.percentile(self.estimatedGallonsByMonth, 90)
        highMonths = self.estimatedGallonsByMonth[self.estimatedGallonsByMonth > highValue].index.month
if len(highMonths) > 0:
return [datetime.datetime.strftime(datetime.datetime(2021, h, 1), format="%B") for h in highMonths]
else:
return None
def generateMetrics(self):
super().generateMetrics() #generate all the metrics used in monthly reports
self.calculateMeanDailyGallonsPerMonth() #gpm is an estimated average per month
self.aveYearlyCost = self.getAveCostPerYear()
firstIndex = self.indoorTData[pd.notnull(self.indoorTData['inT'])].index[0]
lastIndex = self.indoorTData[pd.notnull(self.indoorTData['inT'])].index[-1] + MonthEnd(1)
if lastIndex.month == firstIndex.month:
lastIndex = lastIndex + pd.to_timedelta('1d')
        if firstIndex.hour == 2: #starting a sequence at 2 am will result in an error once daylight saving time ends
firstIndex = firstIndex + pd.to_timedelta('1 h')
elif firstIndex.hour == 1:
firstIndex = firstIndex + | pd.to_timedelta('2 h') | pandas.to_timedelta |
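# --- Illustrative sketch (an addition, not taken from the project above) ---
# The concat + ffill pattern used in MultiMonthReport.getCostPerDay, shown on
# synthetic data: daily consumption is joined with a sparser price series and
# the last known price is carried forward. All values here are made up.
import pandas as pd

days = pd.date_range("2020-01-01", periods=5, freq="D")
fuel_by_day = pd.Series([1.2, 0.9, 1.5, 1.1, 1.3], index=days, name="fuel_consumption")
fuel_price = pd.Series([3.10, 3.25], index=days[[0, 3]], name="fuel_price")

df = pd.concat([fuel_by_day, fuel_price], axis=1)
df["fuel_price"] = df["fuel_price"].ffill()   # fill days with no posted price
cost_per_day = df["fuel_consumption"] * df["fuel_price"]
print(cost_per_day)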
from flask import Flask, render_template, request, redirect, make_response, url_for
app_onc = Flask(__name__)
import astrodbkit
from astrodbkit import astrodb
from SEDkit import sed
from SEDkit import utilities as u
import os
import sys
import re
from io import StringIO
from bokeh.plotting import figure
from bokeh.embed import components
from bokeh.models import ColumnDataSource, HoverTool, OpenURL, TapTool, Range1d
from bokeh.models.widgets import Panel, Tabs
from astropy import units as q
from astropy.coordinates import SkyCoord
import astropy.constants as ac
from scipy.ndimage.interpolation import zoom
import pandas as pd
import numpy as np
TABLE_CLASSES = 'display no-wrap hover table'
app_onc.vars = dict()
app_onc.vars['query'] = ''
app_onc.vars['search'] = ''
app_onc.vars['specid'] = ''
app_onc.vars['source_id'] = ''
db_file = os.environ['ONC_database']
db = astrodb.Database(db_file)
| pd.set_option('max_colwidth', -1) | pandas.set_option |
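# --- Side-note sketch (an addition, not part of the app above) ---
# Widening max_colwidth matters when rendering query results to HTML: with the
# default width, long cell strings (e.g. file paths) get truncated with '...'.
# The demo frame below is a made-up example; TABLE_CLASSES comes from the module above.
demo = pd.DataFrame({"path": ["/very/long/path/to/some/spectrum_file_0001.fits"]})
print(demo.to_html(classes=TABLE_CLASSES, index=False))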
from numpy import where
from collections import Counter
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
from pandas import read_csv
import pandas as pd
import numpy as np
# define dataset
X, y = make_blobs(n_samples=1000, centers=4, random_state=1)
va = read_csv('Walking.csv')
MaxAx = va.MaxAx
MinAy = va.MinAy
MaxGx = va.MaxGx
tmparr = np.zeros_like(MaxAx)
aa = pd.DataFrame({"MaxAx": MaxAx, "MinAy": MinAy}) # x,y in 2D
y = va.PoseName
X = np.array(aa)
# summarize dataset shape
print(X.shape, y.shape)
# summarize observations by class label
counter = Counter(y)
print(counter)
# summarize first few examples
# for i in range(10):
# print(X[i], y[i])
# plot the dataset and color the by class label
for label, _ in counter.items():
row_ix = where(y == label)[0]
plt.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
plt.figure(1)
plt.xlabel('Max Ax')
plt.ylabel('Min Ay')
plt.title('Walking')
plt.legend()
plt.show()
va = read_csv('Upstair.csv')
MaxAx = va.MaxAx
MinAy = va.MinAy
MaxGx = va.MaxGx
tmparr = np.zeros_like(MaxAx)
aa = | pd.DataFrame({"MaxAx": MaxAx, "MinAy": MinAy}) | pandas.DataFrame |
import dask.dataframe as dd
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet, Relationship
from featuretools.utils.gen_utils import Library
def test_create_entity_from_dask_df(pd_es):
dask_es = EntitySet(id="dask_es")
log_dask = dd.from_pandas(pd_es["log"].df, npartitions=2)
dask_es = dask_es.entity_from_dataframe(
entity_id="log_dask",
dataframe=log_dask,
index="id",
time_index="datetime",
variable_types=pd_es["log"].variable_types
)
pd.testing.assert_frame_equal(pd_es["log"].df, dask_es["log_dask"].df.compute(), check_like=True)
def test_create_entity_with_non_numeric_index(pd_es, dask_es):
df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id",
variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric})
pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), dask_es['new_entity'].df.compute())
def test_create_entityset_with_mixed_dataframe_types(pd_es, dask_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
dask_df = dd.from_pandas(df, npartitions=2)
# Test error is raised when trying to add Dask entity to entitset with existing pandas entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(dask_df), type(pd_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=dask_df,
index="id")
# Test error is raised when trying to add pandas entity to entitset with existing dask entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(df), type(dask_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
dask_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
dask_es = EntitySet(id="dask_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [ | pd.to_datetime('2019-01-10') | pandas.to_datetime |
import json
import os
import numpy as np
import pandas as pd
import arviz as az
if os.environ.get("USEGIT") == "true":
env_name = "git"
else:
env_name = "pypi-cran"
with open(f"8school_results_{env_name}.json") as f:
res = json.load(f)
res = np.array(res)
print(res.shape)
res = {"X": np.swapaxes(res, 0, 1)}
res_warmup = {"X": res["X"][:, :-4000, :]}
res = {"X": res["X"][:, -4000:, :]}
print(res["X"].shape)
print(res_warmup["X"].shape)
idata = az.from_dict(res)
print(idata)
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
print(az.summary(idata))
with open(f"posterior_summary_{env_name}.json") as f:
res_posterior_summary = json.load(f)
res_posterior_summary = pd.DataFrame.from_records(
res_posterior_summary, index="variable"
)
res_posterior_summary.index.name = None
print(res_posterior_summary)
reference = (
pd.read_csv(
f"./reference_posterior_{env_name}.csv", index_col=0, float_precision="high"
)
.reset_index()
.astype(float)
)
# test arviz functions
funcs = {
"rhat_rank": lambda x: az.rhat(x, method="rank"),
"rhat_raw": lambda x: az.rhat(x, method="identity"),
"ess_bulk": lambda x: az.ess(x, method="bulk"),
"ess_tail": lambda x: az.ess(x, method="tail"),
"ess_mean": lambda x: az.ess(x, method="mean"),
"ess_sd": lambda x: az.ess(x, method="sd"),
"ess_median": lambda x: az.ess(x, method="median"),
"ess_raw": lambda x: az.ess(x, method="identity"),
"ess_quantile01": lambda x: az.ess(x, method="quantile", prob=0.01),
"ess_quantile10": lambda x: az.ess(x, method="quantile", prob=0.1),
"ess_quantile30": lambda x: az.ess(x, method="quantile", prob=0.3),
"mcse_mean": lambda x: az.mcse(x, method="mean"),
"mcse_sd": lambda x: az.mcse(x, method="sd"),
"mcse_median": lambda x: az.mcse(x, method="quantile", prob=0.5),
"mcse_quantile01": lambda x: az.mcse(x, method="quantile", prob=0.01),
"mcse_quantile10": lambda x: az.mcse(x, method="quantile", prob=0.1),
"mcse_quantile30": lambda x: az.mcse(x, method="quantile", prob=0.3),
}
results = {}
for key, coord_dict, vals in az.plots.plot_utils.xarray_var_iter(
idata.posterior, combined=True
):
if coord_dict:
key = "{}".format(list(coord_dict.values())[0] + 1)
results[key] = {func_name: func(vals) for func_name, func in funcs.items()}
arviz_data = | pd.DataFrame.from_dict(results) | pandas.DataFrame.from_dict |
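# --- Illustrative continuation sketch (an assumption about intent, not from the original script) ---
# `results` maps each variable name to a dict of diagnostic values, so
# DataFrame.from_dict puts variables in columns; transposing gives one row per
# variable, which lines up with the az.summary / reference-table layout above.
diagnostics = pd.DataFrame.from_dict(results).T
print(diagnostics.round(4))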
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with columns of type bool, containing some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
        # when the dtypes of the pandas series differ,
        # the combined ndarray will have dtype=object,
        # so it needs to be handled properly
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
| Series({'A': 1, 'B': 3, 'C': 3}) | pandas.Series |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_(leftside._get_axis(axis).equals(level_index))
self.assert_(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEquals(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEquals(result.index.names, self.ymd.index.names[1:])
self.assertEquals(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEquals(result.index.name, self.ymd.index.names[2])
self.assertEquals(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEquals(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assert_((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result)
assert_frame_equal(df1, df2)
self.assertEquals(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]]
assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]].T
assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]].T
assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
self.assertEqual(result.index.names, ('one', 'two'))
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples(
[('1a', '2a'), ('1a', '2b'), ('1a', '2c')])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
self.assert_(df['new'].isnull().all())
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.ix[subset] = 99
self.assert_((self.frame.ix[subset].values == 99).all())
col = self.frame['B']
col[subset] = 97
self.assert_((self.frame.ix[subset, 'B'] == 97).all())
def test_frame_dict_constructor_empty_series(self):
s1 = Series([1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3),
(2, 2), (2, 4)]))
s2 = Series([1, 2, 3, 4],
index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
df = DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
df = DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'),
('Colorado', 'Green')])
index = MultiIndex.from_tuples(
[('a', 1), ('a', 2), ('b', 1), ('b', 2)])
frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
columns=columns)
result = frame.ix[:, 1]
exp = frame.icol(1)
tm.assert_isinstance(result, Series)
assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
columns=list("ABCD"))
df = df.set_index(['A', 'B'])
ix = MultiIndex.from_tuples([(1, 1)])
df.ix[ix, "C"] = '_'
self.assert_((df.xs((1, 1))['C'] == '_').all())
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n),
MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
self.assertEquals(s[("a", 5)], 5)
self.assertEquals(s[("a", 6)], 6)
self.assertEquals(s[("a", 7)], 7)
_index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ("faz", "boo")
df.index = l
repr(df)
# this travels an improper code path
l[0] = ["faz", "boo"]
df.index = l
repr(df)
def test_tuples_have_na(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0],
[0, 1, 2, 3, 0, 1, 2, 3]])
self.assertTrue(isnull(index[4][0]))
self.assertTrue(isnull(index.values[4][0]))
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo','demo','demo','demo']
idx = MultiIndex.from_tuples(idx_tp,names = ['STK_ID','RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
self.assertEquals(len(result), 3)
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo','bar',1.0,1],['foo','bar',2.0,2],['bah','bam',3.0,3],
['bah','bam',4.0,4],['foo','bar',5.0,5],['bah','bam',6.0,6]],
columns=list('ABCD'))
df = df.set_index(['A','B'])
df = df.sortlevel(0)
expected = DataFrame([['foo','bar',1.0,1],['foo','bar',2.0,2],['foo','bar',5.0,5]],
columns=list('ABCD')).set_index(['A','B'])
result = df.loc[('foo','bar')]
| assert_frame_equal(result,expected) | pandas.util.testing.assert_frame_equal |
from itertools import product
import pytest
import numpy as np
import pandas as pd
import iguanas.rule_scoring.rule_scoring_methods as rsm
import iguanas.rule_scoring.rule_score_scalers as rss
from iguanas.rule_scoring import RuleScorer
from iguanas.metrics.classification import Precision
@pytest.fixture
def create_data():
np.random.seed(0)
X_rules = pd.DataFrame({
'A': np.random.randint(0, 2, 1000),
'B': np.random.randint(0, 2, 1000),
'C': np.random.randint(0, 2, 1000),
})
y = pd.Series(np.random.randint(0, 2, 1000))
weights = (y + 1) * 2
return X_rules, y, weights
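# Added note (not in the original test module): X_rules simulates three binary
# rule columns, y is a binary target, and `weights` gives positive-class rows
# double the weight of negatives — the 'Weights' expected results below use it.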
@pytest.fixture
def expected_results():
expected_results = {
('LR', 'MMS(-100, -10)', 'No weights'): pd.Series({'A': -10.0, 'B': -100.0, 'C': -32.0}),
('LR', 'MMS(100, 10)', 'No weights'): pd.Series({'A': 10.0, 'B': 100.0, 'C': 32.0}),
('LR', 'CS(-100)', 'No weights'): pd.Series({'A': -69.0, 'B': -100.0, 'C': -77.0}),
('LR', 'CS(100)', 'No weights'): pd.Series({'A': 69.0, 'B': 100.0, 'C': 77.0}),
('PS', 'MMS(-100, -10)', 'No weights'): pd.Series({'A': -10.0, 'B': -100.0, 'C': -36.0}),
('PS', 'MMS(100, 10)', 'No weights'): pd.Series({'A': 10.0, 'B': 100.0, 'C': 36.0}),
('PS', 'CS(-100)', 'No weights'): pd.Series({'A': -91.0, 'B': -100.0, 'C': -94.0}),
('PS', 'CS(100)', 'No weights'): pd.Series({'A': 91.0, 'B': 100.0, 'C': 94.0}),
('RFS', 'MMS(-100, -10)', 'No weights'): pd.Series({'A': -100.0, 'B': -26.0, 'C': -10.0}),
('RFS', 'MMS(100, 10)', 'No weights'): pd.Series({'A': 100.0, 'B': 26.0, 'C': 10.0}),
('RFS', 'CS(-100)', 'No weights'): pd.Series({'A': -100.0, 'B': -73.0, 'C': -68.0}),
('RFS', 'CS(100)', 'No weights'): pd.Series({'A': 100.0, 'B': 73.0, 'C': 68.0}),
('LR', 'MMS(-100, -10)', 'Weights'): pd.Series({'A': -10, 'B': -100, 'C': -31}),
('LR', 'MMS(100, 10)', 'Weights'): pd.Series({'A': 10, 'B': 100, 'C': 31}),
('LR', 'CS(-100)', 'Weights'): pd.Series({'A': -69, 'B': -100, 'C': -76}),
('LR', 'CS(100)', 'Weights'): pd.Series({'A': 69, 'B': 100, 'C': 76}),
('PS', 'MMS(-100, -10)', 'Weights'): | pd.Series({'A': -10, 'B': -100, 'C': -36}) | pandas.Series |
# -*- coding: utf-8 -*-
"""Copy of All 1DCNN.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/18gSDf4XkvJZMP99AeDXIuz6RaI35i0zZ
"""
from six.moves import cPickle as pickle
import keras
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout
from keras.callbacks import ModelCheckpoint
from google.colab import drive
drive.mount('/content/drive')
data_dir = '/content/drive/My Drive/Colab Notebooks/HEX New folder'
# Commented out IPython magic to ensure Python compatibility.
import glob
import os
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
# %matplotlib inline
# model-building imports (the input features are min-max scaled to 0-1 inside LOAD_data)
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
#from keras.utils import to_categorical
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import pandas.util.testing as tm
def ReshapeY(Y_train,n):
Y = list()
for x in Y_train:
Y.append(find_1(x, n))
Y = np.array(Y)
print(Y.shape)
return Y
# look for 1 ( spoof) in each
def find_1(x, n):
if 1 in x:
res = 1
else:
res = 0
return res
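# Illustrative usage of the helpers above (added note, not in the original
# notebook). ReshapeY collapses each per-timestep label sequence into a single
# binary flag, so a window is labelled 1 if any sample in it is a spoof:
#
#   Y_windows = ReshapeY([[0, 0, 1], [0, 0, 0]], n=3)
#   # -> array([1, 0]); note that the `n` argument is currently unused by find_1.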
def LOAD_data(path ):
filenames = glob.glob(path + "/*.csv")
dfs = []
for filename in filenames:
df=pd.read_csv(filename)
if 'le0.csv'== filename[-7:]:
df['attack'] = 0
df = df[190:]
else:
df['attack'] = 1
dfa = df['attack']
df = df[14:]
df = df.iloc[:-180]
df = df.select_dtypes(exclude=['object','bool']) # keep numeric columns only
df = df.loc[:, (df != 0).any(axis=0)] # drop all-zero columns
df = df.drop(df.std()[(df.std() == 0)].index, axis=1) # drop constant columns
df = (df - df.min()) / (df.max() - df.min()) # min-max scale each column to [0, 1]
df['attack'] = dfa
dfs.append(df)
# Concatenate all data into one DataFrame
df = pd.concat(dfs, ignore_index=True)
#df.head()
df = df.select_dtypes(exclude=['object','bool']) # keep numeric columns only
df = df.loc[:, (df != 0).any(axis=0)] # drop all-zero columns
df = df.drop(df.std()[(df.std() == 0)].index, axis=1) # drop constant columns
sf = df[['roll', 'pitch', 'heading', 'rollRate', 'pitchRate', 'yawRate',
'groundSpeed', 'altitudeRelative',
'throttlePct', 'estimatorStatus.horizPosRatio',
'estimatorStatus.vertPosRatio',
'estimatorStatus.horizPosAccuracy','gps.courseOverGround']]
scaled_data = scale(sf)
pca = PCA(n_components = 9)
pca.fit(scaled_data)
pca_data = pca.transform(scaled_data)
pca_data = pd.DataFrame(pca_data)
df_sf = | pd.concat([pca_data, df[['attack']]], axis=1) | pandas.concat |
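# Summary of the LOAD_data pipeline above (added note, not in the original
# notebook): each CSV is loaded and labelled (files ending in 'le0.csv' are
# treated as benign), trimmed, cleaned of non-numeric / constant columns and
# min-max scaled; selected flight-state columns are then standardised, reduced
# to 9 principal components and re-joined with the 'attack' label.
#
#   df_all = LOAD_data(data_dir)   # illustrative call; the path comes from the Drive mount above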
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Daniel
This script contains all the functions used to analyse the structural data.
It contains a function to calculate spearmans correlation, cohens d and kruskal test.
"""
import pandas as pd
from scipy import stats
import math
def correlation(column, volumearray, volume):
'''
Runs Spearman's rank correlation.
Input: a df whose columns are each correlated against one measure;
a second array/df, plus a third argument (string) naming which column of that second array/df to use as the measure.
Returns: lists of p-values and correlation coefficients.
'''
pvalues=[]
correlation=[]
for i in column.columns:
array=pd.concat([volumearray[volume], column[i]],axis=1).dropna()
c, p =stats.spearmanr(array[volume], array[i])
pvalues.append(p)
correlation.append(c)
return pvalues, correlation
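# Illustrative usage (added note, not in the original script); the DataFrame
# and column names here are assumptions:
#
#   pvals, rhos = correlation(cognitive_measures_df, volumes_df, 'hippocampus')
#   # each column of cognitive_measures_df is Spearman-correlated against the
#   # named volume, with pairwise dropping of missing values.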
def cohen_d(group1,group2):
'''
Calculate Cohen's d (standardised mean difference).
Input: two series/lists
Output: d
'''
diff = group1.mean()-group2.mean()
pooledstdev = math.sqrt((group1.std()**2 + group2.std()**2) / 2)
cohend = diff / pooledstdev
return cohend
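# Worked example (added note, not in the original script): with group means 10
# and 8 and both sample standard deviations equal to 2, the pooled SD is
# sqrt((2**2 + 2**2) / 2) = 2, so d = (10 - 8) / 2 = 1.0 (a "large" effect by
# the usual rule of thumb).
#
#   d = cohen_d(pd.Series([8, 10, 12]), pd.Series([6, 8, 10]))   # -> 1.0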
def kruskal(x,y,z):
'''
Runs the Kruskal-Wallis test.
Input: Series from three groups.
Output: one-row df with the p-value, H statistic, degrees of freedom and eta/epsilon effect sizes
'''
number_of_observations = len(pd.concat([x,y,z]))
number_of_groups = len([x, y, z] )
h,p = stats.kruskal(x,y,z)
degrees_of_freedom=(number_of_observations-number_of_groups)-(number_of_groups)
eta = (h-number_of_groups+1)/(number_of_observations-number_of_groups)
epsilon = h/((number_of_observations**2-1)/(number_of_observations+1))
df= | pd.DataFrame({'pval': [p], 'kruskal_test_statistic': [h],'df': [degrees_of_freedom],'eta': [eta],'epsilon':[epsilon]}) | pandas.DataFrame |
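# Illustrative usage of kruskal() above (added note, not in the original
# script); the group Series names are assumptions:
#
#   stats_df = kruskal(controls['volume'], patients_a['volume'], patients_b['volume'])
#   # -> one-row DataFrame with the p-value, H statistic, degrees of freedom
#   #    and the eta / epsilon effect sizes.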
# Data sourced from https://archive.ics.uci.edu/ml/datasets/banknote+authentication#
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn import model_selection, metrics
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
import tensorflow as tf
bank = | pd.read_csv('bank_note_data.csv') | pandas.read_csv |