| prompt (string, length 19–1.03M) | completion (string, length 4–2.12k) | api (string, length 8–90) |
|---|---|---|
import pandas as pd

class qiimetophitsearch:
    def __init__(self, filename):
        self.filename = filename

    def tophit(self, spcname):
        # read the tab-delimited abundance table and return the top hit for one species
        df = pd.read_csv(self.filename, delimiter='\t', header=1, index_col=0)
        return spcname + '\t' + df[spcname].idxmax()

    def tophit_all(self):
        # collect the top hit for every species column
        maxtaxa = []
        df =
|
pd.read_csv(self.filename, delimiter='\t', header=1, index_col=0)
|
pandas.read_csv
|
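The first row truncates `tophit_all` at the assignment. A minimal standalone sketch of what that method presumably computes, reusing the same `read_csv` call (the loop and return value are my reconstruction, not the dataset's completion):

```python
import pandas as pd

def tophit_all(filename):
    # read the tab-delimited abundance table once and collect, for every
    # species column, the row label (taxon) with the highest value
    df = pd.read_csv(filename, delimiter='\t', header=1, index_col=0)
    return [spc + '\t' + df[spc].idxmax() for spc in df.columns]
```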
"""
base_lines.py - microscopy images class
update: 20191001 - modify gaussian fit function
"""
import os
import sys
from tqdm import tqdm_notebook
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
from scipy.signal import argrelmax
# local library
from .rfit import robust_line_fit
from .rfit import robust_inverseabs_fit
from .rfit import gaussian, gaussian2, gaussian3, line, inverseabs
from .base_filters import ImageFilter
from .lineobject import LineObject
from .lineobject import _smooth
__author__ = '<NAME> <<EMAIL>>'
__version__ = '1.1.0'
class ImageLine(ImageFilter):
""" Image based on channel experiments - lineprofile and migration angle detection """
def __init__(self, objects, method='tifffile', debug=False, **kwargs):
""" initialization """
super().__init__(objects, method=method, debug=debug, **kwargs)
""" TifFile class initialization """
# for graphic representation
self._ax1 = None
self._ax2 = None
# fitting info
self._peakdatafname = '.{}_peakinfo.csv'.format(self._meta['basename'])
if os.path.isfile(self._peakdatafname):
if self._debug:
print('... read peak information: {}'.format(self._peakdatafname))
self._peakdata = pd.read_csv(self._peakdatafname, index_col=0)
else:
self._peakdata = pd.DataFrame()
self._lineobject = []
self._kfit = []
self._baseline = 0.0
self._beaminfo = []
self._beamdirection = 'right'
# full width injection case
self._smoothing = 13
self._searchrange = 10
def __repr__(self):
""" representation """
msg = super().__repr__()
msg += '-'*50 + '\n'
msg += '... Wall positions: ({}, {})\n'.format(self._meta['wall1'], self._meta['wall2'])
msg += '... Array positions: ({}, {})\n'.format(self._meta['range1'], self._meta['range2'])
msg += '... Migration Angle: {:.4f} [deg]\n'.format(self._meta['mangle'])
msg += '... Frame Angle: {:.4f} [deg]\n'.format(self._meta['fangle'])
msg += '... Diffusion constant: {:.4f} [um2/s]\n'.format(self._meta['D'])
msg += '... Peclet number: {:.4f}\n'.format(self._meta['Pe'])
msg += '... Pressure: {:.4f} [bar]\n'.format(self._meta['p'])
msg += '... Velocity: {:.5f} [um/s]\n'.format(self._meta['u'])
msg += '... Particle size: {:.4f} [nm]\n'.format(self._meta['psize'])
return msg
def set_wallinfo(self, wallinfo=[0, 512, 0, 512], show=False):
""" manually set wall information """
if len(wallinfo) == 4:
self._meta.update_wall(wallinfo)
else:
print('... wallinfo = [wall1, wall2, range1, range2]')
return
def set_expInfo(self, magnification=None, velocity=-1, p=-1, fangle=0.0, psize=-1, ccd_length=16.0):
""" set experimental values """
if isinstance(magnification, str):
self._meta.update_mag(magnification)
self._meta.update_wall([0, 0, 0, 0])
if velocity > -1: self._meta['u'] = velocity
if p > -1: self._meta['p'] = p
if fangle != 0: self._meta['fangle'] = fangle
if psize > -1: self._meta['psize'] = psize
        # TODO: add bulk diffusion constant
# line profile
def getline_obj(self, frame=-1, coords=None, dtypes='orig'):
""" generate line object using coordinates """
if frame == -1: img = self.tmean(dtypes=dtypes)
else: img = self.getframe(frame=frame, dtypes=dtypes)
self._lineobject = _getline_obj(img, coords=coords)
return self._lineobject
def getline_x(self, frame=-1, y=-1, dtypes='orig', **kwargs):
""" get line profile along x axis """
if frame == -1: img = self.tmean(dtypes=dtypes)
else: img = self.getframe(frame=frame, dtypes=dtypes)
return self._getline_x(img, y=y, **kwargs)
def getline_y(self, frame=-1, x=-1, dtypes='orig'):
""" get line profile along y axis """
if frame == -1: img = self.tmean(dtypes=dtypes)
else: img = self.getframe(frame=frame, dtypes=dtypes)
return self._getline_y(img, x=x)
    def _getline_x(self, img, y=-1, ignore_wall=False):
        """ get line profile along x axis using coordinates """
        if not ignore_wall:
            xs, xf = self._meta['wall1'], self._meta['wall2']
        else:
            # fall back to the full image width when wall positions are ignored
            xs, xf = 0, img.shape[1]
        return _getline_obj(img, coords=[xs, y, xf, y])

    def _getline_y(self, img, x=-1, ignore_wall=False):
        """ get line profile along y axis using coordinates """
        if not ignore_wall:
            ys, yf = self._meta['range1'], self._meta['range2']
        else:
            # fall back to the full image height when the range is ignored
            ys, yf = 0, img.shape[0]
        return _getline_obj(img, coords=[x, ys, x, yf])
def getlines_x_shadow(self, locations, raw=False):
""" get lines with background subtraction """
results = []
results_raw = []
for i in range(len(locations)):
line = _zprofile(self.tmean(), locations[i])
results_raw.append(np.copy(line))
line -= self._findShadowline(locations[i])
results.append(_smooth(line, window_len=self._smoothing))
if raw:
return (results, results_raw)
else:
return results
# fit line profile with various methods
def fitline_x(self, frame=-1, y=-1, method='peaks', **kwargs):
""" fitting line intensity profile along x axis """
line_obj = self.getline_x(frame=frame, dtypes='float', y=y)
self._lineobject = line_obj
return self._fitline(line_obj, method=method, **kwargs)
def fitline_y(self, frame=-1, x=-1, method='peaks', **kwargs):
""" fitting line intensity profile along y axis """
line_obj = self.getline_y(frame=frame, dtypes='float', x=x)
self._lineobject = line_obj
return self._fitline(line_obj, method=method, **kwargs)
def _fitline_x(self, img, y=-1, method='peaks', **kwargs):
line_obj = self._getline_x(img, y=y)
return self._fitline(line_obj, method=method, **kwargs)
def _fitline_y(self, img, x=-1, method='peaks', **kwargs):
line_obj = self._getline_y(img, x=x)
return self._fitline(line_obj, method=method, **kwargs)
def _fitline(self, line_obj, method='gaussian', **kwargs):
""" get line profile and find peak """
nu = kwargs.pop('nu', 100)
if method == 'gaussian':
if self._debug: print('... gaussian fit')
line_obj.fit_gaussian_from(nu=nu, kfit=self._kfit, **kwargs)
elif method == 'gaussian2':
if self._debug: print('... double gaussian fit')
line_obj.fit_gaussian2_from(nu=nu, kfit=self._kfit, **kwargs)
elif method == 'gaussian3':
if self._debug: print('... triple gaussian fit')
line_obj.fit_gaussian3_from(nu=nu, kfit=self._kfit, **kwargs)
elif method == 'peaks':
if self._debug: print('... peak find fit')
line_obj.fit_peaks(**kwargs)
elif method == 'gcdf':
if self._debug: print('... gaussian cdf fit')
line_obj.fit_gcdf(nu=nu)
return line_obj
def fitlines_x(self, locs=-1, method='gaussian', update=False, **kwargs):
return self._fitlines_x(self.tmean(), locs=locs, method=method, update=update, **kwargs)
def _fitlines_x(self, img, locs=-1, method='gaussian', update=False, **kwargs):
""" get peak position datasheet at locs """
# set all y
if locs == -1:
locs = range(int(self._meta['range1']), int(self._meta['range2']))
# read from cache
if (not update) and os.path.isfile(self._peakdatafname):
if self._debug: print('... read from %s' % self._peakdatafname)
self._peakdata =
|
pd.read_csv(self._peakdatafname, index_col=0)
|
pandas.read_csv
|
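`_fitlines_x` is cut off right after the cache read. A small sketch of the cache-or-recompute pattern it appears to implement; the `fit_one` callable and the exact column layout are hypothetical, not taken from the original class:

```python
import os
import pandas as pd

def load_or_fit_peaks(cache_fname, locs, fit_one, update=False):
    # reuse the cached peak table unless an update is forced
    if (not update) and os.path.isfile(cache_fname):
        return pd.read_csv(cache_fname, index_col=0)
    # otherwise fit every requested row and persist the result
    peakdata = pd.DataFrame([fit_one(loc) for loc in locs], index=list(locs))
    peakdata.to_csv(cache_fname)
    return peakdata
```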
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from time import sleep
import pandas as pd
import os
# In[12]:
CHROMEDRIVER_PATH = './../chromedriver'
URL = 'https://www.forbes.com/global2000/'
# Prepare DataFrame
df_list = []
# Init browser instance
browser = webdriver.Chrome(CHROMEDRIVER_PATH)
# Get page
browser.get(URL)
# Reload page
browser.refresh()
delay = 3 # seconds
pages_count = 5
parse_timeout_max = 10
df = pd.DataFrame()
try:
last_id = 0
for page_i in range(pages_count):
# Wait table loading
WebDriverWait(browser, delay).until(
EC.presence_of_element_located((By.XPATH, '//table//tr//td')))
# Get table root
table_element = WebDriverWait(browser, delay).until(
EC.presence_of_element_located((By.XPATH, '//table')))
# Parse table
table_html_string = table_element.get_attribute('outerHTML')
        # Parse the table until it shows new data or we hit the timeout
        parse_timeout = parse_timeout_max
        while df.empty or (last_id == df.iloc[-1]['Rank']):
            # Re-read the table HTML so a reloaded page is actually picked up
            table_html_string = browser.find_element(By.XPATH, '//table').get_attribute('outerHTML')
            df = pd.read_html(table_html_string)[0]
            # Check timeout
            parse_timeout -= 1
            if parse_timeout == 0:
                raise RuntimeError("No new data for parsing.")
            # Wait for the page to reload
            sleep(1)
print("Last parsed rank: %d" % last_id)
# Save new id
last_id = df.iloc[-1]['Rank']
# Add parsed table
df_list.append(
|
pd.read_html(table_html_string)
|
pandas.read_html
|
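The scraper is truncated at the `df_list.append(...)` call. A hedged sketch of the post-processing that typically follows once every page has been collected; the `Rank` column name comes from the loop above, while the output filename is illustrative:

```python
import pandas as pd

def combine_pages(df_list, out_path='forbes_global2000.csv'):
    # pd.read_html returns a list of tables per page, so flatten before concatenating
    frames = [t for item in df_list for t in (item if isinstance(item, list) else [item])]
    combined = pd.concat(frames, ignore_index=True)
    combined = combined.drop_duplicates(subset='Rank').sort_values('Rank')
    combined.to_csv(out_path, index=False)
    return combined
```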
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series(date_range("20130101", periods=5), index=index)
result = ser - other
expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
[
([False, False, True, False], "eq"),
([True, True, False, True], "ne"),
([False, False, True, False], "le"),
([False, False, False, False], "lt"),
([False, True, True, False], "ge"),
([False, True, False, False], "gt"),
],
)
def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, op, fill_value",
[
([False, False, True, True], "eq", 2),
([True, True, False, False], "ne", 2),
([False, False, True, True], "le", 0),
([False, False, False, True], "lt", 0),
([True, True, True, False], "ge", 0),
([True, True, False, False], "gt", 0),
],
)
def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Invalid comparison between dtype=category and str"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self, all_compare_operators):
op = all_compare_operators
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
f = getattr(operator, op)
# test that comparisons work
val = ser[5]
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == "__ne__":
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
tm.assert_series_equal(result, expected)
# FIXME: dont leave commented-out
# result = f(val, ser)
# expected = f(val, ser.dropna()).reindex(ser.index)
# tm.assert_series_equal(result, expected)
def test_ne(self):
ts =
|
Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
|
pandas.Series
|
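`test_ne` is cut off at the Series construction. A hedged guess at the kind of check that likely follows (my reconstruction, not the original test body): the flex `.ne` method should agree with the `!=` operator.

```python
import pandas._testing as tm
from pandas import Series

def check_ne_matches_operator():
    ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
    # Series.ne is the flex version of the != comparison
    tm.assert_series_equal(ts.ne(5), ts != 5)
```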
import errno
import logging
import os
import pandas as pd
from xlrd.biffh import XLRDError
import uuid
import shutil
import math
import json
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.kb_GenericsReportClient import kb_GenericsReport
from installed_clients.GenericsAPIClient import GenericsAPI
from installed_clients.WsLargeDataIOClient import WsLargeDataIO
DATA_EPISTEMOLOGY = ['measured', 'asserted', 'predicted']
PROFILE_CATEGORY = ['community', 'organism']
PROFILE_TYPE = ['amplicon', 'mg', 'modelset']
class ProfileImporter:
@staticmethod
def _mkdir_p(path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
@staticmethod
def _validate_params(params, expected, opt_param=set()):
"""Validates that required parameters are present. Warns if unexpected parameters appear"""
expected = set(expected)
opt_param = set(opt_param)
pkeys = set(params)
if expected - pkeys:
raise ValueError("Required keys {} not in supplied parameters"
.format(", ".join(expected - pkeys)))
defined_param = expected | opt_param
for param in params:
if param not in defined_param:
logging.warning("Unexpected parameter {} supplied".format(param))
@staticmethod
def _convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def _calculate_object_size(self, func_profile_data):
json_size = 0
try:
logging.info('start calculating object size')
json_object = json.dumps(func_profile_data).encode("utf-8")
json_size = len(json_object)
size_str = self._convert_size(json_size)
logging.info('serialized object JSON size: {}'.format(size_str))
except Exception:
logging.info('failed to calculate object size')
return json_size
@staticmethod
def _file_to_df(file_path):
logging.info('start parsing file content to data frame')
try:
df = pd.read_excel(file_path, sheet_name='data', index_col=0)
except XLRDError:
try:
df =
|
pd.read_excel(file_path, index_col=0)
|
pandas.read_excel
|
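`_file_to_df` is truncated inside its Excel fallback. A hedged sketch of a loader in the same spirit; the final delimited-text fallback is my addition, not part of the original importer:

```python
import pandas as pd
from xlrd.biffh import XLRDError

def file_to_df(file_path):
    try:
        # preferred layout: a sheet literally named 'data'
        return pd.read_excel(file_path, sheet_name='data', index_col=0)
    except XLRDError:
        try:
            # fall back to the first sheet
            return pd.read_excel(file_path, index_col=0)
        except Exception:
            # last resort: treat the file as delimited text and sniff the separator
            return pd.read_csv(file_path, sep=None, engine='python', index_col=0)
```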
"""to run random forest on sparse one-hot encoded data, but that is going to take a lot of time.
We can also try reducing the sparse one-hot encoded matrices using singular value decomposition.
This is a very common method of extracting topics in natural language processing.
"""
import pandas as pd
from scipy import sparse
from sklearn import decomposition, ensemble, preprocessing, metrics
def run(fold):
# load the full training data with folds
df =
|
pd.read_csv("./input/cat_train_folds.csv")
|
pandas.read_csv
|
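A minimal sketch of the pipeline the docstring describes, assuming the usual categorical-features setup (one-hot encode everything, reduce with truncated SVD, fit a random forest). The `id`/`target`/`kfold` column names, the component count, and the binary target are assumptions, not taken from the truncated script:

```python
import pandas as pd
from scipy import sparse
from sklearn import decomposition, ensemble, preprocessing

def run_svd_rf(df, fold, n_components=120):
    features = [c for c in df.columns if c not in ("id", "target", "kfold")]
    df_train = df[df.kfold != fold].reset_index(drop=True)
    df_valid = df[df.kfold == fold].reset_index(drop=True)

    # fit the encoder on the full data so train and validation share columns
    ohe = preprocessing.OneHotEncoder()
    ohe.fit(df[features])
    x_train = ohe.transform(df_train[features])
    x_valid = ohe.transform(df_valid[features])

    # reduce the sparse one-hot matrix with truncated SVD (LSA-style)
    svd = decomposition.TruncatedSVD(n_components=n_components)
    svd.fit(sparse.vstack([x_train, x_valid]))
    x_train = svd.transform(x_train)
    x_valid = svd.transform(x_valid)

    model = ensemble.RandomForestClassifier(n_jobs=-1)
    model.fit(x_train, df_train.target.values)
    return model.predict_proba(x_valid)[:, 1]
```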
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'Copyright (c) 2021, AdW Project'
import logging
import sys
import numpy as np
import pandas as pd
from scipy import stats
from copulas import (
EPSILON, get_instance, get_qualified_name, random_state, store_args)
from copulas.marginals import MarginalDistribution
from copulas.models.model import Multivariate
LOGGER = logging.getLogger(__name__)
DEFAULT_DISTRIBUTION = MarginalDistribution
class GaussianCopula(Multivariate):
covariance = None
columns = None
univariates = None
@store_args
def __init__(self, distribution=DEFAULT_DISTRIBUTION, random_seed=None):
self.random_seed = random_seed
self.distribution = distribution
def __repr__(self):
if self.distribution == DEFAULT_DISTRIBUTION:
distribution = ''
elif isinstance(self.distribution, type):
distribution = 'distribution="{}"'.format(self.distribution.__name__)
else:
distribution = 'distribution="{}"'.format(self.distribution)
return 'GaussianCopula({})'.format(distribution)
def _transform_to_normal(self, X):
if isinstance(X, pd.Series):
X = X.to_frame().T
elif not isinstance(X, pd.DataFrame):
if len(X.shape) == 1:
X = [X]
X = pd.DataFrame(X, columns=self.columns)
U = list()
for column_name, univariate in zip(self.columns, self.univariates):
column = X[column_name]
U.append(univariate.cdf(column.values).clip(EPSILON, 1 - EPSILON))
return stats.norm.ppf(np.column_stack(U))
def _get_correlation(self, X, method):
if method == 'pearson':
result = self._transform_to_normal(X)
correlation = pd.DataFrame(data=result).corr().values
elif method == 'spearman':
correlation = self.spearman_to_copula_correlation(X)
else:
correlation = self.kendall_to_copula_correlation(X)
correlation = np.nan_to_num(correlation, nan=0.0)
# If singular, add some noise to the diagonal
if np.linalg.cond(correlation) > 1.0 / sys.float_info.epsilon:
correlation = correlation + np.identity(correlation.shape[0]) * EPSILON
return correlation
def kendall_to_copula_correlation(self, X):
kendall_correlation =
|
pd.DataFrame(data=X)
|
pandas.DataFrame
|
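`kendall_to_copula_correlation` is truncated right after wrapping `X` in a DataFrame. A hedged sketch of the standard conversion it presumably performs: compute pairwise Kendall's tau and map it to the Gaussian-copula (Pearson) correlation via the elliptical-copula relation rho = sin(pi * tau / 2).

```python
import numpy as np
import pandas as pd

def kendall_to_gaussian_correlation(X):
    # pairwise Kendall's tau between columns
    tau = pd.DataFrame(data=X).corr(method='kendall').values
    # map tau to the equivalent Pearson correlation of the Gaussian copula
    return np.sin(np.pi * tau / 2.0)
```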
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import cosine_similarity
u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv('u.user', sep='|', names=u_cols,
encoding='latin-1')
X_train = pd.read_csv('X_train.csv')
r_matrix = X_train.pivot_table(values='rating', index='user_id',
columns='movie_id')
X_test = pd.read_csv('X_test.csv')
item =
|
pd.read_csv('item.csv')
|
pandas.read_csv
|
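The script stops right after loading `item.csv`. A small sketch of the user-based collaborative-filtering step the imports point to (cosine similarity over the user-item matrix); the weighted-mean predictor is a common choice, not necessarily the original author's:

```python
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

def build_user_similarity(r_matrix):
    # treat missing ratings as 0 only for the similarity computation
    dense = r_matrix.fillna(0)
    sim = cosine_similarity(dense, dense)
    return pd.DataFrame(sim, index=r_matrix.index, columns=r_matrix.index)

def cf_user_wmean(user_id, movie_id, r_matrix, user_sim):
    if movie_id not in r_matrix.columns:
        return 3.0  # fallback for movies absent from the training matrix
    ratings = r_matrix[movie_id].dropna()
    weights = user_sim.loc[user_id, ratings.index]
    if weights.sum() == 0:
        return ratings.mean()
    return float(np.dot(weights, ratings) / weights.sum())
```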
"""
Classes and methods to enable a resistics project
A project is an essential element of a resistics environment together with a
configuration.
In particular, this module includes the core Project, Site and Measurement
classes and some supporting functions.
"""
from typing import Iterator, Optional, List, Dict
from pathlib import Path
import pandas as pd
import plotly.graph_objects as go
from resistics.common import ResisticsModel, WriteableMetadata
from resistics.sampling import HighResDateTime, to_timestamp
from resistics.time import TimeMetadata, TimeReader
from resistics.plot import plot_timeline
PROJ_FILE = "resistics.json"
PROJ_DIRS = {
"time": "time",
"calibration": "calibrate",
"spectra": "spectra",
"evals": "evals",
"features": "features",
"masks": "masks",
"results": "results",
"images": "images",
}
def get_calibration_path(proj_dir: Path) -> Path:
"""Get the path to the calibration data"""
return proj_dir / PROJ_DIRS["calibration"]
def get_meas_time_path(proj_dir: Path, site_name: str, meas_name: str) -> Path:
"""Get path to measurement time data"""
return proj_dir / PROJ_DIRS["time"] / site_name / meas_name
def get_meas_spectra_path(
proj_dir: Path, site_name: str, meas_name: str, config_name: str
) -> Path:
"""Get path to measurement spectra data"""
return proj_dir / PROJ_DIRS["spectra"] / site_name / config_name / meas_name
def get_meas_evals_path(
proj_dir: Path, site_name: str, meas_name: str, config_name: str
) -> Path:
"""Get path to measurement evaluation frequency spectra data"""
return proj_dir / PROJ_DIRS["evals"] / site_name / config_name / meas_name
def get_meas_features_path(
proj_dir: Path, site_name: str, meas_name: str, config_name: str
) -> Path:
"""Get path to measurement features data"""
return proj_dir / PROJ_DIRS["features"] / site_name / config_name / meas_name
def get_mask_path(proj_dir: Path, site_name: str, config_name: str) -> Path:
"""Get path to mask data"""
return proj_dir / PROJ_DIRS["masks"] / site_name / config_name
def get_mask_name(fs: float, mask_name: str) -> str:
"""Get the name of a mask file"""
from resistics.common import fs_to_string
return f"{fs_to_string(fs)}_{mask_name}.dat"
def get_results_path(proj_dir: Path, site_name: str, config_name: str) -> Path:
"""Get path to solutions"""
return proj_dir / PROJ_DIRS["results"] / site_name / config_name
def get_solution_name(
fs: float, tf_name: str, tf_var: str, postfix: Optional[str] = None
) -> str:
"""Get the name of a solution file"""
from resistics.common import fs_to_string
solution_name = f"{fs_to_string(fs)}_{tf_name.lower()}"
if tf_var != "":
tf_var = tf_var.replace(" ", "_")
solution_name = solution_name + f"_{tf_var}"
if postfix is None:
return solution_name + ".json"
return solution_name + "_" + postfix + ".json"
class Measurement(ResisticsModel):
"""
Class for interfacing with a measurement
The class holds the original time series metadata and can provide
information about other types of data
"""
site_name: str
dir_path: Path
metadata: TimeMetadata
reader: TimeReader
@property
def name(self) -> str:
"""Get the name of the measurement"""
return self.dir_path.name
class Site(ResisticsModel):
"""
Class for describing Sites
.. note::
This should essentially describe a single instrument setup. If the same
site is re-occupied later with a different instrument setup, it is
suggested to split this into a different site.
"""
dir_path: Path
measurements: Dict[str, Measurement]
begin_time: HighResDateTime
end_time: HighResDateTime
def __iter__(self) -> Iterator:
"""Iterator over measurements"""
return self.measurements.values().__iter__()
def __getitem__(self, meas_name: str) -> Measurement:
"""Get a measurement"""
return self.get_measurement(meas_name)
@property
def name(self) -> str:
"""The Site name"""
return self.dir_path.name
@property
def n_meas(self) -> int:
"""Get the number of measurements"""
return len(self.measurements)
def fs(self) -> List[float]:
"""Get the sampling frequencies in the Site"""
fs = [x.metadata.fs for x in self.measurements.values()]
return sorted(list(set(fs)))
def get_measurement(self, meas_name: str) -> Measurement:
"""Get a measurement"""
from resistics.errors import MeasurementNotFoundError
if meas_name not in self.measurements:
raise MeasurementNotFoundError(self.name, meas_name)
return self.measurements[meas_name]
def get_measurements(self, fs: Optional[float] = None) -> Dict[str, Measurement]:
"""Get dictionary of measurements with optional filter by sampling frequency"""
if fs is None:
return self.measurements
return {
name: meas
for name, meas in self.measurements.items()
if meas.metadata.fs == fs
}
def plot(self) -> go.Figure:
"""Plot the site timeline"""
df = self.to_dataframe()
if len(df.index) == 0:
raise ValueError("No measurements found to plot")
df["fs"] = df["fs"].astype(str)
return plot_timeline(df, y_col="name")
def to_dataframe(self) -> pd.DataFrame:
"""
Get measurements list in a pandas DataFrame
.. note::
Measurement first and last times are converted to pandas Timestamps
as these are more universally useful in a pandas DataFrame. However,
this may result in a loss of precision, especially at high sampling
frequencies.
Returns
-------
pd.DataFrame
Site measurement DataFrame
"""
data = [
[
x.name,
x.metadata.fs,
x.metadata.first_time.isoformat(),
x.metadata.last_time.isoformat(),
]
for x in self.measurements.values()
]
df =
|
pd.DataFrame(data=data, columns=["name", "fs", "first_time", "last_time"])
|
pandas.DataFrame
|
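A short usage sketch for the `Site` helpers above, assuming a `Site` instance named `site` obtained from a loaded resistics project:

```python
# list the measurements as a DataFrame and plot the site timeline
df = site.to_dataframe()
print(df[["name", "fs", "first_time", "last_time"]])
fig = site.plot()
fig.show()
```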
import os
from os import listdir
from os.path import join, isfile
from tqdm import tqdm
import pandas as pd
import json
COLUMNS_RENAME_DICT = {"bikes_in_use": "bu", "bikes_total": "bt", "bikes_percentage": "bp"}
def convert_csv_to_json(input_path, output_path):
df =
|
pd.read_csv(input_path, parse_dates=True)
|
pandas.read_csv
|
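`convert_csv_to_json` is truncated at the read. A hedged sketch of how it plausibly continues, using the `COLUMNS_RENAME_DICT` defined above; the records orientation is an assumption:

```python
import pandas as pd

COLUMNS_RENAME_DICT = {"bikes_in_use": "bu", "bikes_total": "bt", "bikes_percentage": "bp"}

def convert_csv_to_json(input_path, output_path):
    df = pd.read_csv(input_path, parse_dates=True)
    # shorten the column names before dumping to JSON
    df = df.rename(columns=COLUMNS_RENAME_DICT)
    df.to_json(output_path, orient="records")
```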
# (ASCII-art project banner)
import csv
import importlib
import inspect
import json
import multiprocessing as mp
import os
import pickle
import time
import warnings
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from glob import glob
from typing import (Any, Callable, Dict, Iterable, List, NamedTuple, Optional,
Tuple, Type, Union)
import cloudpickle
import eve.app.space as space
import numpy as np
import pandas
import pandas as pd
from matplotlib import pyplot as plt
################################
# VECTORIZED ENVIRONMENT
################################
# Define type aliases here to avoid circular import
# Used when we want to access one or more VecEnv
VecEnvIndices = Union[None, int, Iterable[int]]
# VecEnvObs is what is returned by the reset() method
# it contains the observation for each env
VecEnvObs = Union[np.ndarray, Dict[str, np.ndarray], Tuple[np.ndarray, ...]]
# VecEnvStepReturn is what is returned by the step() method
# it contains the observation, reward, done, info for each env
VecEnvStepReturn = Tuple[VecEnvObs, np.ndarray, np.ndarray, List[Dict]]
class EveEnv(object):
"""The main OpenAI class. It encapsulates an environment with
arbitrary behind-the-scenes dynamics. An environment can be
partially or fully observed.
The main API methods that users of this class need to know are:
step
reset
render
close
seed
And set the following attributes:
action_space: The Space object corresponding to valid actions
observation_space: The Space object corresponding to valid observations
reward_range: A tuple corresponding to the min and max possible rewards
Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.
The methods are accessed publicly as "step", "reset", etc...
"""
# Set this in SOME subclasses
metadata = {'render.modes': []}
reward_range = (-float('inf'), float('inf'))
spec = None
# Set these in ALL subclasses
action_space = None
observation_space = None
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (bool): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
raise NotImplementedError
def reset(self):
"""Resets the environment to an initial state and returns an initial
observation.
Note that this function should not reset the environment's random
number generator(s); random variables in the environment's state should
be sampled independently between multiple calls to `reset()`. In other
words, each call of `reset()` should yield an environment suitable for
a new episode, independent of previous episodes.
Returns:
observation (object): the initial observation.
"""
raise NotImplementedError
def render(self, mode='human'):
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.) By convention,
if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
        - rgb_array: Return a numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
- ansi: Return a string (str) or StringIO.StringIO containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Note:
Make sure that your class's metadata 'render.modes' key includes
the list of supported modes. It's recommended to call super()
in implementations to use the functionality of this method.
Args:
mode (str): the mode to render with
Example:
class MyEnv(EveEnv):
metadata = {'render.modes': ['human', 'rgb_array']}
def render(self, mode='human'):
if mode == 'rgb_array':
return np.array(...) # return RGB frame suitable for video
elif mode == 'human':
... # pop up a window and render
else:
super(MyEnv, self).render(mode=mode) # just raise an exception
"""
raise NotImplementedError
def close(self):
"""Override close in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
pass
def seed(self, seed=None):
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
return
@property
def unwrapped(self):
"""Completely unwrap this env.
Returns:
EveEnv: The base non-wrapped EveEnv instance
"""
return self
def __str__(self):
if self.spec is None:
return '<{} instance>'.format(type(self).__name__)
else:
return '<{}<{}>>'.format(type(self).__name__, self.spec.id)
def __enter__(self):
"""Support with-statement for the environment. """
return self
def __exit__(self, *args):
"""Support with-statement for the environment. """
self.close()
# propagate exception
return False
class GoalEnv(EveEnv):
"""A goal-based environment. It functions just as any regular OpenAI environment but it
imposes a required structure on the observation_space. More concretely, the observation
space is required to contain at least three elements, namely `observation`, `desired_goal`, and
`achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.
`achieved_goal` is the goal that it currently achieved instead. `observation` contains the
actual observations of the environment as per usual.
"""
def reset(self):
# Enforce that each GoalEnv uses a Goal-compatible observation space.
if not isinstance(self.observation_space, space.EveDict):
raise TypeError(
'GoalEnv requires an observation space of type space.EveDict')
for key in ['observation', 'achieved_goal', 'desired_goal']:
if key not in self.observation_space.spaces:
raise KeyError(
'GoalEnv requires the "{}" key to be part of the observation dictionary.'
.format(key))
def compute_reward(self, achieved_goal, desired_goal, info):
"""Compute the step reward. This externalizes the reward function and makes
it dependent on a desired goal and the one that was achieved. If you wish to include
additional rewards that are independent of the goal, you can include the necessary values
to derive it in 'info' and compute it accordingly.
Args:
achieved_goal (object): the goal that was achieved during execution
desired_goal (object): the desired goal that we asked the agent to attempt to achieve
info (dict): an info dictionary with additional information
Returns:
float: The reward that corresponds to the provided achieved goal w.r.t. to the desired
goal. Note that the following should always hold true:
ob, reward, done, info = env.step()
assert reward == env.compute_reward(ob['achieved_goal'], ob['goal'], info)
"""
raise NotImplementedError
class Wrapper(EveEnv):
"""Wraps the environment to allow a modular transformation.
This class is the base class for all wrappers. The subclass could override
some methods to change the behavior of the original environment without touching the
original code.
.. note::
Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.
"""
def __init__(self, env):
self.env = env
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.reward_range = self.env.reward_range
self.metadata = self.env.metadata
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name))
return getattr(self.env, name)
@property
def spec(self):
return self.env.spec
@classmethod
def class_name(cls):
return cls.__name__
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def render(self, mode='human', **kwargs):
return self.env.render(mode, **kwargs)
def close(self):
return self.env.close()
def seed(self, seed=None):
return self.env.seed(seed)
def compute_reward(self, achieved_goal, desired_goal, info):
return self.env.compute_reward(achieved_goal, desired_goal, info)
def __str__(self):
return '<{}{}>'.format(type(self).__name__, self.env)
def __repr__(self):
return str(self)
@property
def unwrapped(self):
return self.env.unwrapped
class ObservationWrapper(Wrapper):
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
return self.observation(observation)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return self.observation(observation), reward, done, info
def observation(self, observation):
raise NotImplementedError
class RewardWrapper(Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return observation, self.reward(reward), done, info
def reward(self, reward):
raise NotImplementedError
class ActionWrapper(Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
return self.env.step(self.action(action))
def action(self, action):
raise NotImplementedError
def reverse_action(self, action):
raise NotImplementedError
class FlattenObservation(ObservationWrapper):
r"""Observation wrapper that flattens the observation."""
def __init__(self, env):
super(FlattenObservation, self).__init__(env)
self.observation_space = space.flatten_space(env.observation_space)
def observation(self, observation):
return space.flatten(self.env.observation_space, observation)
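# --- illustrative sketch (not part of the original module) -------------------
# A minimal RewardWrapper subclass demonstrating the wrapper pattern documented
# above: only the `reward` hook is overridden; everything else is delegated to
# the wrapped environment by the Wrapper base class.
class ClipReward(RewardWrapper):
    def __init__(self, env, low=-1.0, high=1.0):
        super().__init__(env)
        self.low, self.high = low, high

    def reward(self, reward):
        # clamp the raw reward into [low, high]
        return float(np.clip(reward, self.low, self.high))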
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
:param num_envs: the number of environments
:param observation_space: the observation space
:param action_space: the action space
"""
def __init__(self, num_envs: int, observation_space: space.EveSpace,
action_space: space.EveSpace):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self) -> VecEnvObs:
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
:return: observation
"""
raise NotImplementedError()
@abstractmethod
def step_async(self, actions: np.ndarray) -> None:
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
raise NotImplementedError()
@abstractmethod
def step_wait(self) -> VecEnvStepReturn:
"""
Wait for the step taken with step_async().
:return: observation, reward, done, information
"""
raise NotImplementedError()
@abstractmethod
def close(self) -> None:
"""
Clean up the environment's resources.
"""
raise NotImplementedError()
@abstractmethod
def get_attr(self,
attr_name: str,
indices: VecEnvIndices = None) -> List[Any]:
"""
Return attribute from vectorized environment.
:param attr_name: The name of the attribute whose value to return
:param indices: Indices of envs to get attribute from
:return: List of values of 'attr_name' in all environments
"""
raise NotImplementedError()
@abstractmethod
def set_attr(self,
attr_name: str,
value: Any,
indices: VecEnvIndices = None) -> None:
"""
Set attribute inside vectorized environments.
:param attr_name: The name of attribute to assign new value
:param value: Value to assign to `attr_name`
:param indices: Indices of envs to assign value
:return:
"""
raise NotImplementedError()
@abstractmethod
def env_method(self,
method_name: str,
*method_args,
indices: VecEnvIndices = None,
**method_kwargs) -> List[Any]:
"""
Call instance methods of vectorized environments.
:param method_name: The name of the environment method to invoke.
:param indices: Indices of envs whose method to call
:param method_args: Any positional arguments to provide in the call
:param method_kwargs: Any keyword arguments to provide in the call
:return: List of items returned by the environment's method call
"""
raise NotImplementedError()
@abstractmethod
def env_is_wrapped(self,
wrapper_class: Type[Wrapper],
indices: VecEnvIndices = None) -> List[bool]:
"""
Check if environments are wrapped with a given wrapper.
        :param wrapper_class: The wrapper class to look for
        :param indices: Indices of envs to check
:return: True if the env is wrapped, False otherwise, for each env queried.
"""
raise NotImplementedError()
def step(self, actions: np.ndarray) -> VecEnvStepReturn:
"""
Step the environments with the given action
:param actions: the action
:return: observation, reward, done, information
"""
self.step_async(actions)
return self.step_wait()
@abstractmethod
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
"""
Sets the random seeds for all environments, based on a given seed.
Each individual environment will still get its own seed, by incrementing the given seed.
:param seed: The random seed. May be None for completely random seeding.
:return: Returns a list containing the seeds for each individual env.
Note that all list elements may be None, if the env does not return anything when being seeded.
"""
pass
@property
def unwrapped(self) -> "VecEnv":
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped # pylint: disable=no-member
else:
return self
def getattr_depth_check(self, name: str,
already_found: bool) -> Optional[str]:
"""Check if an attribute reference is being hidden in a recursive call to __getattr__
:param name: name of attribute to check for
:param already_found: whether this attribute has already been found in a wrapper
:return: name of module whose attribute is being shadowed, if any.
"""
if hasattr(self, name) and already_found:
return f"{type(self).__module__}.{type(self).__name__}"
else:
return None
def _get_indices(self, indices: VecEnvIndices) -> Iterable[int]:
"""
Convert a flexibly-typed reference to environment indices to an implied list of indices.
:param indices: refers to indices of envs.
:return: the implied list of indices.
"""
if indices is None:
indices = range(self.num_envs)
elif isinstance(indices, int):
indices = [indices]
return indices
class VecEnvWrapper(VecEnv):
"""
Vectorized environment base class
:param venv: the vectorized environment to wrap
:param observation_space: the observation space (can be None to load from venv)
:param action_space: the action space (can be None to load from venv)
"""
def __init__(
self,
venv: VecEnv,
observation_space: Optional[space.EveSpace] = None,
action_space: Optional[space.EveSpace] = None,
):
self.venv = venv
VecEnv.__init__(
self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space,
)
self.class_attributes = dict(inspect.getmembers(self.__class__))
def step_async(self, actions: np.ndarray) -> None:
self.venv.step_async(actions)
@abstractmethod
def reset(self) -> VecEnvObs:
pass
@abstractmethod
def step_wait(self) -> VecEnvStepReturn:
pass
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
return self.venv.seed(seed)
def close(self) -> None:
return self.venv.close()
def get_attr(self,
attr_name: str,
indices: VecEnvIndices = None) -> List[Any]:
return self.venv.get_attr(attr_name, indices)
def set_attr(self,
attr_name: str,
value: Any,
indices: VecEnvIndices = None) -> None:
return self.venv.set_attr(attr_name, value, indices)
def env_method(self,
method_name: str,
*method_args,
indices: VecEnvIndices = None,
**method_kwargs) -> List[Any]:
return self.venv.env_method(method_name,
*method_args,
indices=indices,
**method_kwargs)
def env_is_wrapped(self,
wrapper_class: Type[Wrapper],
indices: VecEnvIndices = None) -> List[bool]:
return self.venv.env_is_wrapped(wrapper_class, indices=indices)
def __getattr__(self, name: str) -> Any:
"""Find attribute from wrapped venv(s) if this wrapper does not have it.
Useful for accessing attributes from venvs which are wrapped with multiple wrappers
which have unique attributes of interest.
"""
blocked_class = self.getattr_depth_check(name, already_found=False)
if blocked_class is not None:
own_class = f"{type(self).__module__}.{type(self).__name__}"
            error_str = (
                f"Error: Recursive attribute lookup for {name} from {own_class} is "
                f"ambiguous and hides attribute from {blocked_class}")
raise AttributeError(error_str)
return self.getattr_recursive(name)
def _get_all_attributes(self) -> Dict[str, Any]:
"""Get all (inherited) instance and class attributes
:return: all_attributes
"""
all_attributes = self.__dict__.copy()
all_attributes.update(self.class_attributes)
return all_attributes
def getattr_recursive(self, name: str) -> Any:
"""Recursively check wrappers to find attribute.
:param name: name of attribute to look for
:return: attribute
"""
all_attributes = self._get_all_attributes()
if name in all_attributes: # attribute is present in this wrapper
attr = getattr(self, name)
elif hasattr(self.venv, "getattr_recursive"):
# Attribute not present, child is wrapper. Call getattr_recursive rather than getattr
# to avoid a duplicate call to getattr_depth_check.
attr = self.venv.getattr_recursive(name)
else: # attribute not present, child is an unwrapped VecEnv
attr = getattr(self.venv, name)
return attr
def getattr_depth_check(self, name: str, already_found: bool) -> str:
"""See base class.
:return: name of module whose attribute is being shadowed, if any.
"""
all_attributes = self._get_all_attributes()
if name in all_attributes and already_found:
# this venv's attribute is being hidden because of a higher venv.
shadowed_wrapper_class = f"{type(self).__module__}.{type(self).__name__}"
elif name in all_attributes and not already_found:
# we have found the first reference to the attribute. Now check for duplicates.
shadowed_wrapper_class = self.venv.getattr_depth_check(name, True)
else:
# this wrapper does not have the attribute. Keep searching.
shadowed_wrapper_class = self.venv.getattr_depth_check(
name, already_found)
return shadowed_wrapper_class
def copy_obs_dict(obs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
"""
Deep-copy a dict of numpy arrays.
:param obs: a dict of numpy arrays.
:return: a dict of copied numpy arrays.
"""
assert isinstance(
obs, OrderedDict), f"unexpected type for observations '{type(obs)}'"
return OrderedDict([(k, np.copy(v)) for k, v in obs.items()])
def dict_to_obs(space_: space.EveSpace,
obs_dict: Dict[Any, np.ndarray]) -> VecEnvObs:
"""
Convert an internal representation raw_obs into the appropriate type
specified by space.
:param space: an observation space.
:param obs_dict: a dict of numpy arrays.
:return: returns an observation of the same type as space.
If space is Dict, function is identity; if space is Tuple, converts dict to Tuple;
otherwise, space is unstructured and returns the value raw_obs[None].
"""
if isinstance(space_, space.EveDict):
return obs_dict
elif isinstance(space_, space.EveTuple):
assert len(obs_dict) == len(
space_.spaces
), "size of observation does not match size of observation space"
return tuple((obs_dict[i] for i in range(len(space_.spaces))))
else:
assert set(obs_dict.keys()) == {
None
}, "multiple observation keys for unstructured observation space"
return obs_dict[None]
def obs_space_info(
obs_space: space.EveSpace
) -> Tuple[List[str], Dict[Any, Tuple[int, ...]], Dict[Any, np.dtype]]:
"""
Get dict-structured information about a eve.app.EveSpace.
Dict spaces are represented directly by their dict of subspaces.
Tuple spaces are converted into a dict with keys indexing into the tuple.
Unstructured spaces are represented by {None: obs_space}.
:param obs_space: an observation space
:return: A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes.
"""
if isinstance(obs_space, space.EveDict):
assert isinstance(
obs_space.spaces,
OrderedDict), "Dict space must have ordered subspaces"
subspaces = obs_space.spaces
elif isinstance(obs_space, space.EveTuple):
subspaces = {i: space for i, space in enumerate(obs_space.spaces)}
else:
assert not hasattr(
obs_space,
"spaces"), f"Unsupported structured space '{type(obs_space)}'"
subspaces = {None: obs_space}
keys = []
shapes = {}
dtypes = {}
for key, box in subspaces.items():
keys.append(key)
shapes[key] = box.shape
dtypes[key] = box.dtype
return keys, shapes, dtypes
class ObsDictWrapper(VecEnvWrapper):
"""
Wrapper for a VecEnv which overrides the observation space for
Hindsight Experience Replay to support dict observations.
:param env: The vectorized environment to wrap.
"""
def __init__(self, venv: VecEnv):
super(ObsDictWrapper, self).__init__(venv, venv.observation_space,
venv.action_space)
self.venv = venv
self.spaces = list(venv.observation_space.spaces.values())
# get dimensions of observation and goal
if isinstance(self.spaces[0], space.EveDiscrete):
self.obs_dim = 1
self.goal_dim = 1
else:
self.obs_dim = venv.observation_space.spaces["observation"].shape[
0]
self.goal_dim = venv.observation_space.spaces[
"achieved_goal"].shape[0]
# new observation space with concatenated observation and (desired) goal
# for the different types of spaces
if isinstance(self.spaces[0], space.EveBox):
low_values = np.concatenate([
venv.observation_space.spaces["observation"].low,
venv.observation_space.spaces["desired_goal"].low
])
high_values = np.concatenate([
venv.observation_space.spaces["observation"].high,
venv.observation_space.spaces["desired_goal"].high
])
self.observation_space = space.EveBox(low_values,
high_values,
dtype=np.float32)
elif isinstance(self.spaces[0], space.EveMultiBinary):
total_dim = self.obs_dim + self.goal_dim
self.observation_space = space.EveMultiBinary(total_dim)
elif isinstance(self.spaces[0], space.EveDiscrete):
dimensions = [
venv.observation_space.spaces["observation"].n,
venv.observation_space.spaces["desired_goal"].n
]
self.observation_space = space.EveMultiDiscrete(dimensions)
else:
raise NotImplementedError(
f"{type(self.spaces[0])} space is not supported")
def reset(self):
return self.venv.reset()
def step_wait(self):
return self.venv.step_wait()
@staticmethod
def convert_dict(observation_dict: Dict[str, np.ndarray],
observation_key: str = "observation",
goal_key: str = "desired_goal") -> np.ndarray:
"""
Concatenate observation and (desired) goal of observation dict.
:param observation_dict: Dictionary with observation.
        :param observation_key: Key of observation in dictionary.
        :param goal_key: Key of (desired) goal in dictionary.
:return: Concatenated observation.
"""
return np.concatenate(
[observation_dict[observation_key], observation_dict[goal_key]],
axis=-1)
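# Illustrative usage sketch (not part of the original wrapper API): convert_dict
# simply concatenates the "observation" and "desired_goal" entries along the
# last axis.
def _example_convert_dict():
    obs_dict = {
        "observation": np.array([1.0, 2.0]),
        "achieved_goal": np.array([0.0]),
        "desired_goal": np.array([3.0]),
    }
    # -> array([1., 2., 3.])
    return ObsDictWrapper.convert_dict(obs_dict)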
class CloudpickleWrapper:
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
:param var: the variable you wish to wrap for pickling with cloudpickle
"""
def __init__(self, var: Any):
self.var = var
def __getstate__(self) -> Any:
return cloudpickle.dumps(self.var)
def __setstate__(self, var: Any) -> None:
self.var = cloudpickle.loads(var)
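# Illustrative sketch (assumes the stdlib ``pickle`` module is importable here):
# a plain lambda cannot be pickled with the stdlib alone, but wrapping it in
# CloudpickleWrapper makes the round trip work because __getstate__ delegates
# serialization to cloudpickle.
def _example_cloudpickle_wrapper():
    import pickle
    wrapped = CloudpickleWrapper(lambda: "hello")
    restored = pickle.loads(pickle.dumps(wrapped))
    return restored.var()  # -> "hello"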
def _worker(remote: mp.connection.Connection,
parent_remote: mp.connection.Connection,
env_fn_wrapper: CloudpickleWrapper) -> None:
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == "step":
observation, reward, done, info = env.step(data)
if done:
# save final observation where user can get it, then reset
info["terminal_observation"] = observation
observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == "seed":
remote.send(env.seed(data))
elif cmd == "reset":
observation = env.reset()
remote.send(observation)
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.observation_space, env.action_space))
elif cmd == "env_method":
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == "get_attr":
remote.send(getattr(env, data))
elif cmd == "set_attr":
remote.send(setattr(env, data[0], data[1]))
elif cmd == "is_wrapped":
remote.send(is_wrapped(env, data))
else:
raise NotImplementedError(
f"`{cmd}` is not implemented in the worker")
except EOFError:
break
EveObs = Union[Tuple, Dict[str, Any], np.ndarray, int]
EveStepReturn = Tuple[EveObs, float, bool, Dict]
class DummyVecEnv(VecEnv):
"""
Creates a simple vectorized wrapper for multiple environments, calling each environment in sequence on the current
    Python process. This is useful for computationally simple environments such as ``cartpole-v1``,
    as the overhead of multiprocessing or multithreading outweighs the environment computation time.
    This can also be used for RL methods that
    require a vectorized environment but where you only want a single environment to train with.
:param env_fns: a list of functions
that return environments to vectorize
"""
def __init__(self, env_fns: List[Callable[[], "EveEnv"]]):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space,
env.action_space)
obs_space = env.observation_space
self.keys, shapes, dtypes = obs_space_info(obs_space)
if isinstance(env.observation_space, space.EveSpace):
buf_obs_shape = (self.num_envs, env.observation_space.max_neurons)
else:
buf_obs_shape = (self.num_envs, )
self.buf_obs = OrderedDict(
[(k, np.zeros(buf_obs_shape + tuple(shapes[k]), dtype=dtypes[k]))
for k in self.keys], )
        self.buf_dones = np.zeros((self.num_envs, ), dtype=bool)
self.buf_rews = np.zeros((self.num_envs, ), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions: np.ndarray) -> None:
self.actions = actions
def step_wait(self) -> VecEnvStepReturn:
for env_idx in range(self.num_envs):
obs, self.buf_rews[env_idx], self.buf_dones[
env_idx], self.buf_infos[env_idx] = self.envs[env_idx].step(
self.actions[env_idx])
if self.buf_dones[env_idx]:
# save final observation where user can get it, then reset
self.buf_infos[env_idx]["terminal_observation"] = obs
obs = self.envs[env_idx].reset()
self._save_obs(env_idx, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews),
np.copy(self.buf_dones), deepcopy(self.buf_infos))
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
seeds = list()
for idx, env in enumerate(self.envs):
seeds.append(env.seed(seed + idx))
return seeds
def reset(self) -> VecEnvObs:
for env_idx in range(self.num_envs):
obs = self.envs[env_idx].reset()
self._save_obs(env_idx, obs)
return self._obs_from_buf()
def close(self) -> None:
for env in self.envs:
env.close()
def _save_obs(self, env_idx: int, obs: VecEnvObs) -> None:
for key in self.keys:
if key is None:
self.buf_obs[key][env_idx] = obs
else:
self.buf_obs[key][env_idx] = obs[key]
def _obs_from_buf(self) -> VecEnvObs:
return dict_to_obs(self.observation_space, copy_obs_dict(self.buf_obs))
def get_attr(self,
attr_name: str,
indices: VecEnvIndices = None) -> List[Any]:
"""Return attribute from vectorized environment (see base class)."""
target_envs = self._get_target_envs(indices)
return [getattr(env_i, attr_name) for env_i in target_envs]
def set_attr(self,
attr_name: str,
value: Any,
indices: VecEnvIndices = None) -> None:
"""Set attribute inside vectorized environments (see base class)."""
target_envs = self._get_target_envs(indices)
for env_i in target_envs:
setattr(env_i, attr_name, value)
def env_method(self,
method_name: str,
*method_args,
indices: VecEnvIndices = None,
**method_kwargs) -> List[Any]:
"""Call instance methods of vectorized environments."""
target_envs = self._get_target_envs(indices)
return [
getattr(env_i, method_name)(*method_args, **method_kwargs)
for env_i in target_envs
]
def env_is_wrapped(self,
wrapper_class: Type[Wrapper],
indices: VecEnvIndices = None) -> List[bool]:
"""Check if worker environments are wrapped with a given wrapper"""
target_envs = self._get_target_envs(indices)
return [is_wrapped(env_i, wrapper_class) for env_i in target_envs]
def _get_target_envs(self, indices: VecEnvIndices) -> List["EveEnv"]:
indices = self._get_indices(indices)
return [self.envs[i] for i in indices]
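# Illustrative usage sketch (assumptions: `make_env` is a zero-argument factory
# for whatever environment type is used in this project, and the action space
# exposes a gym-like ``sample()``; neither is defined in this module).
def _example_dummy_vec_env(make_env, n_envs: int = 2):
    vec_env = DummyVecEnv([make_env for _ in range(n_envs)])
    obs = vec_env.reset()  # one stacked observation per environment
    actions = np.array([vec_env.action_space.sample() for _ in range(n_envs)])
    vec_env.step_async(actions)
    obs, rewards, dones, infos = vec_env.step_wait()
    vec_env.close()
    return obs, rewards, dones, infos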
def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]],
                 space_: space.EveSpace) -> VecEnvObs:
    """
    Flatten observations, depending on the observation space.
    :param obs: observations.
        A list or tuple of observations, one per environment.
        Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.
    :param space_: the observation space of a single environment.
    :return: flattened observations.
        A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.
        Each NumPy array has the environment index as its first axis.
    """
    # The parameter is named ``space_`` (as in ``dict_to_obs``) so that it does
    # not shadow the ``space`` module used in the isinstance checks below.
    assert isinstance(
        obs, (list,
              tuple)), "expected list or tuple of observations per environment"
    assert len(obs) > 0, "need observations from at least one environment"
    if isinstance(space_, space.EveDict):
        assert isinstance(
            space_.spaces,
            OrderedDict), "Dict space must have ordered subspaces"
        assert isinstance(
            obs[0], dict
        ), "non-dict observation for environment with Dict observation space"
        return OrderedDict([(k, np.stack([o[k] for o in obs]))
                            for k in space_.spaces.keys()])
    elif isinstance(space_, space.EveTuple):
        assert isinstance(
            obs[0], tuple
        ), "non-tuple observation for environment with Tuple observation space"
        obs_len = len(space_.spaces)
        return tuple((np.stack([o[i] for o in obs]) for i in range(obs_len)))
else:
return np.stack(obs)
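# Illustrative usage sketch: for an unstructured (non-Dict, non-Tuple) observation
# space the helper simply stacks the per-environment observations along a new
# leading axis; `unstructured_space` is a placeholder for such a space instance.
def _example_flatten_obs(unstructured_space):
    per_env_obs = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
    # -> array of shape (2, 2), one row per environment
    return _flatten_obs(per_env_obs, unstructured_space)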
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own
process, allowing significant speed up when the environment is computationally complex.
For performance reasons, if your environment is not IO bound, the number of environments should not exceed the
number of logical cores on your CPU.
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important when TensorFlow sessions or other non thread-safe
libraries are used in the parent (see issue #217). However, compared to
'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods, users must wrap the code in an
``if __name__ == "__main__":`` block.
For more information, see the multiprocessing documentation.
:param env_fns: Environments to run in subprocesses
:param start_method: method used to start the subprocesses.
Must be one of the methods returned by multiprocessing.get_all_start_methods().
Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
"""
def __init__(self,
env_fns: List[Callable[[], "EveEnv"]],
start_method: Optional[str] = None):
self.waiting = False
self.closed = False
n_envs = len(env_fns)
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = "forkserver" in mp.get_all_start_methods()
start_method = "forkserver" if forkserver_available else "spawn"
ctx = mp.get_context(start_method)
self.remotes, self.work_remotes = zip(
*[ctx.Pipe() for _ in range(n_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes,
env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
# pytype:disable=attribute-error
process = ctx.Process(target=_worker, args=args, daemon=True)
process.start()
self.processes.append(process)
work_remote.close()
self.remotes[0].send(("get_spaces", None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions: np.ndarray) -> None:
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self) -> VecEnvStepReturn:
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(
obs,
self.observation_space), np.stack(rews), np.stack(dones), infos
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
for idx, remote in enumerate(self.remotes):
remote.send(("seed", seed + idx))
return [remote.recv() for remote in self.remotes]
def reset(self) -> VecEnvObs:
for remote in self.remotes:
remote.send(("reset", None))
obs = [remote.recv() for remote in self.remotes]
return _flatten_obs(obs, self.observation_space)
def close(self) -> None:
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for process in self.processes:
process.join()
self.closed = True
def get_attr(self,
attr_name: str,
indices: VecEnvIndices = None) -> List[Any]:
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("get_attr", attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self,
attr_name: str,
value: Any,
indices: VecEnvIndices = None) -> None:
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("set_attr", (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self,
method_name: str,
*method_args,
indices: VecEnvIndices = None,
**method_kwargs) -> List[Any]:
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(
("env_method", (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def env_is_wrapped(self,
wrapper_class: Type[Wrapper],
indices: VecEnvIndices = None) -> List[bool]:
"""Check if worker environments are wrapped with a given wrapper"""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("is_wrapped", wrapper_class))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: refers to indices of envs.
:return: Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
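# Illustrative usage sketch (assumption: `make_env` is a picklable zero-argument
# environment factory). Because the 'forkserver' and 'spawn' start methods
# re-import the main module, the call site should live under an
# ``if __name__ == "__main__":`` guard, as the class docstring warns.
def _example_subproc_vec_env(make_env, n_envs: int = 2):
    vec_env = SubprocVecEnv([make_env for _ in range(n_envs)], start_method="spawn")
    obs = vec_env.reset()
    vec_env.close()
    return obs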
class RunningMeanStd(object):
def __init__(self, epsilon: float = 1e-4, shape: Tuple[int, ...] = ()):
"""
        Calculates the running mean and std of a data stream
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
:param epsilon: helps with arithmetic issues
:param shape: the shape of the data stream's output
"""
self.mean = np.zeros(shape, np.float64)
self.var = np.ones(shape, np.float64)
self.count = epsilon
def update(self, arr: np.ndarray) -> None:
batch_mean = np.mean(arr, axis=0)
batch_var = np.var(arr, axis=0)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean: np.ndarray,
batch_var: np.ndarray, batch_count: int) -> None:
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (
self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
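# Illustrative usage sketch: updating with two batches should give roughly the
# same statistics as computing them over the concatenated data (the small
# epsilon pseudo-count used at initialisation causes a tiny difference).
def _example_running_mean_std():
    rms = RunningMeanStd(shape=(3,))
    batch_a = np.random.randn(8, 3)
    batch_b = np.random.randn(16, 3)
    rms.update(batch_a)
    rms.update(batch_b)
    full = np.concatenate([batch_a, batch_b], axis=0)
    # rms.mean ~= full.mean(axis=0) and rms.var ~= full.var(axis=0)
    return rms.mean, rms.var, full.mean(axis=0), full.var(axis=0)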
def check_for_correct_spaces(env: "EveEnv", observation_space: space.EveSpace,
action_space: space.EveSpace) -> None:
"""
Checks that the environment has same spaces as provided ones. Used by BaseAlgorithm to check if
spaces match after loading the model with given env.
Checked parameters:
- observation_space
- action_space
:param env: Environment to check for valid spaces
:param observation_space: Observation space to check against
:param action_space: Action space to check against
"""
if observation_space != env.observation_space:
raise ValueError(
f"Observation spaces do not match: {observation_space} != {env.observation_space}"
)
if action_space != env.action_space:
raise ValueError(
f"Action spaces do not match: {action_space} != {env.action_space}"
)
class VecNormalize(VecEnvWrapper):
"""
    A moving-average, normalizing wrapper for a vectorized environment.
    It supports saving and loading the moving averages.
:param venv: the vectorized environment to wrap
:param training: Whether to update or not the moving average
:param norm_obs: Whether to normalize observation or not (default: True)
:param norm_reward: Whether to normalize rewards or not (default: True)
:param clip_obs: Max absolute value for observation
:param clip_reward: Max value absolute for discounted reward
:param gamma: discount factor
:param epsilon: To avoid division by zero
"""
def __init__(
self,
venv: VecEnv,
training: bool = True,
norm_obs: bool = True,
norm_reward: bool = True,
clip_obs: float = 10.0,
clip_reward: float = 10.0,
gamma: float = 0.99,
epsilon: float = 1e-8,
):
VecEnvWrapper.__init__(self, venv)
assert isinstance(
self.observation_space, (space.EveBox, space.EveDict)
), "VecNormalize only support `space.EveBox` and `space.EveDict` observation spaces"
if isinstance(self.observation_space, space.EveDict):
self.obs_keys = set(self.observation_space.spaces.keys())
self.obs_spaces = self.observation_space.spaces
self.obs_rms = {
key: RunningMeanStd(shape=space.shape)
for key, space in self.obs_spaces.items()
}
else:
self.obs_keys, self.obs_spaces = None, None
self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)
self.ret_rms = RunningMeanStd(shape=())
self.clip_obs = clip_obs
self.clip_reward = clip_reward
# Returns: discounted rewards
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
self.training = training
self.norm_obs = norm_obs
self.norm_reward = norm_reward
self.old_obs = np.array([])
self.old_reward = np.array([])
def __getstate__(self) -> Dict[str, Any]:
"""
Gets state for pickling.
Excludes self.venv, as in general VecEnv's may not be pickleable."""
state = self.__dict__.copy()
# these attributes are not pickleable
del state["venv"]
del state["class_attributes"]
# these attributes depend on the above and so we would prefer not to pickle
del state["ret"]
return state
def __setstate__(self, state: Dict[str, Any]) -> None:
"""
Restores pickled state.
User must call set_venv() after unpickling before using.
:param state:"""
self.__dict__.update(state)
assert "venv" not in state
self.venv = None
def set_venv(self, venv: VecEnv) -> None:
"""
Sets the vector environment to wrap to venv.
Also sets attributes derived from this such as `num_env`.
:param venv:
"""
if self.venv is not None:
raise ValueError(
"Trying to set venv of already initialized VecNormalize wrapper."
)
VecEnvWrapper.__init__(self, venv)
# Check only that the observation_space match
check_for_correct_spaces(venv, self.observation_space,
venv.action_space)
self.ret = np.zeros(self.num_envs)
def step_wait(self) -> VecEnvStepReturn:
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
obs, rews, news, infos = self.venv.step_wait()
self.old_obs = obs
self.old_reward = rews
if self.training:
if isinstance(obs, dict) and isinstance(self.obs_rms, dict):
for key in self.obs_rms.keys():
self.obs_rms[key].update(obs[key])
else:
self.obs_rms.update(obs)
obs = self.normalize_obs(obs)
if self.training:
self._update_reward(rews)
rews = self.normalize_reward(rews)
self.ret[news] = 0
return obs, rews, news, infos
def _update_reward(self, reward: np.ndarray) -> None:
"""Update reward normalization statistics."""
self.ret = self.ret * self.gamma + reward
self.ret_rms.update(self.ret)
def _normalize_obs(self, obs: np.ndarray,
obs_rms: RunningMeanStd) -> np.ndarray:
"""
Helper to normalize observation.
:param obs:
:param obs_rms: associated statistics
:return: normalized observation
"""
return np.clip(
(obs - obs_rms.mean) / np.sqrt(obs_rms.var + self.epsilon),
-self.clip_obs, self.clip_obs)
def _unnormalize_obs(self, obs: np.ndarray,
obs_rms: RunningMeanStd) -> np.ndarray:
"""
Helper to unnormalize observation.
:param obs:
:param obs_rms: associated statistics
:return: unnormalized observation
"""
return (obs * np.sqrt(obs_rms.var + self.epsilon)) + obs_rms.mean
def normalize_obs(
self, obs: Union[np.ndarray, Dict[str, np.ndarray]]
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""
Normalize observations using this VecNormalize's observations statistics.
Calling this method does not update statistics.
"""
# Avoid modifying by reference the original object
obs_ = deepcopy(obs)
if self.norm_obs:
if isinstance(obs, dict) and isinstance(self.obs_rms, dict):
for key in self.obs_rms.keys():
obs_[key] = self._normalize_obs(
obs[key], self.obs_rms[key]).astype(np.float32)
else:
obs_ = self._normalize_obs(obs,
self.obs_rms).astype(np.float32)
return obs_
def normalize_reward(self, reward: np.ndarray) -> np.ndarray:
"""
Normalize rewards using this VecNormalize's rewards statistics.
Calling this method does not update statistics.
"""
if self.norm_reward:
reward = np.clip(reward / np.sqrt(self.ret_rms.var + self.epsilon),
-self.clip_reward, self.clip_reward)
return reward
def unnormalize_obs(
self, obs: Union[np.ndarray, Dict[str, np.ndarray]]
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
# Avoid modifying by reference the original object
obs_ = deepcopy(obs)
if self.norm_obs:
if isinstance(obs, dict) and isinstance(self.obs_rms, dict):
for key in self.obs_rms.keys():
obs_[key] = self._unnormalize_obs(obs[key],
self.obs_rms[key])
else:
obs_ = self._unnormalize_obs(obs, self.obs_rms)
return obs_
def unnormalize_reward(self, reward: np.ndarray) -> np.ndarray:
if self.norm_reward:
return reward * np.sqrt(self.ret_rms.var + self.epsilon)
return reward
def get_original_obs(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""
Returns an unnormalized version of the observations from the most recent
step or reset.
"""
return deepcopy(self.old_obs)
def get_original_reward(self) -> np.ndarray:
"""
Returns an unnormalized version of the rewards from the most recent step.
"""
return self.old_reward.copy()
def reset(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""
Reset all environments
:return: first observation of the episode
"""
obs = self.venv.reset()
self.old_obs = obs
self.ret = np.zeros(self.num_envs)
if self.training:
self._update_reward(self.ret)
return self.normalize_obs(obs)
@staticmethod
def load(load_path: str, venv: VecEnv) -> "VecNormalize":
"""
Loads a saved VecNormalize object.
:param load_path: the path to load from.
:param venv: the VecEnv to wrap.
:return:
"""
with open(load_path, "rb") as file_handler:
vec_normalize = pickle.load(file_handler)
vec_normalize.set_venv(venv)
return vec_normalize
def save(self, save_path: str) -> None:
"""
Save current VecNormalize object with
all running statistics and settings (e.g. clip_obs)
:param save_path: The path to save to
"""
with open(save_path, "wb") as file_handler:
pickle.dump(self, file_handler)
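# Illustrative sketch of the transformations VecNormalize applies (this mirrors
# _normalize_obs and normalize_reward above; the argument names are placeholders):
def _example_vecnormalize_formulas(obs, obs_rms, reward, ret_rms,
                                   clip_obs=10.0, clip_reward=10.0, epsilon=1e-8):
    norm_obs = np.clip((obs - obs_rms.mean) / np.sqrt(obs_rms.var + epsilon),
                       -clip_obs, clip_obs)
    norm_reward = np.clip(reward / np.sqrt(ret_rms.var + epsilon),
                          -clip_reward, clip_reward)
    return norm_obs, norm_reward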
################################
# MONITOR
################################
class Monitor(Wrapper):
"""
    A monitor wrapper for Gym environments; it is used to record the episode reward, length, time and other data.
:param env: The environment
:param filename: the location to save a log file, can be None for no log
:param allow_early_resets: allows the reset of the environment before it is done
:param reset_keywords: extra keywords for the reset call,
if extra parameters are needed at reset
:param info_keywords: extra information to log, from the information return of env.step()
"""
EXT = "monitor.csv"
def __init__(
self,
env: "EveEnv",
filename: Optional[str] = None,
allow_early_resets: bool = True,
reset_keywords: Tuple[str, ...] = (),
info_keywords: Tuple[str, ...] = (),
):
super(Monitor, self).__init__(env=env)
self.t_start = time.time()
if filename is None:
self.file_handler = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if os.path.isdir(filename):
filename = os.path.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.file_handler = open(filename, "wt")
self.file_handler.write(
"#%s\n" % json.dumps({
"t_start": self.t_start,
"env_id": env.spec and env.spec.id
}))
self.logger = csv.DictWriter(self.file_handler,
fieldnames=("r", "l", "t") +
reset_keywords + info_keywords)
self.logger.writeheader()
self.file_handler.flush()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {
} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs) -> EveObs:
"""
Calls the environment reset. Can only be called if the environment is over, or if allow_early_resets is True
:param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords
:return: the first observation of the environment
"""
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError(
"Tried to reset an environment before done. If you want to allow early resets, "
"wrap your env with Monitor(env, path, allow_early_resets=True)"
)
self.rewards = []
self.needs_reset = False
for key in self.reset_keywords:
value = kwargs.get(key)
if value is None:
raise ValueError(
"Expected you to pass kwarg {} into reset".format(key))
self.current_reset_info[key] = value
return self.env.reset(**kwargs)
def step(self, action: Union[np.ndarray, int]) -> EveStepReturn:
"""
Step the environment with the given action
:param action: the action
:return: observation, reward, done, information
"""
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
observation, reward, done, info = self.env.step(action)
self.rewards.append(reward)
if done:
self.needs_reset = True
ep_rew = sum(self.rewards)
ep_len = len(self.rewards)
ep_info = {
"r": round(ep_rew, 6),
"l": ep_len,
"t": round(time.time() - self.t_start, 6)
}
for key in self.info_keywords:
ep_info[key] = info[key]
self.episode_rewards.append(ep_rew)
self.episode_lengths.append(ep_len)
self.episode_times.append(time.time() - self.t_start)
ep_info.update(self.current_reset_info)
if self.logger:
self.logger.writerow(ep_info)
self.file_handler.flush()
info["episode"] = ep_info
self.total_steps += 1
return observation, reward, done, info
def close(self) -> None:
"""
Closes the environment
"""
super(Monitor, self).close()
if self.file_handler is not None:
self.file_handler.close()
def get_total_steps(self) -> int:
"""
Returns the total number of timesteps
:return:
"""
return self.total_steps
def get_episode_rewards(self) -> List[float]:
"""
Returns the rewards of all the episodes
:return:
"""
return self.episode_rewards
def get_episode_lengths(self) -> List[int]:
"""
Returns the number of timesteps of all the episodes
:return:
"""
return self.episode_lengths
def get_episode_times(self) -> List[float]:
"""
Returns the runtime in seconds of all the episodes
:return:
"""
return self.episode_times
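# Illustrative sketch of the file layout Monitor produces (a JSON header line
# followed by CSV rows; the values shown here are made up, only the "r", "l",
# "t" column names come from the code above):
#
#   #{"t_start": 1600000000.0, "env_id": "SomeEnv-v0"}
#   r,l,t
#   1.0,200,12.3
#   0.5,180,24.1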
class LoadMonitorResultsError(Exception):
"""
Raised when loading the monitor log fails.
"""
pass
def get_monitor_files(path: str) -> List[str]:
"""
get all the monitor files in the given path
:param path: the logging folder
:return: the log files
"""
return glob(os.path.join(path, "*" + Monitor.EXT))
def load_results(path: str) -> pandas.DataFrame:
"""
Load all Monitor logs from a given directory path matching ``*monitor.csv``
:param path: the directory path containing the log file(s)
:return: the logged data
"""
monitor_files = get_monitor_files(path)
if len(monitor_files) == 0:
raise LoadMonitorResultsError(
f"No monitor files of the form *{Monitor.EXT} found in {path}")
data_frames, headers = [], []
for file_name in monitor_files:
with open(file_name, "rt") as file_handler:
first_line = file_handler.readline()
assert first_line[0] == "#"
header = json.loads(first_line[1:])
            data_frame = pandas.read_csv(file_handler, index_col=None)
from .base import Transformer
import os
import pandas as pd
import numpy as np
from datetime import datetime
ISO_COUNTRY_CODES = os.path.join(os.path.dirname(__file__), 'countrycodes.csv')
# Sub-Saharan country codes which are of interest for migration ORIGINS
SOURCES = ['Burundi', 'Comoros', 'Djibouti',
'Eritrea', 'Ethiopia', 'French Southern Territories', 'Kenya', 'Madagascar',
'Malawi', 'Mauritius', 'Mayotte', 'Mozambique', 'Réunion', 'Rwanda', 'Seychelles',
'Somalia', 'South Sudan', 'United Republic of Tanzania', 'Uganda', 'Zambia', 'Zimbabwe']
# Destination of interest
DESTINATIONS = ['Italy', 'Sweden', 'Denmark', 'United Kingdom', 'Saudi Arabia', 'South Africa']
class MixedMigrationTransformer(Transformer):
"""
Generates the target forecast variable for each destination cluster.
    Based on the target forecast variable from ETH (Ethiopia) to the destination countries.
    However, since we also use migration data from other Sub-Saharan source countries,
    these are encoded as the target variable as well.
    Specifically, the target variable 'TARGET.ETH.TO.EU', which denotes the annual mixed migration
    flow from Ethiopia to Europe, will also carry flows from Somalia to the EU for rows with that
    country code.
year,value,Country Name,Country Code,Indicator Name,Indicator Code
    1980,218.0,Ethiopia,ETH,Mixed Migration ETH to Europe,TARGET.ETH.TO.EU ---> Flows from ETH to EU
1981,376.0,Somalia,SOM,Mixed Migration ETH to Europe,TARGET.ETH.TO.EU ---> Flows from SOM to EU
"""
def __init__(self, source, target):
super().__init__(source, target)
self.iso = pd.read_csv(ISO_COUNTRY_CODES,
usecols=[0, 2],
names=['name', 'iso3'],
header=0)
# Fix naming difference
        self.iso.loc[self.iso.name == "United Kingdom of Great Britain and Northern Ireland", 'name'] = "United Kingdom"
def read(self):
""" Overloaded method, since we have multiple sources """
self.unhcr = pd.read_csv(self.source[0],
skiprows=4,
na_values='*',
names=['year', 'target', 'source', 'type', 'value'],
dtype={'year': np.int32,
                                          'value': float})
self.yemen = pd.read_csv(self.source[1])
self.undesa = pd.read_excel(self.source[2],
sheet_name='Table 1',
header=15,
skipfooter=26,
na_values='..')
def __interpolate(self):
""" A linear interpolation for UNDESA data which is every 5 years """
results = []
base_years = set(np.arange(min(self.undesa.year), 2017, 1))
for s in SOURCES:
for d in DESTINATIONS:
# fetch the time series for this pair
c1 = self.undesa.target == d
c2 = self.undesa.source == s
# Assume that the UNDESA was consistent across years when
# it considered Refugee numbers to be part of the migration stock
R = any(self.undesa.loc[c1 & c2, 'R'])
# A temporary frame to do the interpolation
ts = pd.DataFrame({'target': d,
'source': s,
'R': R,
'year': self.undesa.loc[c1 & c2, 'year'],
'migration': self.undesa.loc[c1 & c2, 'migration']})
if len(ts) >= 3:
# only consider country pairs with at least 3 observations
# years to interpolate
interyears = list(base_years - set(ts.year.unique()))
tr = pd.DataFrame({'target': [d for i in range(len(interyears))],
'source': [s for i in range(len(interyears))],
'R': [R for i in range(len(interyears))],
'year': interyears,
'migration': [np.nan for i in range(len(interyears))]
})
                    ts = pd.concat([ts, tr], ignore_index=True)
# do the interpolation
ts.sort_values(by='year', inplace=True)
ts.set_index('year', inplace=True)
ts.migration.interpolate(inplace=True)
results.append(ts)
else:
print("{} -> {} has {} observations. Ignoring".format(s, d, len(ts)))
val = pd.concat(results)
val.reset_index(inplace=True)
return val
def __undesa_transform(self):
""" UNDESA data is for every 5 years, so we interpolate """
# For some reason, there is a nan-index at the end when read in
# so drop the last value
self.undesa = self.undesa[:-1]
print("UNDESA migration matrix with {} rows.".format(len(self.undesa)))
# Excel reader doesn't read some of the headers
headers = ["Year", "Sort order", "Destination", "Notes", "Code", "Type of data"]
mapper = {"Unnamed: {}".format(k): v for k, v in zip(range(0, 6), headers)}
self.undesa.rename(columns=mapper, inplace=True)
# Remove the multi index for now - and treat them as columns
self.undesa = self.undesa.reset_index()
self.undesa.drop(columns=["Sort order",
"Notes",
"Code",
"Total",
"Other North",
"Other South"], inplace=True)
        # Some of the UNDESA migration numbers include the UNHCR numbers.
        # This is indicated by the code "R" in the "type of data" column.
        # To avoid duplication when generating the target variables,
        # we use "R" as a flag to mark the specific entries that already
        # include refugee numbers.
self.undesa['R'] = self \
.undesa['Type of data'] \
.apply(lambda x: True if 'R' in str(x) else False)
self.undesa.drop(columns=['Type of data'], inplace=True)
# Transform from matrix to long form
self.undesa = self.undesa.melt(id_vars=['Year', 'Destination', 'R'],
var_name='source',
value_name='migration')
# conform to the other sources
self.undesa.rename(columns={'Destination': 'target'}, inplace=True)
self.undesa['year'] = self.undesa['Year'].astype(int)
self.undesa.drop(columns=['Year'], inplace=True)
self.undesa = self.undesa[['year', 'source', 'target', 'R', 'migration']]
print("UNDESA long form data with {} rows.".format(len(self.undesa)))
# Filter based on sources and destinations
c1 = self.undesa.source.isin(SOURCES)
c2 = self.undesa.target.isin(DESTINATIONS)
self.undesa = self.undesa[c1 & c2]
# Remove any nulls
c3 = self.undesa.migration.isnull()
        self.undesa.loc[c3, 'migration'] = 0.0
print("UNDESA data for SOURCE/DESTINATION countries with {} rows.".format(len(self.undesa)))
# Handle interpolation (linear for now)
self.undesa = self.__interpolate()
# EDIT - after the Dublin workshop, August 2018
# UNDESA stats are for migrant stock. We need to derive flows.
# Using a simplifying assumption:
#
# flow(t) = stock(t) - stock (t-1)
#
# Note there are other methods like Abel et al. (2016), which may
# be more accurate here.
self.undesa['migration'] = self.undesa.groupby(['source', 'target'])['migration'].transform(self.__get_flows)
c1 = self.undesa.migration.isnull()
self.undesa = self.undesa[~c1]
def __get_flows(self, x):
""" Helper script to compute flows from migration stock """
k = x.diff()
k[k < 0] = 0 # flows of interest are positive
return k
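    # Illustrative worked example (made-up values): for a migrant-stock series
    # [10, 25, 22] the flows are diff() = [NaN, 15, -3]; clipping negatives to
    # zero gives [NaN, 15, 0], and the NaN rows are dropped by the caller above.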
def __unhcr_transformer(self):
# Handle NA values
print("UNHCR data with {} rows.".format(len(self.unhcr)))
        self.unhcr = self.unhcr.replace([np.inf, -np.inf], np.nan)
        self.unhcr = self.unhcr[~pd.isnull(self.unhcr.value)]
#!/usr/bin/env python3
import random, os, sys, logging, re
import pandas as pd
from Bio import SeqIO
try:
from Bio.Alphabet import generic_dna, IUPAC
Bio_Alphabet = True
except ImportError:
Bio_Alphabet = None
# usages of generic_dna, IUPAC are not supported in Biopython 1.78 (September 2020).
print(f"The installed BioPython is a new version that has removed the Alphabet module.",file=sys.stderr)
import numpy as np
from itertools import combinations, product
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.stats import sem
def writeOutputSeq(col_ID, col_seq, output_file, db):
"""
Generate output fasta file using db dataframe
"""
output_handle = open(output_file, 'w')
for key, row in db.iterrows():
output_handle.write('>%s\n%s\n' % (row[col_ID], row[col_seq]))
    output_handle.close()
def collapse_fasta(prefix):
print(prefix)
# parse joined reads
seq_dict_joined = getInputSeq(prefix + '_join.raw.fa')
seq_id_dict = {}
for id in seq_dict_joined:
seq = seq_dict_joined[id]
if seq not in seq_id_dict:
seq_id_dict[seq] = [id]
else:
seq_id_dict[seq].append(id)
fjoined = open((prefix + '_join.fa'), 'w')
for seq in seq_id_dict:
fjoined.write('>%s\n%s\n' % ('_'.join(seq_id_dict[seq]), seq))
fjoined.close()
# parse unjoined
seq_dict_R1 = getInputSeq(prefix + '_unjoinR1.raw.fa')
seq_dict_R2 = getInputSeq(prefix + '_unjoinR2.raw.fa')
seq_id_dict_R1R2 = {} # concated seq: [seq IDs]
for id in seq_dict_R1:
concat_seq = seq_dict_R1[id] + seq_dict_R2[id]
if concat_seq not in seq_id_dict_R1R2:
seq_id_dict_R1R2[concat_seq] = [id]
else:
seq_id_dict_R1R2[concat_seq].append(id)
fR1 = open(prefix + '_unjoinR1.fa', 'w')
fR2 = open(prefix + '_unjoinR2.fa', 'w')
for seq in seq_id_dict_R1R2:
fR1.write('>%s\n%s\n' % ('_'.join(seq_id_dict_R1R2[seq]), seq_dict_R1[seq_id_dict_R1R2[seq][0]]))
fR2.write('>%s\n%s\n' % ('_'.join(seq_id_dict_R1R2[seq]), seq_dict_R2[seq_id_dict_R1R2[seq][0]]))
fR1.close()
fR2.close()
def getInputSeq(seq_file):
"""
Arguments:
seq_file = a fasta file of sequences input
Returns:
a dictionary of {ID:Seq}
"""
### add print message to warn for empty dict
if not os.path.exists(seq_file):
print("[getInputSeq] %s FAILED TO LOAD. EMPTY DICT IS RETURNED. THIS MAY INFLUENCE YOUR RESULTS" % seq_file, file=sys.stderr, flush=True)
return {}
if seq_file.endswith('.gz'):
os.system('gunzip %s' % seq_file)
seq_file_unzip = seq_file.rstrip('.gz')
else:
seq_file_unzip = seq_file
if Bio_Alphabet:
seq_dict = SeqIO.index(seq_file_unzip, "fasta", IUPAC.ambiguous_dna)
else:
seq_dict = SeqIO.index(seq_file_unzip, "fasta")
# Create a seq_dict ID translation using IDs truncate up to space or 50 chars
seqs = {}
for seq in seq_dict.values():
seqs.update({seq.description: str(seq.seq).upper()})
    ### .fa files may have a header preceding each gene. This chunk is added to make sure the header is removed
### can't change the brackets, otherwise keyerror
keys = list(seqs.keys())
# obtain a list of keys stripped of the header
for i in range(len(keys)):
keys[i] = keys[i].replace("lcl|", "", 1)
seqs = dict(zip(keys, list(seqs.values())))
if seq_file.endswith('.gz'):
os.system('gzip %s' % seq_file_unzip)
return seqs
def getCDR(cdrfile):
V_CDR = {}
if not os.path.exists(cdrfile):
        logging.warning('Cannot find CDR boundary file %s' % os.path.basename(cdrfile))
return None
else:
for line in open(cdrfile):
l = line.strip().split()
V_CDR[l[0]] = [int(b) for b in l[1:]]
return V_CDR
def getCSV(csvfile):
# Load CSV file by reading by chunks
tmplist = []
for chunk in pd.read_csv(csvfile, sep='\t', chunksize=20000):
tmplist.append(chunk)
m = pd.concat(tmplist, axis=0)
del tmplist
return m
def load_Valign(fname):
# Load V gene genome alignment position
V_align = {}
for line in open(fname):
l = line.strip().split()
start_end = '%s_%s' % (l[2], l[3])
if l[0] not in V_align:
V_align[l[0]] = {l[1]: [start_end]}
else:
if l[1] in V_align[l[0]]:
V_align[l[0]][l[1]].append(start_end)
else:
V_align[l[0]][l[1]] = [start_end]
return V_align
def CheckAlignOverlap(topinfo, reads_align, Valign, genomealign, hitcheck):
flag = 'noneed'
if genomealign == 'T':
flag = 'unmatch'
if topinfo[0] not in reads_align:
flag = 'nohit'
else:
for loc in reads_align[topinfo[0]]:
chrom = loc[0]
pos = int(loc[1])
if topinfo[1] not in Valign:
flag = 'noVhit'
continue
if chrom in Valign[topinfo[1]]:
for start_end in Valign[topinfo[1]][chrom]:
start = int(start_end.split('_')[0])
end = int(start_end.split('_')[1])
# extend 10bp at 5' because V-D or V-J junctions might have matches
if (start - 10) <= pos <= end:
flag = 'match'
if flag == 'nohit':
return 'No_hit_from_genome_alignment'
elif flag == 'noVhit':
return 'topVgene_has_no_alignment'
elif flag == 'unmatch':
return 'genome_alignment_unmatch_Vgene'
else:
return hitcheck
def loggingRun(cmdline):
logging.info(cmdline)
os.system(cmdline)
def line_count(fname):
i = -1
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def fasta_count(fname):
i = 0
fin = open(fname)
for line in fin:
if line.startswith('>'):
i += 1
fin.close()
return i
def reads_stat(args):
for sample in sample_info:
eachdir = '%s/%s' % (args.outdir, sample)
fstat = open('%s/%s.stat.txt' % (eachdir, sample), 'w')
# reads count
total_num = line_count("%s/%s_R1.fq" % (eachdir, sample)) / 4
join_num = line_count("%s/%s_join.fq" % (eachdir, sample)) / 4
unjoin_num = total_num - join_num
fstat.write('Total reads\t%d\nJoined reads\t%d\nUnjoined reads\t%d\n' % (total_num, join_num, unjoin_num))
# alignment stat
join_uniq = line_count('%s/%s_join.uniq.xls' % (eachdir, sample))
R1_uniq = line_count('%s/%s_unjoinR1.uniq.xls' % (eachdir, sample))
join_NOuniq = line_count('%s/%s_join.NOuniq.xls' % (eachdir, sample))
R1_NOuniq = line_count('%s/%s_unjoinR1.NOuniq.xls' % (eachdir, sample))
mergeNum = line_count('%s/%s.IgBlast_merge.xls' % (eachdir, sample))
fstat.write('# of uniquely/NON-uniquely joined hits\t%d\t%d\n' % (join_uniq, join_NOuniq))
fstat.write('# of uniquely/NON-uniquely unjoined-R1 hits\t%d\t%d\n' % (R1_uniq, R1_NOuniq))
fstat.write('# of merged hits\t%d\n' % mergeNum)
fstat.close()
def random_seq(length):
    ''' Generate a random sequence with the input length '''
seq = ''
if length == 0:
return seq
else:
seq = ''.join([random.choice('ATCG') for i in range(0, length)])
return seq
def mutate_seq(orig_string, mutation_rate=0.005):
''' Mutate input sequence with point mutations '''
bases = "ACGT"
result = []
mutations = []
n = 0
for base in orig_string:
n += 1
if random.random() < mutation_rate and base in bases:
new_base = bases[bases.index(base) - random.randint(1, 3)] # negatives are OK
result.append(new_base)
mutations.append('%s%d%s' % (base, n, new_base))
else:
result.append(base)
return "".join(result), '|'.join(mutations)
def reverse_complement(seq):
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
seq_rc = "".join(complement.get(base, base) for base in reversed(seq))
return seq_rc
def fastq_stats(fastqfile):
# execute fastq-stats
os.system('fastq-stats %s > %s.fastqstat' % (fastqfile, fastqfile))
# parse results
fqstat = {}
for line in open('%s.fastqstat' % fastqfile):
l = line.strip().split('\t')
fqstat[l[0]] = l[1]
os.system('rm -rf %s.fastqstat' % fastqfile)
return fqstat
def parsefa_long(file, length):
id_seq = {}
id = ''
for line in open(file):
if line.startswith('>'):
id = line.strip()
id_seq[id] = ''
else:
            id_seq[id] += line.strip()
fout = open(file.replace('.fa', '.long.fa'), 'w')
for id in id_seq:
if len(id_seq[id]) >= length:
fout.write('%s\n%s\n' % (id, id_seq[id]))
fout.close()
def smooth(self, nucMutnum, nucCovnum, genetotalseq, statfilelist):
# print(nucMutnum['A'], nucMutnum['G'], nucMutnum['C'], nucMutnum['T'])
# print(nucCovnum['A'], nucCovnum['G'], nucCovnum['C'], nucCovnum['T'])
nucMucratio = {}
smoothpower = self.args.additivesmooth
for nuc in 'AGCT':
nucMucratio[nuc] = float(nucMutnum[nuc]) / nucCovnum[nuc]
avecover = sum([nucCovnum[a] for a in 'AGCT']) / len(genetotalseq)
for gene in statfilelist:
statfile = statfilelist[gene]
statnew = statfile.replace('.txt', '.sm%s.txt' % str(smoothpower))
fnew = open(statnew, 'w')
for line in open(statfile):
if line.startswith('Pos'):
fnew.write(line)
else:
l = line.strip().split('\t')
total_smooth = int(l[2]) + avecover * smoothpower
mut_smooth = int(l[1]) + nucMucratio[nuc] * avecover * smoothpower
if total_smooth == 0:
l[4] = 0
else:
l[4] = mut_smooth / total_smooth
l[4] = str(l[4])
fnew.write('%s\n' % '\t'.join(l))
fnew.close()
pdffile = statnew.replace('nucfile', 'profiles').replace('txt', 'pdf')
        ### 09152020: changed showsequence from false to true
loggingRun(
'Rscript scripts/SHMPlot2.R %s %s plotrows=1 figureheight=2 showsequence=TRUE ymax=0.2 cdr1_start=%d cdr1_end=%d cdr2_start=%d cdr2_end=%d cdr3_start=%d cdr3_end=%d' % \
(statnew, pdffile, self.V_CDR[gene]['CDR1_start'], self.V_CDR[gene]['CDR1_end'], \
self.V_CDR[gene]['CDR2_start'], self.V_CDR[gene]['CDR2_end'], \
self.V_CDR[gene]['CDR3_start'], self.V_CDR[gene]['CDR3_end']))
######### this section is for tree construction & file parsing
def mergeSampleCount(shortlist):
samplelist = [s.split(':')[0] for s in shortlist[0].split('|')]
sample_count = {}
for s in samplelist:
sample_count[s] = 0
for shortcount in shortlist:
for oneshort in shortcount.split('|'):
(a, b) = oneshort.split(':')
sample_count[a] = sample_count[a] + int(b)
o = '|'.join(["%s:%d" % (a, sample_count[a]) for a in samplelist])
return o
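# Illustrative worked example of the merge above:
#   mergeSampleCount(['s1:2|s2:0', 's1:1|s2:3']) -> 's1:3|s2:3'
# (per-sample counts are summed across all short-count strings).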
def treeCollapseParse(fin, fout):
db = pd.read_csv(fin, sep="\t", low_memory=False)
if len(db) < 2: sys.exit('Find no passed read in tmp_db-pass.tab')
grouped = db.groupby('CLONE')
idlist = []
sclist = []
readlist = []
fullseqlist = []
for key, group in grouped:
seqlist = []
group = pd.DataFrame(group)
germseq = list(group['GERMLINE_IMGT_D_MASK'])[0]
for si in group['SEQUENCE_IMGT']:
s = []
for n in range(0, len(si)):
if si[n] in ['N', '.'] and germseq[n] != 'N':
s.append(germseq[n])
else:
s.append(si[n])
seqlist.append(''.join(s))
group["FULLSEQ"] = seqlist
grouped2 = group.groupby("FULLSEQ")
for subkey, subgroup in grouped2:
subgroup = pd.DataFrame(subgroup)
subgroup["trimlen"] = [len(s.replace('.', '').replace('N', '')) for s in subgroup['SEQUENCE_IMGT']]
subgroup = subgroup.sort_values("trimlen", ascending=False)
idlist.append(list(subgroup['SEQUENCE_ID'])[0])
fullseqlist.append(list(subgroup['FULLSEQ'])[0])
readlist.append('|'.join(list(subgroup['SEQUENCE_ID'])))
sclist.append(mergeSampleCount(list(subgroup['SHORTCOUNT'])))
treeCollapse = pd.DataFrame(db.loc[db['SEQUENCE_ID'].isin(idlist),])
treeCollapse["SHORTCOUNT"] = sclist
# treeCollapse["SEQUENCE_IMGT"] = fullseqlist
# treeCollapse["READGROUP"] = readlist
treeCollapse.to_csv(fout, sep="\t", index=False)
def files_process(args, worktype):
# IgBlast clean up
if worktype == 'igblast_clean':
for sample in args.metadict:
eachdir = '%s/%s' % (args.outdir, sample)
dirlist = ['reads_fasta', 'reads_fastq', 'igblast_raw',
'igblast_db'] # , 'bowtie_sam']
for d in dirlist:
if not os.path.exists('%s/%s' % (eachdir, d)):
os.system('mkdir %s/%s' % (eachdir, d))
os.system('mv {0}/*fa {0}/reads_fasta'.format(eachdir))
os.system('mv {0}/*.fq {0}/*list {0}/reads_fastq'.format(eachdir))
os.system('mv {0}/*IgBlast {0}/igblast_raw'.format(eachdir))
os.system('mv {0}/*IgBlast.db {0}/igblast_db'.format(eachdir))
# if args.genomealign == 'T':
# os.system('mv %s/*.sam %s/bowtie_sam' % (eachdir, eachdir))
# JH 05042021
# os.system('gzip %s/reads_fast*/*' % (eachdir))
os.system('gzip -f %s/reads_fasta/*.fa' % (eachdir))
os.system('gzip -f %s/reads_fastq/*.fq' % (eachdir))
os.system('gzip -f %s/reads_fastq/*.list' % (eachdir))
# os.system('gzip %s/igblast/*' % eachdir)
os.system('gzip -f %s/igblast_db/*.IgBlast.db' % eachdir)
os.system('gzip -f %s/igblast_raw/*.IgBlast' % eachdir)
if os.path.exists('%s/unmatched/' % args.outdir):
os.system('gzip -q %s/unmatched/*' % args.outdir)
def getNmers(sequences, n):
"""
Breaks input sequences down into n-mers
Arguments:
sequences : List of sequences to be broken into n-mers
n : Length of n-mers to return
n == 1
Returns:
dict : Dictionary mapping sequence to a list of n-mers
"""
# Add Ns so first nucleotide is center of first n-mer
sequences_n = ['N' * ((n - 1) // 2) + seq + 'N' * ((n - 1) // 2) for seq in sequences]
nmers = {}
for seq, seqn in zip(sequences, sequences_n):
nmers[seq] = [seqn[i:i + n] for i in range(len(seqn) - n + 1)]
# nmers = {(seq, [seqn[i:i+n] for i in range(len(seqn)-n+1)]) for seq,seqn in izip(sequences,sequences_n)}
return nmers
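# Illustrative worked example: with n=3 each sequence is padded with one 'N' on
# either end, so getNmers(['ACGT'], 3) returns
#   {'ACGT': ['NAC', 'ACG', 'CGT', 'GTN']}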
def scoreDNA(a, b, mask_score=None, gap_score=None):
"""
Returns the score for a pair of IUPAC Ambiguous Nucleotide characters
Arguments:
a : First characters
b : Second character
n_score : Tuple of length two defining scores for all matches against an N
character for (a, b), with the score for character (a) taking precedence;
if None score symmetrically according to IUPAC character identity
gap_score : Tuple of length two defining score for all matches against a gap (-, .)
character for (a, b), with the score for character (a) taking precedence;
if None score symmetrically according to IUPAC character identity
Returns:
int : Score for the character pair
"""
# Define ambiguous character translations
IUPAC_trans = {'AGWSKMBDHV': 'R', 'CTSWKMBDHV': 'Y', 'CGKMBDHV': 'S', 'ATKMBDHV': 'W', 'GTBDHV': 'K',
'ACBDHV': 'M', 'CGTDHV': 'B', 'AGTHV': 'D', 'ACTV': 'H', 'ACG': 'V', 'ABCDGHKMRSTVWY': 'N',
'-.': '.'}
# Create list of tuples of synonymous character pairs
IUPAC_matches = [p for k, v in IUPAC_trans.items() for p in list(product(k, v))]
# Check gap and N-value conditions, prioritizing score for first character
if gap_score is not None and a in '-.':
return gap_score[0]
elif mask_score is not None and a in 'nN':
return mask_score[0]
elif gap_score is not None and b in '-.':
return gap_score[1]
elif mask_score is not None and b in 'nN':
return mask_score[1]
# Return symmetric and reflexive score for IUPAC match conditions
if a == b:
return 1
elif (a, b) in IUPAC_matches:
return 1
elif (b, a) in IUPAC_matches:
return 1
else:
return 0
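# Illustrative worked examples of the scoring above:
#   scoreDNA('A', 'A') -> 1   (identity)
#   scoreDNA('A', 'R') -> 1   (A is compatible with the purine code R)
#   scoreDNA('A', 'C') -> 0
#   scoreDNA('-', 'A', gap_score=(0, 0)) -> 0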
def getDNADistMatrix(mat=None, mask_dist=0, gap_dist=0):
"""
Generates a DNA distance matrix
    Specifies a matrix of distance scores, e.g. A==A gives similarity score 1 and distance 0
Arguments:
mat : Input distance matrix to extend to full alphabet;
if unspecified, creates Hamming distance matrix that incorporates
IUPAC equivalencies
mask_dist : Distance for all matches against an N character
gap_dist : Distance for all matches against a gap (-, .) character
Returns:
DataFrame : pandas.DataFrame of distances
"""
IUPAC_chars = list('-.ACGTRYSWKMBDHVN')
mask_char = 'N'
# Default matrix to inf
dist_mat = pd.DataFrame(float('inf'), index=IUPAC_chars, columns=IUPAC_chars,
dtype=float)
# Set gap distance
for c in '-.':
dist_mat.loc[c] = dist_mat.loc[:, c] = gap_dist
# Set mask distance
dist_mat.loc[mask_char] = dist_mat.loc[:, mask_char] = mask_dist
# Fill in provided distances from input matrix
if mat is not None:
for i, j in product(mat.index, mat.columns):
dist_mat.at[i, j] = mat.at[i, j]
# If no input matrix, create IUPAC-defined Hamming distance
else:
for i, j in product(dist_mat.index, dist_mat.columns):
dist_mat.at[i, j] = 1 - scoreDNA(i, j,
mask_score=(1 - mask_dist, 1 - mask_dist),
gap_score=(1 - gap_dist, 1 - gap_dist))
return dist_mat
def calcDistances(sequences, n, dist_mat, norm, sym):
"""
Calculate pairwise distances between input sequences
Arguments:
sequences : List of sequences for which to calculate pairwise distances
n : Length of n-mers to be used in calculating distance
dist_mat : pandas.DataFrame of mutation distances
norm : Normalization method
sym : Symmetry method
Returns:
ndarray : numpy matrix of pairwise distances between input sequences
"""
# Initialize output distance matrix
dists = np.zeros((len(sequences), len(sequences)))
# Generate dictionary of n-mers from input sequences
nmers = getNmers(sequences, n)
# Iterate over combinations of input sequences
for j, k in combinations(list(range(len(sequences))), 2):
# Only consider characters and n-mers with mutations
        # nmer == seq == [list of bases in seqs_uniq]
        # mutated == positions where seq1 != seq2
        # in our case there is no real need for dist_mat: count the number of
        # differing characters and normalize by sequence length => distance
mutated = [i for i, (c1, c2) in enumerate(zip(sequences[j], sequences[k])) if c1 != c2]
seq1 = [sequences[j][i] for i in mutated]
seq2 = [sequences[k][i] for i in mutated]
nmer1 = [nmers[sequences[j]][i] for i in mutated]
nmer2 = [nmers[sequences[k]][i] for i in mutated]
# Determine normalizing factor
if norm == 'len':
norm_by = len(sequences[0])
elif norm == 'mut':
norm_by = len(mutated)
else:
norm_by = 1
# Determine symmetry function
if sym == 'avg':
sym_fun = np.mean
elif sym == 'min':
sym_fun = min
else:
sym_fun = sum
# Calculate distances
try:
dists[j, k] = dists[k, j] = \
sum([sym_fun([dist_mat.at[c1, n2], dist_mat.at[c2, n1]]) \
for c1, c2, n1, n2 in zip(seq1, seq2, nmer1, nmer2)]) / \
(norm_by)
except (KeyError):
raise KeyError('Unrecognized character in sequence.')
return dists
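# Illustrative worked example: with the default IUPAC Hamming matrix, unit
# n-mers, length normalization and 'avg' symmetry, two 4-mers differing at one
# position are at distance 1/4:
#   calcDistances(['AAAA', 'AAAT'], 1, getDNADistMatrix(), 'len', 'avg')
# returns a 2x2 matrix whose off-diagonal entries are 0.25.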
def formClusters(dists, link, distance):
"""
Form clusters based on hierarchical clustering of input distance matrix with
linkage type and cutoff distance
Arguments:
dists : numpy matrix of distances
link : Linkage type for hierarchical clustering
distance : Distance at which to cut into clusters
Returns:
list : List of cluster assignments
"""
# Make distance matrix square
# squareform turns square matrix to vector, or vector to square matrix
dists = squareform(dists)
# Compute linkage
links = linkage(dists, link)
# Break into clusters based on cutoff
clusters = fcluster(links, distance, criterion='distance')
return clusters
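# Illustrative worked example: for three sequences with pairwise distances
#   [[0, 0.05, 0.9], [0.05, 0, 0.8], [0.9, 0.8, 0]]
# cutting single-linkage clustering at distance 0.1 groups the first two
# sequences together and leaves the third on its own (the numeric labels
# returned by fcluster are arbitrary).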
def hier_clust(group, distance):
"""
distance = 0.1 in Yuxiang/Huan
Form clusters based on hierarchical clustering of input distance matrix with
linkage type and cutoff distance
"""
# This line was never used --> commented out JH 06032021
# dict_seqID = group.set_index('CDR3_MASK').to_dict()['SEQUENCE_ID']
seqs = group['CDR3_MASK'].tolist()
IDs = group['SEQUENCE_ID'].tolist()
seqs_uniq = list(set(seqs))
seq_map = {}
for key, row in group.iterrows():
seq = row['CDR3_MASK']
ID = row['SEQUENCE_ID']
seq_map.setdefault(seq, []).append(ID)
if len(seqs_uniq) == 1:
clone_tmp = [IDs[0] for i in range(len(IDs))]
else:
        # dist_mat is a scoring matrix that specifies the distance between pairs of chars
dist_mat = getDNADistMatrix(mask_dist=0, gap_dist=0)
dists = calcDistances(seqs_uniq, 1, dist_mat, 'len', 'avg')
# Perform hierarchical clustering
        lineage = 'single'  # single linkage uses the shortest distance
clusters = formClusters(dists, lineage, distance)
# Turn clusters into clone dictionary
clone_dict = {}
for i, c in enumerate(clusters):
cdr3seq = seqs_uniq[i]
for seq_id in seq_map[cdr3seq]:
clone_dict[seq_id] = c
# clone_dict.setdefault(c, []).extend(seq_map[seqs_uniq[i]])
clone_tmp = ['%s_%d' % (IDs[0], clone_dict[seq_id]) for seq_id in IDs]
return clone_tmp
def hier_clust_CDR3_PEPTIDE(group, distance):
"""
distance = 0.1 in Yuxiang/Huan
Form clusters based on hierarchical clustering of input distance matrix with
linkage type and cutoff distance, based on CDR3 aa sequence
"""
def calcDistances_AA(sequences, norm):
"""
Calculate pairwise distances between input peptide sequences
Arguments:
sequences : List of sequences for which to calculate pairwise distances
n : Length of n-mers to be used in calculating distance
dist_mat : pandas.DataFrame of mutation distances
norm : Normalization method
sym : Symmetry method
Returns:
ndarray : numpy matrix of pairwise distances between input sequences
"""
# Initialize output distance matrix
dists = np.zeros((len(sequences), len(sequences)))
# Iterate over combinations of input sequences
for j, k in combinations(list(range(len(sequences))), 2):
# Find locations with mutations
mutated = [i for i, (c1, c2) in enumerate(zip(sequences[j], sequences[k])) if c1 != c2]
seq1 = [sequences[j][i] for i in mutated]
seq2 = [sequences[k][i] for i in mutated]
# Determine normalizing factor
if norm == 'len':
norm_by = len(sequences[0])
else:
norm_by = 1
# Calculate distances
try:
dists[j, k] = dists[k, j] = \
sum([1 if c1 != c2 else 0 for c1, c2 in zip(seq1, seq2)]) / \
(norm_by)
except (KeyError):
raise KeyError('Unrecognized character in sequence.')
return dists
seqs = group['CDR3_PEPTIDE'].tolist()
IDs = group['SEQUENCE_ID'].tolist()
seqs_uniq = list(set(seqs))
seq_map = {}
for key, row in group.iterrows():
seq = row['CDR3_PEPTIDE']
ID = row['SEQUENCE_ID']
seq_map.setdefault(seq, []).append(ID)
if len(seqs_uniq) == 1:
clone_tmp = [IDs[0] for i in range(len(IDs))]
else:
dists = calcDistances_AA(seqs_uniq, 'len')
# Perform hierarchical clustering
        lineage = 'single'  # single linkage uses the shortest distance
clusters = formClusters(dists, lineage, distance)
# Turn clusters into clone dictionary
clone_dict = {}
for i, c in enumerate(clusters):
cdr3seq = seqs_uniq[i]
for seq_id in seq_map[cdr3seq]:
clone_dict[seq_id] = c
clone_tmp = ['%s_%d' % (IDs[0], clone_dict[seq_id]) for seq_id in IDs]
return clone_tmp
def getGermdict(args):
''' Read VDJ IgBlast database and obtain un-gapped germline sequences
2020/09 JH: Add in a check condition to ensure databases are read properly
'''
germ_dict = {}
try:
Vdb = getInputSeq(args.params_dict['Vdb'])
except AttributeError:
Vdb = getInputSeq(args.Vdb)
try:
Ddb = getInputSeq(args.params_dict['Ddb'])
except AttributeError:
Ddb = getInputSeq(args.Ddb)
try:
Jdb = getInputSeq(args.params_dict['Jdb'])
except AttributeError:
Jdb = getInputSeq(args.Jdb)
if not bool(Vdb):
print('[getGermdict] Vdb is empty... FAILED TO LOAD %s' % args.params_dict['Vdb'], file = sys.stderr, flush=True)
else:
germ_dict.update(Vdb)
if not bool(Ddb):
        print('[getGermdict] Ddb is empty... FAILED TO LOAD %s' % args.params_dict['Ddb'], file = sys.stderr, flush=True)
else:
germ_dict.update(Ddb)
if not bool(Jdb):
        print('[getGermdict] Jdb is empty... FAILED TO LOAD %s' % args.params_dict['Jdb'], file = sys.stderr, flush=True)
else:
germ_dict.update(Jdb)
return germ_dict
def collapse_db(records, collapse_type, N_Diff):
    '''
    Collapse reads in the Db file
    Input:
    records: read dataframe
    collapse_type:
    'identical' -- collapse reads whose input sequences are identical
    'partial' -- collapse shorter sequences into longer ones
    'V1' -- collapse sequences by multiple result columns (as in the version-one pipeline)
    N_Diff:
    'T' treat N as a difference
    'F' treat N as no difference
    Output:
    collapsed read dataframe with a DUPREAD column listing the merged read IDs
    '''
def _writeOutputSeq(filter_check, col_ID, col_seq, output_file, db):
"""
Generate output fasta file using db dataframe
"""
output_handle = open(output_file, 'w')
for key, row in db.iterrows():
output_handle.write('>%s\n%s\n' % (row[col_ID], row[col_seq]))
output_handle.close()
def _collapse_identical_NasDiff(records):
def __parse_group(group):
index_dupreads = ','.join(group['SEQUENCE_ID'])
# print("index_dupreads",index_dupreads, file = sys.stderr)
# print("nested function can print to console?", file=sys.stderr)
### 20200916 Lawrence: updated .ix to .loc
top_series = group.loc[group.index[0]]
top_series['DUPREAD'] = index_dupreads
return top_series
#return index_dupreads
#print("keys in records during collapse:", records.keys(), file=sys.stderr)
# YES
#print("records.shape during collapse:", records.shape, file=sys.stderr)
#print("SEQUENCE_INPUT.size in records:", records['SEQUENCE_INPUT'].size, file=sys.stderr)
grouped = records.groupby('SEQUENCE_INPUT')
#print("grouped.ngroups:", grouped.ngroups, file=sys.stderr)
#print("grouped.ndim", grouped.ndim, file=sys.stderr)
colnames = list(records) + ['DUPREAD']
records_collapse = pd.DataFrame(columns=colnames, index=range(0, len(grouped)))
### __parse_group does it work outside of grouped.apply
records_collapse = grouped.apply(__parse_group)
# records_collapse.size = 0
# print("records_collapse after apply:", records_collapse, file=sys.stderr)
# EMPTY DATAFREAM
#print("records_collapse.size:?", records_collapse.size, file=sys.stderr)
#print("records_collapse.keys():", records_collapse.keys(), file=sys.stderr)
return records_collapse
# grouped = records.groupby('SEQUENCE_INPUT')
# index_dupreads = {}
# indexList = []
# for key, group in grouped:
# idx = group.index[0]
# indexList.append(idx)
# index_dupreads[idx] = ','.join(group['SEQUENCE_ID'])
# records_collapse = records.loc[indexList]
# for idx in index_dupreads:
# records_collapse.ix[idx, 'DUPREAD'] = index_dupreads[idx]
# return records_collapse
# def _parse_read(row, records_collect):
# # Keep read with 'N'
# if 'N' in row['SEQUENCE_INPUT']:
# records_collect = records_collect.append(row)
# return records_collect
# else:
# records_cdr3 = records_collect[records_collect['CDR3_SEQ']==row['CDR3_SEQ']]
# for key,collect in records_cdr3.iterrows():
# if row['SEQUENCE_INPUT'] in collect['SEQUENCE_INPUT']:
# records_collect.ix[key, 'DUPREAD'] += ',%s' % row['DUPREAD']
# return records_collect
# records_collect = records_collect.append(row)
# return records_collect
#
# def _collapse_partial_NasDiff(records):
# colnames = list(records) #+ ['N']
# records_collect = pd.DataFrame(columns=colnames)
# for key,row in records.iterrows():
# records_collect = _parse_read(row, records_collect)
# return records_collect
def _collapse_partial_NasDiff(records):
        ''' Collapse shorter reads into longer ones
        Process: check reads one by one to see if the input seq is a substring of an already stored read
        Note: this linear scan is slow; a faster method would be needed for large inputs
        '''
records_collect = pd.DataFrame(columns=list(records))
### 20200916 Lawrence: updated .ix to .loc
records_collect.loc[0,] = records.loc[records.index[0]]
for key, row in records.iterrows():
if key != records.index[0]:
inputseq = row['SEQUENCE_INPUT']
j = pd.Series(records_collect['SEQUENCE_INPUT']).str.contains(inputseq)
if len(j[j == True]) >= 1:
i = j[j == True].index[0]
records_collect.loc[i, 'DUPREAD'] += ',%s' % row['DUPREAD']
elif len(j[j == True]) == 0:
records_collect.loc[len(records_collect) + 1,] = row
return records_collect
def _parse_SAM(read_readlen, sam_file, collapse_type):
inputR_refR = {}
for line in open(sam_file):
l = line.strip().split()
if l[5] == '%dM' % read_readlen[l[0]]:
if collapse_type == 'identical':
if l[5] == '%dM' % read_readlen[l[2]]:
inputR_refR[l[0]] = l[2]
else:
inputR_refR[l[0]] = l[2]
return inputR_refR
def _collapse_V1(records):
''' Collapse reads based on various result columns
'''
records_new = pd.DataFrame(columns=list(records))
grouplist = ['V_ALLELE', 'D_ALLELE', 'J_ALLELE', 'STOP', 'IN_FRAME', \
'V_END', 'V_D_JUNCTION', 'D_REGION', 'D_J_JUNCTION', \
'J_START', 'V_J_JUNCTION']
for key, group in records.groupby(grouplist):
dup = ','.join(group['DUPREAD'].values.tolist())
groupcontent = group.iloc[0]
groupcontent['DUPREAD'] = dup
records_new.loc[len(records_new) + 1,] = groupcontent
return records_new
def _collapse_NasNoDiff(records, collapse_type):
        ''' Requires the Bowtie2 software (bowtie2 and bowtie2-build must be on the PATH)
        '''
randname = str(random.randint(1, 1000000))
# Write with/wo 'N' two fa files as input/ref files in bowtie2 searching
records_woN = records[~records['SEQUENCE_INPUT'].str.contains("N")]
records_wN = records[records['SEQUENCE_INPUT'].str.contains("N")]
if len(records_woN) == 0 or len(records_wN) == 0:
return records
ref_file = '%s.ref' % randname
input_file = '%s.input' % randname
_writeOutputSeq('woN', 'SEQUENCE_ID', 'SEQUENCE_INPUT', ref_file, records_woN)
_writeOutputSeq('wN', 'SEQUENCE_ID', 'SEQUENCE_INPUT', input_file, records_wN)
sam_file = '%s.sam' % randname
os.system('bowtie2-build %s %s -q' % (ref_file, ref_file))
os.system('bowtie2 -x ./%s -f -U %s --local -S %s --no-head --np 0 --mp 1000 --rdg 1000,1000 --rfg 1000,1000' % \
(ref_file, input_file, sam_file))
read_readlen = records.set_index('SEQUENCE_ID').to_dict()['INPUT_LEN']
inputR_refR = _parse_SAM(read_readlen, sam_file, collapse_type)
records_collapsed = records[~records.SEQUENCE_ID.isin(inputR_refR.keys())].copy()
records_waitToCollapse = records[records.SEQUENCE_ID.isin(inputR_refR.keys())]
for inputR in inputR_refR:
refR = inputR_refR[inputR]
dup = records_waitToCollapse.loc[records_waitToCollapse['SEQUENCE_ID'] == inputR, 'DUPREAD'].values[0]
records_collapsed.loc[records_collapsed['SEQUENCE_ID'] == refR, 'DUPREAD'] += ',%s' % dup
os.system('rm -rf %s %s %s %s*bt2' % (ref_file, input_file, sam_file, ref_file))
return records_collapsed
# Main part in this func
# Collapse identical reads anyway
#print("records.size before collapse?", records.size, file=sys.stderr)
records = _collapse_identical_NasDiff(records)
#print("have all columns after collapse?", records, file=sys.stderr)
#print("shape of records after collapse:", records.shape, file=sys.stderr)
#print("columns of records after collapse:", records.columns, file=sys.stderr)
# [0,0]
records['INPUT_LEN'] = records["SEQUENCE_INPUT"].map(len)
records.sort_values('INPUT_LEN', ascending=False, inplace=True)
# Collapse identical reads with N as no difference
if collapse_type == 'identical' and N_Diff == 'F':
records = _collapse_NasNoDiff(records, 'identical')
elif collapse_type == 'partial':
# Collapse shorter reads to longer ones with N as difference
records = _collapse_partial_NasDiff(records)
if N_Diff == 'F':
# Collapse shorter reads to longer ones with N as no difference
records = _collapse_NasNoDiff(records, 'partial')
records = records.drop('INPUT_LEN', axis=1)
elif collapse_type == 'V1':
# V1 means same way as Version One pipeline
records = _collapse_NasNoDiff(records, 'identical')
records = _collapse_V1(records)
return records
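# --- Illustrative usage sketch (added; not part of the original pipeline) ---
# Collapsing a toy read table. The 'identical' mode with N_Diff='T' only needs
# pandas; the 'partial' mode and any N_Diff='F' run additionally shell out to
# bowtie2, so those require bowtie2/bowtie2-build on the PATH. Never called here.
def _example_collapse_db_usage():
    toy_reads = pd.DataFrame({
        'SEQUENCE_ID': ['r1', 'r2', 'r3'],
        'SEQUENCE_INPUT': ['ACGTACGT', 'ACGTACGT', 'ACGTACGTTT'],
    })
    collapsed = collapse_db(toy_reads, collapse_type='identical', N_Diff='T')
    # r1 and r2 carry identical input sequences, so they collapse into a single
    # row whose DUPREAD field lists both read IDs ('r1,r2').
    return collapsed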
def profile_DNAmut(group, nuc_stat, nuc_PDF, nuc_profile, args):
''' Prep DNA mutation profile, text and PDF file
2021/06 JH: Add in debugging lines to ensure databases are read properly
'''
def _parse_V_ALLELE_NUC(row):
return pd.Series([row["SEQUENCE_ID"]] + [s for s in row['V_ALLELE_NUC']])
allele = group["V_ALLELE"].unique()[0]
# print(f"allele = {allele}")
germ_dict = getGermdict(args)
if allele == "VH5-4(VH7183.a4.6":
allele_seq = germ_dict["VH5-4(VH7183.a4.6)"]
else:
try:
allele_seq = germ_dict[allele]
except KeyError:
print(f'[profile_DNAmut]: cannot find allele {allele} in Germdict when running nuc_profile for {nuc_profile}')
try:
print(f'[profile_DNAmut]: current Vdb is {args.params_dict["Vdb"]}', file = sys.stderr, flush=True)
except AttributeError:
print(f'[profile_DNAmut]: current Vdb is {args.Vdb}', file=sys.stderr, flush=True)
raise
allele_len = len(allele_seq)
colnames = ['ID'] + [l for l in allele_seq]
allele_mut = pd.DataFrame(columns=colnames, index=range(0, len(group)))
allele_mut = group.apply(_parse_V_ALLELE_NUC, axis=1)
statnames = ['Pos', 'Mut', 'Total', 'Base', 'Y', 'A', 'T', 'C', 'G']
allele_stat = pd.DataFrame(columns=statnames, index=range(1, allele_len + 1))
allele_stat['Pos'] = range(1, allele_len + 1)
allele_stat['Base'] = [l for l in allele_seq]
allele_stat[['Mut', 'Total', 'Y', 'A', 'T', 'C', 'G']] = 0
for i in range(1, allele_len + 1):
if len(allele_mut) == 1:
counts = {}
counts[allele_mut[[i]].squeeze()] = 1
else:
counts = allele_mut[[i]].squeeze().value_counts()
countA = counts.get('A', 0)
countT = counts.get('T', 0)
countC = counts.get('C', 0)
countG = counts.get('G', 0)
countMut = countA + countT + countC + countG
countTotal = countMut + counts.get('.', 0)
allele_stat.loc[i, 'Mut'] = countMut
allele_stat.loc[i, 'Total'] = countTotal
allele_stat.loc[i, 'Y'] = float(countMut / countTotal) if countTotal > 0 else 0
allele_stat.loc[i, 'A'] = countA
allele_stat.loc[i, 'T'] = countT
allele_stat.loc[i, 'C'] = countC
allele_stat.loc[i, 'G'] = countG
allele_mut.to_csv(nuc_profile, sep="\t", index=False)
allele_stat.to_csv(nuc_stat, sep="\t", index=False)
# run R scripts
if allele in args.__dict__['V_CDR']:
cdr = args.__dict__['V_CDR'][allele]
cdrstring = 'cdr1_start=%s cdr1_end=%s cdr2_start=%s cdr2_end=%s ' \
'cdr3_start=%s cdr3_end=%s' % (cdr[0], cdr[1], cdr[2],
cdr[3], cdr[4], cdr[5])
else:
cdrstring = ''
# Filter group with read number
if len(group) >= args.min_profileread:
sample = group["SAMPLE"].unique()[0]
anno = '_'.join([sample, allele])
### 20200915 Lawrence: changed showsequence from false to true
### 20200916 Lawrence: changed frpm 'Rscript %s/HTGTSrep/R/SHMPlot2.R %s %s plotrows=1 figureheight=2 showsequence=TRUE ymax=%f %s annotation=%s '
### to 'Rscript %s/HTGTSrep/R/SHMPlot2.R \"%s\" \"%s\" plotrows=1 figureheight=2 showsequence=TRUE ymax=%f %s annotation=\"%s\" '
### this allows special characters to be in V_allel names
os.system('Rscript %s/HTGTSrep/R/SHMPlot2.R \"%s\" \"%s\" plotrows=1 figureheight=2 '
'showsequence=TRUE ymax=%f %s annotation=\"%s\" ' % (args.scriptdir, nuc_stat,
nuc_PDF, args.ymax_DNA, cdrstring, anno))
def getInferSeq(treefile, group):
    ''' Read the tree file and get the inferred sequence (second line of the file),
    using the germline V and J segments as reference so those regions carry no surprises
    '''
n = 0
print("in getInferSeq, treefile:", treefile, file = sys.stderr)
with open(treefile) as f:
for line in f:
n += 1
l = line.strip().split()
if n == 2: inferseq = l[-1]
# get germline parts
cdr3 = group.iloc[0]['CDR3_MASK']
sequence_imgt = group.iloc[0]['SEQUENCE_IMGT']
Vpos = sequence_imgt.find(cdr3)
germline_imgt_seq = group.iloc[0]['GERMLINE_IMGT_D_MASK']
seq_V = germline_imgt_seq[:Vpos]
seq_Junc = inferseq[Vpos:Vpos + len(cdr3)]
if len(germline_imgt_seq) >= len(inferseq):
seq_J = germline_imgt_seq[Vpos + len(cdr3): len(inferseq)]
else:
seq_J = inferseq[Vpos + len(cdr3): len(inferseq)]
# Use V and J parts from germline as reference to avoid mismatch at these regions in mut profiling
newinfer = (seq_V + seq_Junc + seq_J).replace('.', 'N')
# print(group['CLONE'].tolist()[0], inferseq, newinfer)
return newinfer
def profile_DNAmut_clonal(inferseq, group, nuc_stat, nuc_PDF, nuc_profile, args):
''' Prep DNA mutation profile, text and PDF file
'''
allele = group["V_CALL"].unique()[0]
# Get position list of inferred seq which are not 'N'
poslist = [i for i in range(0, len(inferseq)) if inferseq[i] != 'N']
allele_seq = ''.join([inferseq[i] for i in poslist])
allele_len = len(poslist)
colnames = ['ID'] + [inferseq[i] for i in poslist]
allele_mut = pd.DataFrame(columns=colnames)
for key, row in group.iterrows():
seq = row['SEQUENCE_IMGT']
vals = [row["SEQUENCE_ID"]]
for i in poslist:
if i >= len(seq):
vals.append('-')
else:
l = seq[i]
if l == '.': vals.append('-')
if l == '-': vals.append('N')
if l in 'ATCGN':
if l == inferseq[i]:
vals.append('.')
else:
vals.append(l)
allele_mut.loc[len(allele_mut) + 1] = vals
statnames = ['Pos', 'Mut', 'Total', 'Base', 'Y', 'A', 'T', 'C', 'G']
allele_stat = pd.DataFrame(columns=statnames, index=range(1, allele_len + 1))
allele_stat['Pos'] = range(1, allele_len + 1)
allele_stat['Base'] = [l for l in allele_seq]
allele_stat[['Mut', 'Total', 'Y', 'A', 'T', 'C', 'G']] = 0
for i in range(1, allele_len + 1):
if len(allele_mut) == 1:
counts = {}
### 20200916 Lawrence: updated .ix to .iloc
counts[allele_mut.iloc[:, i].squeeze()] = 1
else:
### 20200916 Lawrence: updated .ix to .iloc
counts = allele_mut.iloc[:, i].squeeze().value_counts()
countA = counts.get('A', 0)
countT = counts.get('T', 0)
countC = counts.get('C', 0)
countG = counts.get('G', 0)
countMut = countA + countT + countC + countG
countTotal = countMut + counts.get('.', 0)
allele_stat.loc[i, 'Mut'] = countMut
allele_stat.loc[i, 'Total'] = countTotal
allele_stat.loc[i, 'Y'] = float(countMut / countTotal) if countTotal > 0 else 0
allele_stat.loc[i, 'A'] = countA
allele_stat.loc[i, 'T'] = countT
allele_stat.loc[i, 'C'] = countC
allele_stat.loc[i, 'G'] = countG
allele_mut.to_csv(nuc_profile, sep="\t", index=False)
allele_stat.to_csv(nuc_stat, sep="\t", index=False)
# Obtain CDR1,2 from preprocessed V allele alignment using KABAT definition, and CDR3 from IMGT definition
cdr3_start = len(inferseq[0:312].replace('N', '')) + 1
cdr3_end = cdr3_start + group["JUNCTION_LENGTH"].unique()[0]
if allele in args.__dict__['V_CDR']:
cdr = args.__dict__['V_CDR'][allele]
cdrstring = 'cdr1_start=%s cdr1_end=%s cdr2_start=%s cdr2_end=%s ' \
'cdr3_start=%d cdr3_end=%d' % (cdr[0], cdr[1], cdr[2],
cdr[3], cdr3_start, cdr3_end)
else:
cdrstring = ''
CLONE = group["CLONE"].unique()[0]
J_ALLELE = group["J_CALL"].unique()[0]
anno = '_'.join(['Clone%d' % CLONE, allele, J_ALLELE])
# Filter group with read number
### 20200915 Lawrence: changed showsequence from false to true
os.system('Rscript %s/HTGTSrep/R/SHMPlot2.R %s %s plotrows=1 figureheight=2 '
'showsequence=TRUE ymax=%f %s annotation=%s' % (args.scriptdir, nuc_stat,
nuc_PDF, args.ymax_DNA, cdrstring, anno))
def profile_DNAmut_clonal_errbar(inferseq, group, nuc_stat_errbar, nuc_PDF_errbar, sample_files, args):
''' Prep DNA mutation profile, text and PDF file with error bars
Input:
inferseq: inferred sequence as reference
group: group instance
nuc_stat_errbar: stat file with sem value
nuc_PDF_errbar: PDF file with error bars
samples_files: list of sample files as sample_files[sample] = [root_PDF, root_stat]
'''
allele = group["V_CALL"].unique()[0]
if len(sample_files) > 1:
stat_list = []
for sample in sample_files:
stat = pd.read_csv(sample_files[sample][1], sep="\t")
stat_list.append(stat)
stat_all = pd.concat(stat_list, ignore_index=True)
pos_max = stat_all['Pos'].max()
statcols = list(stat_all) + ['Err']
        stat_new = pd.DataFrame(columns=statcols, dtype='int64')
# coding=utf-8
# Imports the Google Cloud client library
import logging
import string
import sys
import html
import numpy as np
import pandas as pd
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from google.cloud import language
from google.cloud import translate
from google.cloud.language import enums
from google.cloud.language import types
def get_ner_location_of_text(text):
no_random_road_groups = []
no_hospital_loc_groups = []
loc_groups = []
loc_entities = []
loc_entities_indices = []
loc_entities_word_indices = []
biggest_group_index = -1
reference_grouping = False
# Instantiates the clients
client = language.LanguageServiceClient()
translate_client = translate.Client()
# Translate
result = translate_client.translate(text, target_language='en', source_language='iw')
translated_text = result['translatedText']
translated_text = html.unescape(translated_text)
# Pre-processing - from what I saw only the first line has the location
translated_text = list(filter(None, translated_text.split('.')))[0]
# Analyze (Named Entity Recognition)
document = types.Document(content=translated_text, type=enums.Document.Type.PLAIN_TEXT)
response = client.analyze_entities(document=document)
# Getting the location entities and their indices in the text and adding them to a list
translated_text_word_split = list(filter(None, translated_text.split(' ')))
for entity in response.entities:
if entity.type == enums.Entity.Type.LOCATION:
if ' ' in entity.name:
for item in list(filter(None, entity.name.split(' '))):
loc_entities.append(item)
loc_entities_indices.append(translated_text.index(entity.name) + entity.name.index(item))
else:
loc_entities.append(entity.name)
loc_entities_indices.append(translated_text.index(entity.name))
# In case there is a reference to a previous location
if 'city' == entity.name.lower() or 'town' == entity.name.lower() or 'village' == entity.name.lower() or \
'junction' == entity.name.lower() or 'interchange' == entity.name.lower() or \
'intersect' == entity.name.lower() or 'street' == entity.name.lower():
reference_grouping = True
# Sort entities by appearing order in the string
loc_entities = [x for _, x in sorted(zip(loc_entities_indices, loc_entities))]
loc_entities_new = []
for item in loc_entities:
loc_entities_word_indices.append(
[idx for idx, s in enumerate(translated_text_word_split) if item in s][loc_entities_new.count(item)])
loc_entities_new.append(item)
loc_entities = loc_entities_new
# Location grouping - takes the largest group of words indicating location based on distance between groups
if len(loc_entities) >= 1:
diff = [loc_entities_word_indices[i + 1] - loc_entities_word_indices[i] for i in
range(len(loc_entities_word_indices) - 1)]
if diff and max(diff) > 5: # Distance is greater than 5 words
avg = sum(diff) / len(diff)
loc_groups = [[loc_entities_word_indices[0]]]
for x in loc_entities_word_indices[1:]:
if x - loc_groups[-1][-1] < avg:
loc_groups[-1].append(x)
else:
loc_groups.append([x])
# 'road' alone is recognised as a location, so if road is alone in the group, ignore it
no_random_road_groups = [group for group in loc_groups
if
not (len(group) == 1 and 'road' == translated_text_word_split[group[0]].lower())]
# We are not interested in the hospital location, unless the city isn't mentioned elsewhere
no_hospital_loc_groups = [group for group in no_random_road_groups
if not
any('hospital' in translated_text_word_split[item].lower() for item in group)]
bounds_loc_groups = [i[-1] - i[0] for ind, i in enumerate(no_hospital_loc_groups)]
biggest_group_index = bounds_loc_groups.index(max(bounds_loc_groups))
# Entities of the largest group
loc_entities = [translated_text_word_split[item] for item in no_hospital_loc_groups[biggest_group_index]]
# Getting the full string from the text indicating the location and not just entities
translated_location = translated_text[
translated_text.index(loc_entities[0]):translated_text.index(loc_entities[-1]) + len(
loc_entities[-1])]
# If there was a 'the' before the string, add it
if translated_text[translated_text.index(loc_entities[0]) - 4:translated_text.index(loc_entities[0])].lower() \
== 'the ':
translated_location = translated_text[
translated_text.index(loc_entities[0]) - 4:translated_text.index(
loc_entities[-1]) + len(
loc_entities[-1])]
# If a location without name is in the beginning of the string, add the previous word
if translated_location.lower().startswith('street') or translated_location.lower().startswith('interchange') \
or translated_location.lower().startswith('village') or translated_location.lower().startswith('town') \
or translated_location.lower().startswith('city') or translated_location.lower().startswith(
'intersection') \
or translated_location.lower().startswith('junction'):
translated_location = translated_text_word_split[translated_text_word_split.index(loc_entities[0]) - 1] \
+ ' ' + translated_location
reference_grouping = False
# Trying to solve the reference in case there is another group - first without the hospital group
if reference_grouping and len(no_hospital_loc_groups) >= 2:
previous = sys.maxsize
if biggest_group_index > 0:
previous = no_hospital_loc_groups[biggest_group_index][0] - \
no_hospital_loc_groups[biggest_group_index - 1][-1]
# Take the previous group, and from there, the last word, closest road to current group
if previous != sys.maxsize:
text_to_replace = translated_text_word_split[
no_hospital_loc_groups[biggest_group_index - 1][-1]]
if len(no_hospital_loc_groups[biggest_group_index - 1]) > 1:
last = no_hospital_loc_groups[biggest_group_index - 1][-1]
for index, val in enumerate(loc_groups[biggest_group_index - 1][::-1][1:]):
if last - val == 1:
text_to_replace = translated_text_word_split[
no_hospital_loc_groups[biggest_group_index - 1][
-2 - index]] + ' ' + text_to_replace
last = val
else:
break
translated_location = translated_location.replace(
'the junction', text_to_replace).replace(
'the intersect', text_to_replace).replace(
'the interchange', text_to_replace).replace(
'the street', text_to_replace).replace(
'the city', text_to_replace).replace(
'the town', text_to_replace).replace(
'the village', text_to_replace)
# Without hospital there weren't enough groups, so use it as well
elif reference_grouping and len(no_random_road_groups) >= 2:
previous = sys.maxsize
bounds_loc_groups = [i[-1] - i[0] for ind, i in enumerate(no_random_road_groups)]
biggest_group_index = bounds_loc_groups.index(max(bounds_loc_groups))
if biggest_group_index > 0:
previous = no_random_road_groups[biggest_group_index][0] - \
no_random_road_groups[biggest_group_index - 1][-1]
# Take the previous group, and from there, the last word, closest road to current group
if previous != sys.maxsize and 'hospital' not in \
translated_text_word_split[no_random_road_groups[biggest_group_index - 1][-1]].lower():
text_to_replace = translated_text_word_split[
no_random_road_groups[biggest_group_index - 1][-1]]
if len(no_random_road_groups[biggest_group_index - 1]) > 1:
last = no_random_road_groups[biggest_group_index - 1][-1]
for index, val in enumerate(loc_groups[biggest_group_index - 1][::-1][1:]):
if last - val == 1:
text_to_replace = translated_text_word_split[
no_random_road_groups[biggest_group_index - 1][
-2 - index]] + ' ' + text_to_replace
last = val
else:
break
translated_location = translated_location.replace(
'the junction', text_to_replace).replace(
'the intersect', text_to_replace).replace(
'the interchange', text_to_replace).replace(
'the street', text_to_replace).replace(
'the city', text_to_replace).replace(
'the town', text_to_replace).replace(
'the village', text_to_replace)
elif len(loc_entities) == 1:
        translated_location = loc_entities[0]
# If there was 'the' before the entity, add it
if translated_text[translated_text.index(loc_entities[0]) - 4:translated_text.index(loc_entities[0])].lower() \
== 'the ':
            translated_location = translated_text[
                translated_text.index(loc_entities[0]) - 4:translated_text.index(loc_entities[0]) + len(
                    loc_entities[0])]
# If the entity is a location without name, add previous word
if translated_location.lower().startswith('street') or translated_location.lower().startswith('interchange') \
or translated_location.lower().startswith('village') or translated_location.lower().startswith('town') \
or translated_location.lower().startswith('city') or translated_location.lower().startswith(
'intersection') \
or translated_location.lower().startswith('junction'):
translated_location = translated_text_word_split[translated_text_word_split.index(loc_entities[0]) - 1] \
+ ' ' + translated_location
else:
translated_location = ''
# Processing the location
translated_location = translated_location.strip()
if translated_location != '' and ',' == translated_location[-1]:
translated_location = translated_location[:-1]
translated_location = html.unescape(translated_location)
if translated_location == '':
translated_location = 'failed to extract location'
logging.info('location found: ' + translated_location)
return translated_location
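# --- Usage note (added; not part of the original module) ---
# get_ner_location_of_text expects a Hebrew news-flash string and relies on the
# Google Cloud Translation and Natural Language APIs, so valid credentials
# (GOOGLE_APPLICATION_CREDENTIALS) must be configured before calling it, e.g.:
#     location = get_ner_location_of_text(hebrew_news_flash)   # hypothetical variable
# It returns a short English location phrase, or 'failed to extract location'.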
def remove_text_inside_brackets(text, brackets="()[]{}"):
count = [0] * (len(brackets) // 2) # count open/close brackets
saved_chars = []
for character in text:
for i, b in enumerate(brackets):
if character == b: # found bracket
kind, is_close = divmod(i, 2)
count[kind] += (-1) ** is_close # `+1`: open, `-1`: close
if count[kind] < 0: # unbalanced bracket
count[kind] = 0 # keep it
else: # found bracket to remove
break
else: # character is not a [balanced] bracket
if not any(count): # outside brackets
saved_chars.append(character)
return ''.join(saved_chars)
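# Quick illustration (added for clarity): text inside any configured bracket pair is
# dropped and everything else is kept verbatim, e.g.
#     remove_text_inside_brackets('Route 4 (near the old bridge) was blocked')
#     -> 'Route 4  was blocked'   (note the doubled space where the brackets were)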
def preprocess_text(text, get_first=False):
table_no_dot = str.maketrans(string.punctuation.replace('.', ''),
' ' * len(string.punctuation.replace('.', ''))) # remove punctuation, without '.'
table = str.maketrans(string.punctuation, ' ' * len(string.punctuation)) # remove punctuation
if type(text) != str:
text = str(text)
if any(key in text for key in '()[]{}'):
text = remove_text_inside_brackets(text)
if get_first:
return (' '.join(text.translate(table_no_dot).split())).strip().split('.')[
0] # remove multiple whitespaces and return first sentence
else:
return (' '.join(text.translate(table).split())).strip() # remove multiple whitespaces
def preprocess_intersection(intersections):
intersections = intersections.replace('יישוב', '')
intersections = intersections.replace('ישוב', '')
intersections = intersections.replace('מושבה', '')
intersections = intersections.replace('מושב', '')
intersections = intersections.replace('צומת ל', '')
intersections = intersections.replace('צומת', '')
intersections = intersections.replace('מחלף', '')
intersections = intersections.replace('כניסה ל', '')
intersections = intersections.strip()
return intersections
def process_streets_table(addresses_df):
streets = pd.DataFrame(addresses_df.drop(
['road1', 'road2', 'non_urban_intersection_hebrew'], axis=1))
streets.yishuv_name = streets.yishuv_name.astype(str)
streets.street1_hebrew = streets.street1_hebrew.astype(str)
streets.street2_hebrew = streets.street2_hebrew.astype(str)
streets['city'] = streets.yishuv_name
streets['street1'] = streets.street1_hebrew
streets['street2'] = streets.street2_hebrew
streets.city = streets.city.apply(preprocess_text)
streets.street1 = streets.street1.apply(preprocess_text)
streets.street2 = streets.street2.apply(preprocess_text)
streets = streets[(streets.city != streets.street1) & (streets.city != streets.street2)
& (streets.city != 'NaN')]
streets = streets.replace('NaN', np.nan)
streets = streets.dropna(how='all')
streets = streets.drop_duplicates()
streets = streets.replace(np.nan, 'NaN')
return streets
def process_roads_table(addresses_df):
roads = pd.DataFrame(addresses_df[['road1', 'road2', 'non_urban_intersection_hebrew']])
roads.road1 = roads.road1.astype(str)
roads.road2 = roads.road2.astype(str)
roads.non_urban_intersection_hebrew = roads.non_urban_intersection_hebrew.astype(str)
roads['first_road'] = roads.road1
roads['second_road'] = roads.road2
roads['intersection'] = roads.non_urban_intersection_hebrew
roads.first_road = 'כביש ' + roads.first_road
roads.second_road = 'כביש ' + roads.second_road
roads.loc[roads.first_road == 'כביש -1'] = np.nan
roads.loc[roads.second_road == 'כביש -1'] = np.nan
roads.loc[roads.intersection == 'צומת'] = np.nan
roads.loc[roads.intersection == 'מחלף'] = np.nan
roads.intersection = roads.intersection.apply(preprocess_text)
roads.intersection = roads.intersection.apply(preprocess_intersection)
roads = roads.replace('nan', np.nan)
roads = roads.dropna(how='all')
roads = roads.drop_duplicates()
roads = roads.replace(np.nan, 'NaN')
return roads
def first_init():
addresses_df = pd.read_excel('anyway/parsers/news_flash/Addresses_new.xlsx', sheet_name='Sheet1')
addresses_df = addresses_df.fillna('NaN')
streets = process_streets_table(addresses_df)
roads = process_roads_table(addresses_df)
cities = streets.city.drop_duplicates()
streets.to_excel('anyway/parsers/news_flash/streets.xlsx')
roads.to_excel('anyway/parsers/news_flash/roads.xlsx')
cities.to_excel('anyway/parsers/news_flash/cities.xlsx')
def preprocess_urban_text(text, cities, threshold=90):
text_new = text
if 'רחוב ' in text:
text_new = text.split('רחוב ')[1].strip()
suspected_city = process.extractOne(text_new, cities, scorer=fuzz.partial_ratio, score_cutoff=threshold)
if suspected_city is None:
text_new = text
elif 'דרך' in text:
text_new = text.split('דרך')[1]
text_new = ('דרך' + text_new).strip()
suspected_city = process.extractOne(text_new, cities, scorer=fuzz.partial_ratio, score_cutoff=threshold)
if suspected_city is None:
text_new = text
elif "שד'" in text:
text_new = text.split("שד'")[1]
text_new = ("שד'" + text_new).strip()
suspected_city = process.extractOne(text_new, cities, scorer=fuzz.partial_ratio, score_cutoff=threshold)
if suspected_city is None:
text_new = text
return text_new
def preprocess_nonurban_text(text, intersections, threshold=80):
text_new = text
if 'צומת' in text:
text_new = text.split('צומת')[1].strip()
suspected_intersection = process.extractOne(text_new, intersections.intersection, scorer=fuzz.token_set_ratio,
score_cutoff=threshold)
if suspected_intersection is None:
text_new = text
elif 'מחלף' in text:
text_new = text.split('מחלף')[1].strip()
suspected_intersection = process.extractOne(text_new, intersections.intersection, scorer=fuzz.token_set_ratio,
score_cutoff=threshold)
if suspected_intersection is None:
text_new = text
elif 'כניסה ל' in text:
text_new = text.split('כניסה ל')[1].strip()
suspected_intersection = process.extractOne(text_new, intersections.intersection, scorer=fuzz.token_set_ratio,
score_cutoff=threshold)
if suspected_intersection is None:
text_new = text
elif 'כביש' in text:
text_new = text.split('כביש')[1].strip()
suspected_intersection = process.extractOne(text_new, intersections.intersection, scorer=fuzz.token_set_ratio,
score_cutoff=threshold)
if suspected_intersection is None:
text_new = text
return text_new
class UrbanAddress:
def __init__(self, city='NaN', street='NaN'):
self.city = city
self.street = street
def __str__(self):
return 'city: ' + str(self.city) + ', street: ' + \
str(self.street)
def __repr__(self):
return "UrbanAddress(%s, %s)" % (self.city, self.street)
def __eq__(self, other):
if isinstance(other, UrbanAddress):
return (self.city == other.city) and (self.street == other.street)
else:
return False
def __hash__(self):
return hash(self.__repr__())
class NonUrbanAddress:
def __init__(self, road1='NaN', road2='NaN', intersection='NaN'):
self.road1 = road1
self.road2 = road2
self.intersection = intersection
def __str__(self):
return 'road1: ' + str(self.road1) + ', road2:' \
+ str(self.road2) + ', intersection: ' + str(self.intersection)
def __repr__(self):
return "NonUrbanAddress(%s, %s, %s)" % (self.road1, self.road2, self.intersection)
def __eq__(self, other):
if isinstance(other, NonUrbanAddress):
return ((self.road1 == other.road1) and (self.road2 == other.road2) and (
self.intersection == other.intersection))
else:
return False
def __hash__(self):
return hash(self.__repr__())
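# --- Illustrative sketch (added; not part of the original module) ---
# The address classes are plain value objects; __eq__/__hash__ make them usable
# in sets and as dict keys, which is handy for deduplicating extracted locations.
# The city/street/road strings below are hypothetical examples. Never called here.
def _example_address_objects():
    a = UrbanAddress(city='תל אביב', street='דיזנגוף')
    b = NonUrbanAddress(road1='4', road2='20', intersection='גלילות')
    assert a == UrbanAddress(city='תל אביב', street='דיזנגוף')
    # the duplicate UrbanAddress hashes identically, so the set keeps two items
    assert len({a, b, UrbanAddress(city='תל אביב', street='דיזנגוף')}) == 2
    return a, b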
def process_urban(text, streets, cities, threshold_city=70, threshold_street=50, ratio=0.85):
text = preprocess_urban_text(text, cities)
suspected_city = process.extractOne(text, cities, scorer=fuzz.partial_ratio, score_cutoff=threshold_city)
if suspected_city is not None:
suspected_city = suspected_city[0]
streets_in_city = streets.loc[streets.city == suspected_city]
relevant_streets_1 = streets_in_city.loc[(streets_in_city.street1 != 'NaN')].street1
relevant_streets_2 = streets_in_city.loc[(streets_in_city.street2 != 'NaN')].street2
relevant_streets = relevant_streets_1.append(relevant_streets_2).drop_duplicates()
relevant_streets_scores = relevant_streets.apply(lambda x: streets_in_city
.loc[(streets_in_city.street1 == x) |
(streets_in_city.street2 == x)].avg_accidents.max())
relevant_streets = pd.DataFrame(
{'street': relevant_streets.tolist(), 'avg_accidents': relevant_streets_scores.tolist()})
suspected_streets = process.extract(text, list(set(relevant_streets.street.dropna().tolist())),
scorer=fuzz.token_set_ratio, limit=3)
if len(suspected_streets) > 0:
relevant_streets_scores = relevant_streets.loc[
relevant_streets.street.isin([suspected_street[0] for suspected_street in suspected_streets])].copy()
relevant_streets_scores.avg_accidents = (
relevant_streets_scores.avg_accidents / relevant_streets_scores.avg_accidents.max()).copy()
suspected_streets = [(suspected_street[0],
(ratio * fuzz.token_set_ratio(text, suspected_city[0] + ' ' + suspected_street[0]))
+ ((1 - ratio) * 100 * relevant_streets_scores.loc[
relevant_streets_scores.street == suspected_street[0]].avg_accidents.iloc[0]))
for suspected_street in suspected_streets if suspected_street is not None and
(ratio * fuzz.token_set_ratio(text, suspected_city[0] + ' ' + suspected_street[0]))
+ ((1 - ratio) * 100 * relevant_streets_scores.loc[
relevant_streets_scores.street == suspected_street[0]].avg_accidents.iloc[0])
> threshold_street]
if len(suspected_streets) > 0:
suspected_street = max(suspected_streets, key=lambda x: x[1])
suspected_street = suspected_street[0]
if suspected_street in streets_in_city.street1.tolist():
suspected_street = streets_in_city.loc[streets_in_city.street1 == suspected_street].iloc[0]
return UrbanAddress(city=suspected_street.yishuv_name, street=suspected_street.street1_hebrew)
else:
suspected_street = streets_in_city.loc[streets_in_city.street2 == suspected_street].iloc[0]
return UrbanAddress(city=suspected_street.yishuv_name, street=suspected_street.street2_hebrew)
return UrbanAddress(city=streets.loc[streets.city == suspected_city].yishuv_name.iloc[0])
return None
def process_intersection_first_road(text, roads, road1_candidates, threshold=50):
relevant_intersections = None
for road1_candidate in road1_candidates:
if relevant_intersections is None:
relevant_intersections = roads.loc[
(roads.first_road == road1_candidate) | (roads.second_road == road1_candidate)]
else:
relevant_intersections = relevant_intersections.append(
roads.loc[(roads.first_road == road1_candidate) | (roads.second_road == road1_candidate)])
if relevant_intersections is not None:
relevant_intersections = relevant_intersections.drop_duplicates()
text = preprocess_nonurban_text(text, relevant_intersections)
suspected_intersection = process.extractOne(text,
list(set(relevant_intersections.intersection.dropna().tolist())),
scorer=fuzz.token_set_ratio, score_cutoff=threshold)
if suspected_intersection is not None:
suspected_intersection = suspected_intersection[0]
suspected_road = \
relevant_intersections.loc[relevant_intersections.intersection == suspected_intersection].iloc[0]
first_road_value = suspected_road.road1
second_road_value = suspected_road.road2
intersection_value = suspected_road.non_urban_intersection_hebrew
return NonUrbanAddress(road1=first_road_value, road2=second_road_value, intersection=intersection_value)
return NonUrbanAddress(road1=road1_candidates[0].replace('כביש ', ''))
def process_intersection_no_roads(text, roads, threshold=50):
relevant_intersections = roads.drop_duplicates()
text = preprocess_nonurban_text(text, relevant_intersections)
suspected_intersection = process.extractOne(text, list(set(relevant_intersections.intersection.dropna().tolist())),
scorer=fuzz.token_set_ratio, score_cutoff=threshold)
if suspected_intersection is not None:
suspected_intersection = suspected_intersection[0]
suspected_road = relevant_intersections.loc[relevant_intersections.intersection == suspected_intersection].iloc[
0]
first_road_value = suspected_road.road1
second_road_value = suspected_road.road2
intersection_value = suspected_road.non_urban_intersection_hebrew
return NonUrbanAddress(road1=first_road_value, road2=second_road_value, intersection=intersection_value)
return None
def process_intersections_both_roads(text, roads, roads_candidates, threshold=50):
relevant_intersections = None
for candidate in roads_candidates:
if relevant_intersections is None:
relevant_intersections = roads.loc[
((roads.first_road == candidate[0]) & (roads.second_road == candidate[1])) | (
(roads.first_road == candidate[1]) & (roads.second_road == candidate[0]))]
else:
relevant_intersections = relevant_intersections.append(roads.loc[((roads.first_road == candidate[0]) & (
roads.second_road == candidate[1])) | ((roads.first_road == candidate[1]) &
(roads.second_road == candidate[0]))])
if relevant_intersections is not None:
relevant_intersections = relevant_intersections.drop_duplicates()
text = preprocess_nonurban_text(text, relevant_intersections)
suspected_intersection = process.extractOne(text,
list(set(relevant_intersections.intersection.dropna().tolist())),
scorer=fuzz.token_set_ratio, score_cutoff=threshold)
if suspected_intersection is not None:
suspected_intersection = suspected_intersection[0]
suspected_road = \
relevant_intersections.loc[relevant_intersections.intersection == suspected_intersection].iloc[0]
first_road_value = suspected_road.road1
second_road_value = suspected_road.road2
intersection_value = suspected_road.non_urban_intersection_hebrew
return NonUrbanAddress(road1=first_road_value, road2=second_road_value, intersection=intersection_value)
return NonUrbanAddress(road1=roads_candidates[0][0].replace('כביש ', ''),
road2=roads_candidates[0][1].replace('כביש ', ''))
def is_urban(text):
road_examples = ['כביש ' + str(digit) for digit in range(10)]
return not any(road_example in text for road_example in road_examples)
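# Note (added for clarity): the urban/non-urban split is purely textual -- any mention
# of 'כביש <digit>' (a numbered road) marks the flash as non-urban, e.g.
#     is_urban('תאונה בכביש 4 סמוך לצומת')   -> False
#     is_urban('תאונה ברחוב הרצל בחיפה')     -> True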
def process_nonurban(text, roads):
road1_candidates = []
roads_candidates = []
for road1 in roads.first_road:
if text.find(road1) != -1:
if text.endswith(road1) or not \
('0' <= text[text.find(road1) + len(road1)] <= '9'):
road1_candidates.append(road1)
if len(road1_candidates) > 0:
road1_candidates = list(sorted(set(road1_candidates)))
for road1 in road1_candidates:
road2_candidates = roads.loc[roads.first_road==road1].second_road.dropna().tolist()
for road2 in road2_candidates:
if text.find(road2) != -1:
if text.endswith(road2) or not \
('0' <= text[text.find(road2) + len(road2)] <= '9'):
roads_candidates.append((road1, road2))
if len(roads_candidates) > 0:
roads_candidates = list(sorted(set(roads_candidates)))
return process_intersections_both_roads(text, roads, roads_candidates)
else:
return process_intersection_first_road(text, roads, road1_candidates)
else:
return process_intersection_no_roads(text, roads)
def get_db_matching_location_of_text(text):
text = preprocess_text(text, True)
if is_urban(text):
streets = pd.read_excel('anyway/parsers/news_flash/streets.xlsx', sheet_name='Sheet1')
        cities = pd.read_excel('anyway/parsers/news_flash/cities.xlsx', sheet_name='Sheet1')
from isitfit.cost.ec2_analyze import BinCapUsed
import datetime as dt
import pytest
import pandas as pd
@pytest.fixture
def FakeMm():
class FakeMm:
StartTime = dt.datetime(2019,1,15)
EndTime = dt.datetime(2019,4,15)
return FakeMm
class TestBinCapUsedHandlePre:
def test_preNoBreak(self, FakeMm):
bcs = BinCapUsed()
ret = bcs.handle_pre({'mainManager': FakeMm()})
assert ret is not None
def test_3m(self, FakeMm):
bcs = BinCapUsed()
bcs.handle_pre({'mainManager': FakeMm()})
e = pd.DataFrame([
(dt.date(2019,1,31), 0, 0, 0, frozenset([]), dt.date(2019,1,31), dt.date(2019,1,1), ),
(dt.date(2019,2,28), 0, 0, 0, frozenset([]), dt.date(2019,2,28), dt.date(2019,2,1), ),
(dt.date(2019,3,31), 0, 0, 0, frozenset([]), dt.date(2019,3,31), dt.date(2019,3,1), ),
(dt.date(2019,4,30), 0, 0, 0, frozenset([]), dt.date(2019,4,30), dt.date(2019,4,1), ),
],
columns=['Timestamp', 'capacity_usd', 'used_usd', 'count_analyzed', 'regions_set', 'dt_start', 'dt_end']
)
for fx in ['Timestamp', 'dt_start', 'dt_end']: e[fx] = pd.to_datetime(e[fx])
e.set_index('Timestamp', inplace=True)
pd.testing.assert_frame_equal(e, bcs.df_bins)
class TestBinCapUsedPerEc2:
def test_1m(self, FakeMm):
# prepare input
df1 = pd.DataFrame([
(dt.date(2019,1,15), 10, 50),
(dt.date(2019,1,16), 12, 50),
(dt.date(2019,1,17), 12, 50),
],
columns=['Timestamp','capacity_usd','used_usd']
)
# calculate
bcs = BinCapUsed()
bcs.handle_pre({'mainManager': FakeMm()})
ctx = {'ec2_df': df1, 'ec2_dict': {'Region': 'us-west-2'}}
bcs.per_ec2(ctx)
bcs.per_ec2(ctx)
# expected
e = pd.DataFrame([
(dt.date(2019,1,31), 68, 300, 2, frozenset(['us-west-2']), dt.date(2019,1,15), dt.date(2019,1,17), ),
(dt.date(2019,2,28), 0, 0, 0, frozenset([]), dt.date(2019,2,28), dt.date(2019,2, 1), ),
(dt.date(2019,3,31), 0, 0, 0, frozenset([]), dt.date(2019,3,31), dt.date(2019,3, 1), ),
(dt.date(2019,4,30), 0, 0, 0, frozenset([]), dt.date(2019,4,30), dt.date(2019,4, 1), ),
],
columns=['Timestamp', 'capacity_usd', 'used_usd', 'count_analyzed', 'regions_set', 'dt_start', 'dt_end']
)
for fx in ['Timestamp', 'dt_start', 'dt_end']: e[fx] = pd.to_datetime(e[fx])
e.set_index('Timestamp', inplace=True)
# test expected = actual
pd.testing.assert_frame_equal(e, bcs.df_bins)
def test_3m(self, FakeMm):
# prepare input
s_ts = pd.date_range(start=dt.date(2019,1,15), end=dt.date(2019,4,15), freq='D')
# parameters for simple case, no fluctuations
cap1 = 10 # USD/day
cap2 = 20 # USD/day
#import numpy as np
# s_used = np.random.rand(len(s_ts)) # random usage between 0 and 100%
s_used = 0.3 # 30% usage
# dataframes
df1 = pd.DataFrame({
'Timestamp': s_ts,
'capacity_usd': cap1,
'used_usd': s_used*cap1
})
df2 = pd.DataFrame({
'Timestamp': s_ts,
'capacity_usd': cap2,
'used_usd': s_used*cap2
})
# int for simplicity
df1['used_usd'] = df1['used_usd'].astype(int)
df2['used_usd'] = df2['used_usd'].astype(int)
# calculate
bcs = BinCapUsed()
bcs.handle_pre({'mainManager': FakeMm()})
ctx1 = {'ec2_df': df1, 'ec2_dict': {'Region': 'us-west-2'}}
bcs.per_ec2(ctx1)
ctx2 = {'ec2_df': df2, 'ec2_dict': {'Region': 'us-west-2'}}
bcs.per_ec2(ctx2)
# expected
e = pd.DataFrame([
(dt.date(2019,1,31), 510, 153, 2, frozenset(['us-west-2']), dt.date(2019,1,15), dt.date(2019,1,31), ),
(dt.date(2019,2,28), 840, 252, 2, frozenset(['us-west-2']), dt.date(2019,2, 1), dt.date(2019,2,28), ),
(dt.date(2019,3,31), 930, 279, 2, frozenset(['us-west-2']), dt.date(2019,3, 1), dt.date(2019,3,31), ),
(dt.date(2019,4,30), 450, 135, 2, frozenset(['us-west-2']), dt.date(2019,4, 1), dt.date(2019,4,15), ),
],
columns=['Timestamp', 'capacity_usd', 'used_usd', 'count_analyzed', 'regions_set', 'dt_start', 'dt_end']
)
for fx in ['Timestamp', 'dt_start', 'dt_end']: e[fx] = pd.to_datetime(e[fx])
e.set_index('Timestamp', inplace=True)
# test expected = actual
pd.testing.assert_frame_equal(e, bcs.df_bins)
class TestBinCapUsedAfterAll:
def test_preNoBreak(self, FakeMm):
bcs = BinCapUsed()
ret = bcs.handle_pre({'mainManager': FakeMm()})
assert ret is not None
def test_3m(self, FakeMm):
bcs = BinCapUsed()
bcs.handle_pre({'mainManager': FakeMm()})
bcs.after_all({})
import numpy as np
e = pd.DataFrame([
(dt.date(2019,1,31), 0, 0, 0, frozenset([]), np.nan, np.nan, 0, '0', ),
(dt.date(2019,2,28), 0, 0, 0, frozenset([]), np.nan, np.nan, 0, '0', ),
(dt.date(2019,3,31), 0, 0, 0, frozenset([]), np.nan, np.nan, 0, '0', ),
(dt.date(2019,4,30), 0, 0, 0, frozenset([]), np.nan, np.nan, 0, '0', ),
],
columns=['Timestamp', 'capacity_usd', 'used_usd', 'count_analyzed', 'regions_set', 'dt_start', 'dt_end', 'used_pct', 'regions_str']
)
for fx in ['Timestamp', 'dt_start', 'dt_end']: e[fx] = pd.to_datetime(e[fx])
e.set_index('Timestamp', inplace=True)
pd.testing.assert_frame_equal(e, bcs.df_bins)
@pytest.fixture
def example_dataframe():
import datetime as dt
d = pd.date_range(dt.date(2019,9,1), dt.date(2019,12,1))
df = pd.DataFrame({'Timestamp': d, 'a': range(len(d))})
df['Timestamp'] = pd.to_datetime(df.Timestamp)
dt_start = df.Timestamp.min()
dt_end = df.Timestamp.max()
df = df.set_index('Timestamp')
return df, dt_start, dt_end
class TestBinCapUsedResample:
def test_07d(self, example_dataframe):
dfi, dt_start, dt_end = example_dataframe
bcs = BinCapUsed()
bcs._set_freq(7)
dfe_actual = bcs.do_resample_end(dfi).sum()
dfs_actual = bcs.do_resample_start(dfi).sum()
#dfe_actual = bcs.fix_resample_end(dfe_actual, dfs_actual)
#dfs_actual = bcs.fix_resample_start(dfs_actual, dfe_actual, dt_start, dt_end)
idxs_expected = pd.date_range(dt.date(2019,9,1), dt.date(2019,12,1))
assert (dfs_actual.index == idxs_expected).all()
# notice this is different than the "df_daily.resample('1D', label='right')" below
idxe_expected = pd.date_range(dt.date(2019,9,1), dt.date(2019,12,1))
assert (dfe_actual.index == idxe_expected).all()
def test_30d(self, example_dataframe):
dfi, dt_start, dt_end = example_dataframe
bcs = BinCapUsed()
bcs._set_freq(30)
dfe_actual = bcs.do_resample_end(dfi).sum()
dfs_actual = bcs.do_resample_start(dfi).sum()
#dfe_actual = bcs.fix_resample_end(dfe_actual, dfs_actual)
#dfs_actual = bcs.fix_resample_start(dfs_actual, dfe_actual, dt_start, dt_end)
idxs_expected = [
dt.date(2019, 8,26),
dt.date(2019, 9, 2), dt.date(2019, 9, 9), dt.date(2019, 9,16), dt.date(2019, 9,23),
dt.date(2019, 9,30),
dt.date(2019,10, 7), dt.date(2019,10,14), dt.date(2019,10,21), dt.date(2019,10,28),
dt.date(2019,11, 4), dt.date(2019,11,11), dt.date(2019,11,18), dt.date(2019,11,25),
]
assert (dfs_actual.index == idxs_expected).all()
idxe_expected = [
dt.date(2019, 9, 1), dt.date(2019, 9, 8), dt.date(2019, 9,15), dt.date(2019, 9,22),
dt.date(2019, 9,29),
dt.date(2019,10, 6), dt.date(2019,10,13), dt.date(2019,10,20), dt.date(2019,10,27),
dt.date(2019,11, 3), dt.date(2019,11,10), dt.date(2019,11,17), dt.date(2019,11,24),
dt.date(2019,12, 1),
]
assert (dfe_actual.index == idxe_expected).all()
def test_60d(self, example_dataframe):
dfi, dt_start, dt_end = example_dataframe
bcs = BinCapUsed()
bcs._set_freq(60)
dfe_actual = bcs.do_resample_end(dfi).sum()
dfs_actual = bcs.do_resample_start(dfi).sum()
#dfe_actual = bcs.fix_resample_end(dfe_actual, dfs_actual)
#dfs_actual = bcs.fix_resample_start(dfs_actual, dfe_actual, dt_start, dt_end)
idxs_expected = [
dt.date(2019, 9, 1), dt.date(2019, 9,15),
dt.date(2019,10, 1), dt.date(2019,10,15),
dt.date(2019,11, 1), dt.date(2019,11,15),
dt.date(2019,12, 1),
]
assert (dfs_actual.index == idxs_expected).all()
# notice this is different than idx_exp_1SM_right_right below
# Update 2019-12-17 without the fix_resample_* function, this is the same as idx_exp_1SM_right_right
#idxe_expected = [
# dt.date(2019, 9,14), dt.date(2019, 9,30),
# dt.date(2019,10,14), dt.date(2019,10,31),
# dt.date(2019,11,14), dt.date(2019,11,30),
# dt.date(2019,12,14),
#]
idxe_expected = [
dt.date(2019, 9,15), dt.date(2019, 9,30),
dt.date(2019,10,15), dt.date(2019,10,31),
dt.date(2019,11,15), dt.date(2019,11,30),
dt.date(2019,12,15),
]
assert (dfe_actual.index == idxe_expected).all()
def test_90d(self, example_dataframe):
dfi, dt_start, dt_end = example_dataframe
bcs = BinCapUsed()
bcs._set_freq(90)
dfe_actual = bcs.do_resample_end(dfi).sum()
dfs_actual = bcs.do_resample_start(dfi).sum()
#dfe_actual = bcs.fix_resample_end(dfe_actual, dfs_actual)
#dfs_actual = bcs.fix_resample_start(dfs_actual, dfe_actual, dt_start, dt_end)
idxs_expected = [
dt.date(2019, 9, 1),
dt.date(2019,10, 1),
dt.date(2019,11, 1),
dt.date(2019,12, 1),
]
assert (dfs_actual.index == idxs_expected).all()
idxe_expected = [
dt.date(2019, 9,30),
dt.date(2019,10,31),
dt.date(2019,11,30),
dt.date(2019,12,31),
]
assert (dfe_actual.index == idxe_expected).all()
class TestPandasResample:
"""
General tests on pandas resample method
"""
def test_resample(self, example_dataframe):
"""
Setting closed=... only affects 1 case (check comments below)
"""
df_daily, dt_start, dt_end = example_dataframe
# expected indeces
idx_exp_1WMON_left_left = [
dt.date(2019, 8,26),
dt.date(2019, 9, 2), dt.date(2019, 9, 9), dt.date(2019, 9,16), dt.date(2019, 9,23),
dt.date(2019, 9,30),
dt.date(2019,10, 7), dt.date(2019,10,14), dt.date(2019,10,21), dt.date(2019,10,28),
dt.date(2019,11, 4), dt.date(2019,11,11), dt.date(2019,11,18), dt.date(2019,11,25),
]
col_exp_1WMON_left_left = [
0,
28, 77, 126, 175,
224,
273, 322, 371, 420,
469, 518, 567, 616,
]
df_exp_1WMON_left_left = pd.DataFrame({'a': col_exp_1WMON_left_left, 'Timestamp': idx_exp_1WMON_left_left})
df_exp_1WMON_left_left['Timestamp'] = pd.to_datetime(df_exp_1WMON_left_left.Timestamp)
df_exp_1WMON_left_left.set_index('Timestamp', inplace=True)
col_exp_1WMON_left_skip = [
1,
35, 84, 133, 182,
231,
280, 329, 378, 427,
476, 525, 574, 531,
]
df_exp_1WMON_left_skip = pd.DataFrame({'a': col_exp_1WMON_left_skip, 'Timestamp': idx_exp_1WMON_left_left})
        df_exp_1WMON_left_skip['Timestamp'] = pd.to_datetime(df_exp_1WMON_left_skip.Timestamp)
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Make model predictions using this load.py script. This loads in all models in this
directory and makes predictions on a target folder. Note that files in this target
directory will be featurized with the default features as specified by the settings.json.
Usage: python3 load.py [target directory] [sampletype] [target model directory]
Example: python3 load.py /Users/jim/desktop/allie/load_dir audio /Users/jim/desktop/gender_tpot_classifier
Alt Usage: python3 load.py
--> this just loads all the models and makes predictions in the ./load_dir
'''
import os, json, pickle, time, sys, shutil
import pandas as pd
import numpy as np
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
def model_schema():
models={'audio': dict(),
'text': dict(),
'image': dict(),
'video': dict(),
'csv': dict()
}
return models
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
filetypes=list(set(filetypes))
return filetypes
def get_features(models, actual_model_dir, sampletype):
models=models['%s_models'%(sampletype)]
features=list()
for i in range(len(models)):
os.chdir(actual_model_dir+'/'+models[i])
temp_settings=json.load(open('settings.json'))
features=features+temp_settings['default_%s_features'%(sampletype)]
# get only the necessary features for all models
default_features=list(set(features))
return default_features
def featurize(features_dir, load_dir, model_dir, filetypes, models):
# contextually load the proper features based on the model information
actual_model_dir=prev_dir(features_dir)+'/models/'+model_dir
# get default features
sampletype=model_dir.split('_')[0]
default_features=get_features(models, actual_model_dir, sampletype)
# now change to proper directory for featurization
if model_dir=='audio_models' and 'audio' in filetypes:
os.chdir(features_dir+'/audio_features')
elif model_dir=='text_models' and 'text' in filetypes:
models=models['text_models']
os.chdir(features_dir+'/text_features')
elif model_dir=='image_models' and 'image' in filetypes:
models=models['image_models']
os.chdir(features_dir+'/image_features')
elif model_dir=='video_models' and 'video' in filetypes:
models=models['video_models']
os.chdir(features_dir+'/video_features')
elif model_dir=='csv_models' and 'csv' in filetypes:
models=models['csv_models']
os.chdir(features_dir+'/csv_features')
# call featurization API via default features
for i in range(len(default_features)):
print(os.getcwd())
os.system('python3 featurize.py %s %s'%(load_dir, default_features[i]))
def find_files(model_dir):
print(model_dir)
jsonfiles=list()
csvfiles=list()
if model_dir == 'audio_models':
listdir=os.listdir()
print(listdir)
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.wav') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'text_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.txt') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'image_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.png') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'video_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.mp4') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir =='csv_models':
# csv files are a little different here
listdir=os.listdir()
for i in range(len(listdir)):
csvfile='featurized_'+listdir[i]
if listdir[i].endswith('.csv') and csvfile in listdir:
csvfiles.append(csvfile)
else:
jsonfiles=[]
print(jsonfiles)
return jsonfiles, csvfiles
def make_predictions(sampletype, transformer, clf, modeltype, jsonfiles, csvfiles, default_features, classes, modeldata, model_dir):
'''
    get the metrics associated with a classification and regression problem
and output a .JSON file with the training session.
'''
sampletype=sampletype.split('_')[0]
if sampletype != 'csv':
for k in range(len(jsonfiles)):
try:
g=json.load(open(jsonfiles[k]))
print(sampletype)
print(g)
features=list()
print(default_features)
for j in range(len(default_features)):
print(sampletype)
features=features+g['features'][sampletype][default_features[j]]['features']
labels=g['features'][sampletype][default_features[0]]['labels']
print(transformer)
print(features)
if transformer != '':
features=np.array(transformer.transform(np.array(features).reshape(1, -1))).reshape(1, -1)
else:
features=np.array(features).reshape(1,-1)
print(features)
metrics_=dict()
print(modeltype)
if modeltype not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(features)
elif modeltype=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif modeltype == 'autogluon':
curdir=os.getcwd()
os.chdir(model_dir+'/model/')
from autogluon import TabularPrediction as task
print(os.getcwd())
if transformer != '':
new_features=dict()
for i in range(len(features[0])):
new_features['feature_%s'%(str(i))]=[features[0][i]]
print(new_features)
df=pd.DataFrame(new_features)
else:
df=pd.DataFrame(features, columns=labels)
y_pred=clf.predict(df)
os.chdir(curdir)
elif modeltype == 'autokeras':
curdir=os.getcwd()
os.chdir(model_dir+'/model')
print(os.getcwd())
y_pred=clf.predict(features).flatten()
os.chdir(curdir)
elif modeltype == 'autopytorch':
y_pred=clf.predict(features).flatten()
elif modeltype == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
y_pred = clf.predict(data)
os.chdir(curdir)
elif modeltype == 'ludwig':
data=
|
pd.read_csv('test.csv')
|
pandas.read_csv
|
# Copyright 2018 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import ntpath
import tempfile
import time
import pandas as pd
import requests
from google.cloud import bigquery
from google.protobuf.timestamp_pb2 import Timestamp
import feast.sdk.client
from feast.core.CoreService_pb2 import CoreServiceTypes
from feast.sdk.resources.entity import Entity
from feast.sdk.resources.feature import Feature
from feast.sdk.utils.bq_util import head
from feast.sdk.utils.gs_utils import gcs_to_df, is_gs_path, df_to_gcs
from feast.sdk.utils.print_utils import spec_to_yaml
from feast.sdk.utils.types import dtype_to_value_type
from feast.specs.ImportSpec_pb2 import ImportSpec, Schema
class Importer:
def __init__(self, specs, df, properties):
self._properties = properties
self._specs = specs
self.df = df
@property
def source(self):
"""str: source of the data"""
return self._properties.get("source")
@property
def size(self):
"""str: number of rows in the data"""
return self._properties.get("size")
@property
def require_staging(self):
"""bool: whether the data needs to be staged"""
return self._properties.get("require_staging")
@property
def remote_path(self):
"""str: remote path of the file"""
return self._properties.get("remote_path")
@property
def spec(self):
"""feast.specs.ImportSpec_pb2.ImportSpec:
import spec for this dataset"""
return self._specs.get("import")
@property
def features(self):
"""list[feast.specs.FeatureSpec_pb2.FeatureSpec]:
list of features associated with this dataset"""
return self._specs.get("features")
@property
def entity(self):
"""feast.specs.EntitySpec_pb2.EntitySpec:
entity associated with this dataset"""
return self._specs.get("entity")
@classmethod
def from_csv(
cls,
path,
entity,
owner,
staging_location=None,
id_column=None,
feature_columns=None,
timestamp_column=None,
timestamp_value=None,
serving_store=None,
warehouse_store=None,
job_options={},
):
"""Creates an importer from a given csv dataset.
This file can be either local or remote (in gcs). If it's a local file
then staging_location must be determined.
Args:
path (str): path to csv file
entity (str): entity id
owner (str): owner
staging_location (str, optional): Defaults to None. Staging location
for ingesting a local csv file.
id_column (str, optional): Defaults to None. Id column in the csv.
If not set, will default to the `entity` argument.
feature_columns ([str], optional): Defaults to None. Feature columns
to ingest. If not set, the importer will by default ingest all
available columns.
timestamp_column (str, optional): Defaults to None. Timestamp
column in the csv. If not set, defaults to timestamp value.
timestamp_value (datetime, optional): Defaults to current datetime.
Timestamp value to assign to all features in the dataset.
serving_store (feast.sdk.resources.feature.DataStore): Defaults to None.
Serving store to write the features in this instance to.
warehouse_store (feast.sdk.resources.feature.DataStore): Defaults to None.
Warehouse store to write the features in this instance to.
job_options (dict): Defaults to empty dict. Additional job options.
Returns:
Importer: the importer for the dataset provided.
"""
src_type = "file.csv"
source_options = {}
source_options["path"], require_staging = _get_remote_location(
path, staging_location
)
if is_gs_path(path):
df = gcs_to_df(path)
else:
df = pd.read_csv(path)
schema, features = _detect_schema_and_feature(
entity,
owner,
id_column,
feature_columns,
timestamp_column,
timestamp_value,
serving_store,
warehouse_store,
df,
)
iport_spec = _create_import(
src_type, source_options, job_options, entity, schema
)
props = _properties(
src_type, len(df.index), require_staging, source_options["path"]
)
specs = _specs(iport_spec, Entity(name=entity), features)
return cls(specs, df, props)
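    # Example (illustrative only; the csv path, entity name and columns below are
    # assumptions, not part of the Feast SDK):
    #
    #   importer = Importer.from_csv(
    #       path="driver_features.csv",
    #       entity="driver",
    #       owner="bob@example.com",
    #       staging_location="gs://my-bucket/staging",
    #       id_column="driver_id",
    #       timestamp_column="event_timestamp",
    #   )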
@classmethod
def from_bq(
cls,
bq_path,
entity,
owner,
limit=10,
id_column=None,
feature_columns=None,
timestamp_column=None,
timestamp_value=None,
serving_store=None,
warehouse_store=None,
job_options={},
):
"""Creates an importer from a given bigquery table.
Args:
bq_path (str): path to bigquery table, in the format
project.dataset.table
entity (str): entity id
owner (str): owner
limit (int, optional): Defaults to 10. The maximum number of rows to
read into the importer df.
id_column (str, optional): Defaults to None. Id column in the csv.
If not set, will default to the `entity` argument.
feature_columns ([str], optional): Defaults to None. Feature columns
to ingest. If not set, the importer will by default ingest all
available columns.
timestamp_column (str, optional): Defaults to None. Timestamp
column in the csv. If not set, defaults to timestamp value.
timestamp_value (datetime, optional): Defaults to current datetime.
Timestamp value to assign to all features in the dataset.
serving_store (feast.sdk.resources.feature.DataStore): Defaults to None.
Serving store to write the features in this instance to.
warehouse_store (feast.sdk.resources.feature.DataStore): Defaults to None.
Warehouse store to write the features in this instance to.
job_options (dict): Defaults to empty dict. Additional job options.
Returns:
Importer: the importer for the dataset provided.
"""
cli = bigquery.Client()
project, dataset_id, table_id = bq_path.split(".")
dataset_ref = cli.dataset(dataset_id, project=project)
table_ref = dataset_ref.table(table_id)
table = cli.get_table(table_ref)
source_options = {"project": project, "dataset": dataset_id, "table": table_id}
df = head(cli, table, limit)
schema, features = _detect_schema_and_feature(
entity,
owner,
id_column,
feature_columns,
timestamp_column,
timestamp_value,
serving_store,
warehouse_store,
df,
)
iport_spec = _create_import(
"bigquery", source_options, job_options, entity, schema
)
props = _properties("bigquery", table.num_rows, False, None)
specs = _specs(iport_spec, Entity(name=entity), features)
return cls(specs, df, props)
@classmethod
def from_df(
cls,
df,
entity,
owner,
staging_location=None,
id_column=None,
feature_columns=None,
timestamp_column=None,
timestamp_value=None,
serving_store=None,
warehouse_store=None,
job_options={},
):
"""Creates an importer from a given pandas dataframe.
To import a file from a dataframe, the data will have to be staged.
Args:
            df (pandas.DataFrame): dataframe to import
entity (str): entity id
owner (str): owner
staging_location (str, optional): Staging location for ingesting a local csv file.
id_column (str, optional): Defaults to None. Id column in the csv.
If not set, will default to the `entity` argument.
feature_columns ([str], optional): Defaults to None. Feature columns
to ingest. If not set, the importer will by default ingest all
available columns.
timestamp_column (str, optional): Defaults to None. Timestamp
column in the csv. If not set, defaults to timestamp value.
timestamp_value (datetime, optional): Defaults to current datetime.
Timestamp value to assign to all features in the dataset.
serving_store (feast.sdk.resources.feature.DataStore): Defaults to None.
Serving store to write the features in this instance to.
warehouse_store (feast.sdk.resources.feature.DataStore): Defaults to None.
Warehouse store to write the features in this instance to.
job_options (dict): Defaults to empty dict. Additional job options.
Returns:
Importer: the importer for the dataset provided.
"""
tmp_file_name = "tmp_{}_{}.csv".format(entity, int(round(time.time() * 1000)))
src_type = "file.csv"
source_options = {}
source_options["path"], require_staging = _get_remote_location(
tmp_file_name, staging_location
)
schema, features = _detect_schema_and_feature(
entity,
owner,
id_column,
feature_columns,
timestamp_column,
timestamp_value,
serving_store,
warehouse_store,
df,
)
iport_spec = _create_import(
src_type, source_options, job_options, entity, schema
)
props = _properties(
"dataframe", len(df.index), require_staging, source_options["path"]
)
specs = _specs(iport_spec, Entity(name=entity), features)
return cls(specs, df, props)
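    # Example (illustrative only; the dataframe contents and staging bucket are
    # assumptions):
    #
    #   df = pd.DataFrame({"driver_id": [1, 2], "trips_today": [5, 8]})
    #   importer = Importer.from_df(
    #       df,
    #       entity="driver",
    #       owner="bob@example.com",
    #       staging_location="gs://my-bucket/staging",
    #       id_column="driver_id",
    #   )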
# noinspection PyProtectedMember
def stage(self, feast_client: feast.sdk.client.Client):
"""
Stage the data to its remote location
Args:
feast_client:
Returns: None
"""
if not self.require_staging:
return
ts_col = self.spec.schema.timestampColumn
if ts_col != "":
_convert_timestamp(self.df, ts_col)
# staging is required but user does not provide staging location
# importer will request on-demand upload URL from Feast Core
if not self.remote_path:
if self.spec.type.upper() != "FILE.CSV":
raise ValueError(
"Only type 'csv' is currently supported for using Importer without staging location"
)
request = CoreServiceTypes.GetUploadUrlRequest(
fileType=CoreServiceTypes.GetUploadUrlRequest.FileType.Value("CSV")
)
response = feast_client._core_service_stub.GetUploadUrl(request)
with tempfile.NamedTemporaryFile() as df_tempfile:
self.df.to_csv(df_tempfile.name, index=False)
requests.put(url=response.url, data=df_tempfile)
self.spec.sourceOptions.update({"path": f"gs://{response.path}"})
else:
df_to_gcs(self.df, self.remote_path)
def describe(self):
"""Print out the import spec.
"""
print(spec_to_yaml(self.spec))
def dump(self, path):
"""Dump the import spec to the provided path
Arguments:
path (str): path to dump the spec to
"""
with open(path, "w") as f:
f.write(spec_to_yaml(self.spec))
print("Saved spec to {}".format(path))
def _convert_timestamp(df, timestamp_col):
"""Converts the given df's timestamp column to ISO8601 format
"""
df[timestamp_col] =
|
pd.to_datetime(df[timestamp_col])
|
pandas.to_datetime
|
import numpy as np
import pandas as pd
import sys
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import pyqtgraph
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtTest import *
from Model_module import Model_module
from Data_module import Data_module
# from Sub_widget import another_result_explain
class Worker(QObject):
    # Containers (signals) used to send results to the GUI #############
train_value = pyqtSignal(object)
# nor_ab_value = pyqtSignal(object)
procedure_value = pyqtSignal(object)
verif_value = pyqtSignal(object)
timer = pyqtSignal(object)
symptom_db = pyqtSignal(object)
shap = pyqtSignal(object)
plot_db = pyqtSignal(object)
display_ex = pyqtSignal(object, object, object)
another_shap = pyqtSignal(object, object, object)
another_shap_table = pyqtSignal(object)
##########################################
@pyqtSlot(object)
def generate_db(self):
        test_db = input('Enter the scenario to run : ')
        print(f'Running the selected scenario: {test_db}.')
        Model_module() # initialize the empty matrices inside the model module
data_module = Data_module()
        db, check_db = data_module.load_data(file_name=test_db) # load test_db
data_module.data_processing() # Min-Max o, 2 Dimension
liner = []
plot_data = []
normal_data = []
compare_data = {'Normal':[], 'Ab21-01':[], 'Ab21-02':[], 'Ab20-04':[], 'Ab15-07':[], 'Ab15-08':[], 'Ab63-04':[], 'Ab63-02':[], 'Ab21-12':[], 'Ab19-02':[], 'Ab21-11':[], 'Ab23-03':[], 'Ab60-02':[], 'Ab59-02':[], 'Ab23-01':[], 'Ab23-06':[]}
for line in range(np.shape(db)[0]):
QTest.qWait(0.01)
print(np.shape(db)[0], line)
data = np.array([data_module.load_real_data(row=line)])
liner.append(line)
check_data, check_parameter = data_module.load_real_check_data(row=line)
plot_data.append(check_data[0])
try: normal_data.append(normal_db.iloc[line])
except: pass
try: compare_data['Normal'].append(normal_db.iloc[line])
except: pass
try: compare_data['Ab21-01'].append(ab21_01.iloc[line])
except: pass
try: compare_data['Ab21-02'].append(ab21_02.iloc[line])
except: pass
try: compare_data['Ab20-04'].append(ab20_04.iloc[line])
except: pass
try: compare_data['Ab15-07'].append(ab15_07.iloc[line])
except: pass
try: compare_data['Ab15-08'].append(ab15_08.iloc[line])
except: pass
try: compare_data['Ab63-04'].append(ab63_04.iloc[line])
except: pass
try: compare_data['Ab63-02'].append(ab63_02.iloc[line])
except: pass
try: compare_data['Ab21-12'].append(ab21_12.iloc[line])
except: pass
try: compare_data['Ab19-02'].append(ab19_02.iloc[line])
except: pass
try: compare_data['Ab21-11'].append(ab21_11.iloc[line])
except: pass
try: compare_data['Ab23-03'].append(ab23_03.iloc[line])
except: pass
try: compare_data['Ab60-02'].append(ab60_02.iloc[line])
except: pass
try: compare_data['Ab59-02'].append(ab59_02.iloc[line])
except: pass
try: compare_data['Ab23-01'].append(ab23_01.iloc[line])
except: pass
try: compare_data['Ab23-06'].append(ab23_06.iloc[line])
except: pass
if np.shape(data) == (1, 10, 46):
                dim2 = np.array(data_module.load_scaled_data(row=line - 9)) # 2-D scaled window
# check_data, check_parameter = data_module.load_real_check_data(row=line - 8)
# plot_data.append(check_data[0])
train_untrain_reconstruction_error, train_untrain_error = model_module.train_untrain_classifier(data=data)
# normal_abnormal_reconstruction_error = model_module.normal_abnormal_classifier(data=data)
abnormal_procedure_result, abnormal_procedure_prediction, shap_add_des, shap_value = model_module.abnormal_procedure_classifier(data=dim2)
abnormal_verif_reconstruction_error, verif_threshold, abnormal_verif_error = model_module.abnormal_procedure_verification(data=data)
self.train_value.emit(train_untrain_error)
# self.nor_ab_value.emit(np.argmax(abnormal_procedure_result[line-9], axis=1)[0])
self.procedure_value.emit(np.argmax(abnormal_procedure_prediction, axis=1)[0])
self.verif_value.emit([abnormal_verif_error, verif_threshold])
self.timer.emit([line, check_parameter])
self.symptom_db.emit([np.argmax(abnormal_procedure_prediction, axis=1)[0], check_parameter])
self.shap.emit(shap_add_des)
self.plot_db.emit([liner, plot_data])
self.display_ex.emit(shap_add_des, [liner, plot_data], normal_data)
self.another_shap.emit(shap_value, [liner, plot_data], compare_data)
self.another_shap_table.emit(shap_value)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
class Mainwindow(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Real-Time Abnormal Diagnosis for NPP")
self.setGeometry(150, 50, 1700, 800)
        # initial graph settings
pyqtgraph.setConfigOption("background", "w")
pyqtgraph.setConfigOption("foreground", "k")
#############################################
self.selected_para = pd.read_csv('./DataBase/Final_parameter.csv')
        # GUI part 1 layout (integrated diagnosis panel)
layout_left = QVBoxLayout()
        # Group 0 (Time and Power)
        gb_0 = QGroupBox("Training Status") # group 0 title
        layout_left.addWidget(gb_0) # add group 0 to the overall frame
        gb_0_layout = QBoxLayout(QBoxLayout.LeftToRight) # layout holding the group 0 contents
        # Group 1
        gb_1 = QGroupBox("Training Status") # group 1 title
        layout_left.addWidget(gb_1) # add group 1 to the overall frame
        gb_1_layout = QBoxLayout(QBoxLayout.LeftToRight) # layout holding the group 1 contents
        # Group 2
gb_2 = QGroupBox('NPP Status')
layout_left.addWidget(gb_2)
gb_2_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 3
gb_3 = QGroupBox(self)
layout_left.addWidget(gb_3)
gb_3_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 4
gb_4 = QGroupBox('Predicted Result Verification')
layout_left.addWidget(gb_4)
gb_4_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 5
gb_5 = QGroupBox('Symptom check in scenario')
layout_left.addWidget(gb_5)
gb_5_layout = QBoxLayout(QBoxLayout.TopToBottom)
        # add a spacer
# layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
        # group 0 contents
self.time_label = QLabel(self)
self.power_label = QPushButton(self)
        # group 1 contents
# Trained / Untrained condition label
self.trained_label = QPushButton('Trained')
self.Untrained_label = QPushButton('Untrained')
        # group 2 contents
self.normal_label = QPushButton('Normal')
self.abnormal_label = QPushButton('Abnormal')
        # group 3 contents
self.name_procedure = QLabel('Number of Procedure: ')
self.num_procedure = QLineEdit(self)
self.num_procedure.setAlignment(Qt.AlignCenter)
self.name_scnario = QLabel('Name of Procedure: ')
self.num_scnario = QLineEdit(self)
self.num_scnario.setAlignment(Qt.AlignCenter)
        # group 4 contents
self.success_label = QPushButton('Diagnosis Success')
self.failure_label = QPushButton('Diagnosis Failure')
        # group 5 contents
self.symptom_name = QLabel(self)
self.symptom1 = QCheckBox(self)
self.symptom2 = QCheckBox(self)
self.symptom3 = QCheckBox(self)
self.symptom4 = QCheckBox(self)
self.symptom5 = QCheckBox(self)
self.symptom6 = QCheckBox(self)
        # populate group 0
gb_0_layout.addWidget(self.time_label)
gb_0_layout.addWidget(self.power_label)
gb_0.setLayout(gb_0_layout)
        # populate group 1
gb_1_layout.addWidget(self.trained_label)
gb_1_layout.addWidget(self.Untrained_label)
        gb_1.setLayout(gb_1_layout) # put the group 1 layout into the group 1 frame
        # populate group 2
gb_2_layout.addWidget(self.normal_label)
gb_2_layout.addWidget(self.abnormal_label)
gb_2.setLayout(gb_2_layout)
        # populate group 3
gb_3_layout.addWidget(self.name_procedure)
gb_3_layout.addWidget(self.num_procedure)
gb_3_layout.addWidget(self.name_scnario)
gb_3_layout.addWidget(self.num_scnario)
gb_3.setLayout(gb_3_layout)
        # populate group 4
gb_4_layout.addWidget(self.success_label)
gb_4_layout.addWidget(self.failure_label)
gb_4.setLayout(gb_4_layout)
        # populate group 5
gb_5_layout.addWidget(self.symptom_name)
gb_5_layout.addWidget(self.symptom1)
gb_5_layout.addWidget(self.symptom2)
gb_5_layout.addWidget(self.symptom3)
gb_5_layout.addWidget(self.symptom4)
gb_5_layout.addWidget(self.symptom5)
gb_5_layout.addWidget(self.symptom6)
gb_5.setLayout(gb_5_layout)
        # place the Start button at the very bottom
self.start_btn = QPushButton('Start')
# layout_part1.addWidget(self.start_btn)
self.tableWidget = QTableWidget(0, 0)
self.tableWidget.setFixedHeight(500)
self.tableWidget.setFixedWidth(800)
        # plots
self.plot_1 = pyqtgraph.PlotWidget(title=self)
self.plot_2 = pyqtgraph.PlotWidget(title=self)
self.plot_3 = pyqtgraph.PlotWidget(title=self)
self.plot_4 = pyqtgraph.PlotWidget(title=self)
        # explanation alarm panels
red_alarm = QGroupBox('Main basis for diagnosis')
red_alarm_layout = QGridLayout()
orange_alarm = QGroupBox('Sub basis for diagnosis')
orange_alarm_layout = QGridLayout()
        # create the display buttons
self.red1 = QPushButton(self)
self.red2 = QPushButton(self)
self.red3 = QPushButton(self)
self.red4 = QPushButton(self)
self.orange1 = QPushButton(self)
self.orange2 = QPushButton(self)
self.orange3 = QPushButton(self)
self.orange4 = QPushButton(self)
self.orange5 = QPushButton(self)
self.orange6 = QPushButton(self)
self.orange7 = QPushButton(self)
self.orange8 = QPushButton(self)
self.orange9 = QPushButton(self)
self.orange10 = QPushButton(self)
self.orange11 = QPushButton(self)
self.orange12 = QPushButton(self)
        # insert the widgets into the layouts
red_alarm_layout.addWidget(self.red1, 0, 0)
red_alarm_layout.addWidget(self.red2, 0, 1)
red_alarm_layout.addWidget(self.red3, 1, 0)
red_alarm_layout.addWidget(self.red4, 1, 1)
orange_alarm_layout.addWidget(self.orange1, 0, 0)
orange_alarm_layout.addWidget(self.orange2, 0, 1)
orange_alarm_layout.addWidget(self.orange3, 1, 0)
orange_alarm_layout.addWidget(self.orange4, 1, 1)
orange_alarm_layout.addWidget(self.orange5, 2, 0)
orange_alarm_layout.addWidget(self.orange6, 2, 1)
orange_alarm_layout.addWidget(self.orange7, 3, 0)
orange_alarm_layout.addWidget(self.orange8, 3, 1)
orange_alarm_layout.addWidget(self.orange9, 4, 0)
orange_alarm_layout.addWidget(self.orange10, 4, 1)
orange_alarm_layout.addWidget(self.orange11, 5, 0)
orange_alarm_layout.addWidget(self.orange12, 5, 1)
        # attach the layouts to the group boxes
red_alarm.setLayout(red_alarm_layout)
orange_alarm.setLayout(orange_alarm_layout)
        # add each group box to the parent layout
layout_part1 = QVBoxLayout()
detail_part = QHBoxLayout()
detailed_table = QPushButton('Detail Explanation [Table]')
self.another_classification = QPushButton('Why other scenarios were not chosen')
detail_part.addWidget(detailed_table)
detail_part.addWidget(self.another_classification)
alarm_main = QVBoxLayout()
alarm_main.addWidget(red_alarm)
alarm_main.addWidget(orange_alarm)
layout_part1.addLayout(layout_left)
layout_part1.addLayout(alarm_main)
layout_part1.addLayout(detail_part)
layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
        # GUI part 2 layout (XAI plots)
layout_part2 = QVBoxLayout()
layout_part2.addWidget(self.plot_1)
layout_part2.addWidget(self.plot_2)
layout_part2.addWidget(self.plot_3)
layout_part2.addWidget(self.plot_4)
# layout_part2.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# layout_part2.addWidget(self.tableWidget)
        # combine GUI part 1 and part 2
layout_base = QHBoxLayout()
layout_base.addLayout(layout_part1)
layout_base.addLayout(layout_part2)
        # final assembly (keeps the Start button at the bottom)
total_layout = QVBoxLayout()
total_layout.addLayout(layout_base)
total_layout.addWidget(self.start_btn)
        self.setLayout(total_layout) # setLayout : sets the final GUI screen to be displayed
# Threading Part##############################################################################################################
        # move the data-processing work to a separate thread
self.worker = Worker()
self.worker_thread = QThread()
        # connect the worker signals to functions in the main thread
self.worker.train_value.connect(self.Determine_train)
self.worker.procedure_value.connect(self.Determine_abnormal)
self.worker.procedure_value.connect(self.Determine_procedure)
self.worker.verif_value.connect(self.verifit_result)
self.worker.timer.connect(self.time_display)
self.worker.symptom_db.connect(self.procedure_satisfaction)
# self.worker.shap.connect(self.explain_result)
self.worker.plot_db.connect(self.plotting)
self.worker.display_ex.connect(self.display_explain)
        self.worker.moveToThread(self.worker_thread) # move the Worker instance to the thread
# self.worker_thread.started.connect(lambda: self.worker.generate_db())
        self.start_btn.clicked.connect(lambda: self.worker.generate_db()) # run the processing loop on click
self.worker_thread.start()
# Threading Part##############################################################################################################
        # event handling ----------------------------------------------------------------------------------------------------
detailed_table.clicked.connect(self.show_table)
self.another_classification.clicked.connect(self.show_another_result)
        # button click wiring
convert_red_btn = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} # Red Button
convert_red_plot = {0: self.red1_plot, 1: self.red2_plot, 2: self.red3_plot, 3: self.red4_plot} #
convert_orange_btn = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5,
5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10,
10: self.orange11, 11: self.orange12} # Orange Button
convert_orange_plot = {0: self.orange1_plot, 1: self.orange2_plot, 2: self.orange3_plot, 3: self.orange4_plot, 4: self.orange5_plot,
5: self.orange6_plot, 6: self.orange7_plot, 7: self.orange8_plot, 8: self.orange9_plot, 9: self.orange10_plot,
10: self.orange11_plot, 11: self.orange12_plot}
        # declare the plot widgets up front so they persist without being cut off.
# Red Button
[convert_red_btn[i].clicked.connect(convert_red_plot[i]) for i in range(4)]
self.red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_orange_btn[i].clicked.connect(convert_orange_plot[i]) for i in range(12)]
self.orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
self.show() # UI show command
def time_display(self, display_variable):
# display_variable[0] : time, display_variable[1].iloc[1]
self.time_label.setText(f'<b>Time :<b/> {display_variable[0]} sec')
self.time_label.setFont(QFont('Times new roman', 15))
self.time_label.setAlignment(Qt.AlignCenter)
self.power_label.setText(f'Power : {round(display_variable[1].iloc[1]["QPROREL"]*100, 2)}%')
if round(display_variable[1].iloc[1]["QPROREL"]*100, 2) < 95:
self.power_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
else:
self.power_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_train(self, train_untrain_reconstruction_error):
if train_untrain_reconstruction_error[0] <= 0.00225299: # Trained Data
self.trained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.Untrained_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else: # Untrained Data
self.Untrained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.trained_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_abnormal(self, abnormal_diagnosis):
        if abnormal_diagnosis == 0: # normal state
self.normal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.abnormal_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else: # abnormal state
self.abnormal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.normal_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_procedure(self, abnormal_procedure_result):
if abnormal_procedure_result == 0:
self.num_procedure.setText('Normal')
self.num_scnario.setText('Normal')
elif abnormal_procedure_result == 1:
self.num_procedure.setText('Ab21-01')
self.num_scnario.setText('가압기 압력 채널 고장 "고"')
elif abnormal_procedure_result == 2:
self.num_procedure.setText('Ab21-02')
self.num_scnario.setText('가압기 압력 채널 고장 "저"')
elif abnormal_procedure_result == 3:
self.num_procedure.setText('Ab20-04')
            self.num_scnario.setText('Pressurizer level channel failure "Low"')
elif abnormal_procedure_result == 4:
self.num_procedure.setText('Ab15-07')
self.num_scnario.setText('증기발생기 수위 채널 고장 "저"')
elif abnormal_procedure_result == 5:
self.num_procedure.setText('Ab15-08')
self.num_scnario.setText('증기발생기 수위 채널 고장 "고"')
elif abnormal_procedure_result == 6:
self.num_procedure.setText('Ab63-04')
            self.num_scnario.setText('Control rod drop')
elif abnormal_procedure_result == 7:
self.num_procedure.setText('Ab63-02')
            self.num_scnario.setText('Continuous insertion of control rods')
elif abnormal_procedure_result == 8:
self.num_procedure.setText('Ab21-12')
# self.num_scnario.setText('가압기 PORV 열림')
self.num_scnario.setText('Pressurizer PORV opening')
elif abnormal_procedure_result == 9:
self.num_procedure.setText('Ab19-02')
            self.num_scnario.setText('Pressurizer safety valve failure')
elif abnormal_procedure_result == 10:
self.num_procedure.setText('Ab21-11')
            self.num_scnario.setText('Pressurizer spray valve failure "Open"')
elif abnormal_procedure_result == 11:
self.num_procedure.setText('Ab23-03')
self.num_scnario.setText('1차기기 냉각수 계통으로 누설 "CVCS->CCW"')
elif abnormal_procedure_result == 12:
self.num_procedure.setText('Ab60-02')
            self.num_scnario.setText('Rupture at the front end of the regenerative heat exchanger')
elif abnormal_procedure_result == 13:
self.num_procedure.setText('Ab59-02')
            self.num_scnario.setText('Leakage downstream of the charging flow control valve')
elif abnormal_procedure_result == 14:
self.num_procedure.setText('Ab23-01')
self.num_scnario.setText('1차기기 냉각수 계통으로 누설 "RCS->CCW"')
elif abnormal_procedure_result == 15:
self.num_procedure.setText('Ab23-06')
            self.num_scnario.setText('Steam generator U-tube leakage')
def verifit_result(self, verif_value):
        if verif_value[0] <= verif_value[1]: # diagnosis success
self.success_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.failure_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else: # diagnosis failure
self.failure_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.success_label.setStyleSheet('color : black;' 'background-color: light gray;')
def procedure_satisfaction(self, symptom_db):
# symptom_db[0] : classification result [0~15]
        # symptom_db[1] : check_db [2,2222] -> used to compare the current and previous time steps.
        # symptom_db[1].iloc[0] : previous time step # symptom_db[1].iloc[1] : current time step
        if symptom_db[0] == 0: # normal state
self.symptom_name.setText('Diagnosis Result : Normal → Symptoms : 0')
self.symptom1.setText('')
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('')
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('')
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('')
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('')
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText('')
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 1:
self.symptom_name.setText('Diagnosis Result : Ab21-01 Pressurizer pressure channel failure "High" → Symptoms : 6')
self.symptom1.setText("채널 고장으로 인한 가압기 '고' 압력 지시")
if symptom_db[1].iloc[1]['PPRZN'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 살수밸브 '열림' 지시")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 비례전열기 꺼짐")
if symptom_db[1].iloc[1]['QPRZP'] == 0:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 보조전열기 꺼짐")
if symptom_db[1].iloc[1]['QPRZB'] == 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText("실제 가압기 '저' 압력 지시")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("가압기 PORV 차단밸브 닫힘")
if symptom_db[1].iloc[1]['BHV6'] == 0:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 2:
            self.symptom_name.setText('Diagnosis Result : Ab21-02 Pressurizer pressure channel failure "Low" → Symptoms : 5')
self.symptom1.setText("채널 고장으로 인한 가압기 '저' 압력 지시")
if symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText('Backup heater on indication and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("실제 가압기 '고' 압력 지시")
if symptom_db[1].iloc[1]['PPRZ'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom4.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom5.setText('Pressurizer PORV closed due to the actual decrease of pressurizer pressure') # the pressure-decrease criterion still needs to be resolved.
if symptom_db[1].iloc[1]['BPORV'] == 0 and (symptom_db[1].iloc[0]['PPRZ'] > symptom_db[1].iloc[1]['PPRZ']):
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 3:
            self.symptom_name.setText('Diagnosis Result : Ab20-04 Pressurizer level channel failure "Low" → Symptoms : 5')
self.symptom1.setText("채널 고장으로 인한 가압기 '저' 수위 지시")
            if symptom_db[1].iloc[1]['ZINST63'] < 17: # needs to be rechecked later.
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('"LETDN HX OUTLET FLOW LOW" 경보 발생')
if symptom_db[1].iloc[1]['UNRHXUT'] > symptom_db[1].iloc[1]['CULDHX']:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('"CHARGING LINE FLOW HI/LO" 경보 발생')
if (symptom_db[1].iloc[1]['WCHGNO'] < symptom_db[1].iloc[1]['CWCHGL']) or (symptom_db[1].iloc[1]['WCHGNO'] > symptom_db[1].iloc[1]['CWCHGH']):
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom4.setText('Charging flow increase')
if symptom_db[1].iloc[0]['WCHGNO'] < symptom_db[1].iloc[1]['WCHGNO']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom5.setText('Level indication increase on the healthy level indicator')
if symptom_db[1].iloc[0]['ZPRZNO'] < symptom_db[1].iloc[1]['ZPRZNO']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 4:
            self.symptom_name.setText('Diagnosis Result : Ab15-07 Steam generator level channel failure "Low" → Symptoms : ')
self.symptom1.setText('증기발생기 수위 "저" 경보 발생')
if symptom_db[1].iloc[1]['ZINST78']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST77']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST76']*0.01 < symptom_db[1].iloc[1]['CZSGW']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText('Corresponding SG MFCV moves toward open and actual feedwater flow of that SG increases')
elif symptom_db[0] == 8:
# self.symptom_name.setText('진단 : Ab21-12 가압기 PORV 열림 → 증상 : 5')
self.symptom_name.setText('Diagnosis result : Ab21-12 Pressurizer PORV opening → Symptoms : 5')
# self.symptom1.setText('가압기 PORV 열림 지시 및 경보 발생')
self.symptom1.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생')
self.symptom2.setText('Aux. heater turn on instruction and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
self.symptom3.setText("pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL'] :
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom4.setText("PRT 고온 지시 및 경보 발생")
self.symptom4.setText("PRT high temperature indication and alarm")
if symptom_db[1].iloc[1]['UPRT'] > symptom_db[1].iloc[1]['CUPRT'] :
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom5.setText("PRT 고압 지시 및 경보 발생")
self.symptom5.setText("PRT high pressure indication and alarm")
if (symptom_db[1].iloc[1]['PPRT'] - 0.98E5) > symptom_db[1].iloc[1]['CPPRT']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Blank")
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 10:
self.symptom_name.setText("진단 : Ab21-11 가압기 살수밸브 고장 '열림' → 증상 : 4")
self.symptom1.setText("가압기 살수밸브 '열림' 지시 및 상태 표시등 점등")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 보조전열기 켜짐 지시 및 경보 발생")
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 수위 급격한 증가") # 급격한 증가에 대한 수정은 필요함 -> 추후 수정
if symptom_db[1].iloc[0]['ZINST63'] < symptom_db[1].iloc[1]['ZINST63']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
def explain_result(self, shap_add_des):
'''
        # shap_add_des['index'] : variable name / shap_add_des[0] : shap value
        # shap_add_des['describe'] : description of the variable / shap_add_des['probability'] : shap value converted into a probability
'''
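        # For reference, an assumed minimal example of the expected input
        # (column names follow the docstring above; the values are made up):
        #
        #   shap_add_des = pd.DataFrame({
        #       'index': ['BPORV', 'PPRZN'],
        #       'probability': [42.1, 13.7],
        #       'describe': ['PORV open state', 'Pressurizer pressure'],
        #       'system': ['RCS', 'RCS'],
        #   })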
self.tableWidget.setRowCount(len(shap_add_des))
self.tableWidget.setColumnCount(4)
self.tableWidget.setHorizontalHeaderLabels(["value_name", 'probability', 'describe', 'system'])
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(QHeaderView.ResizeToContents)
header.setSectionResizeMode(0, QHeaderView.Stretch)
header.setSectionResizeMode(1, QHeaderView.Stretch)
header.setSectionResizeMode(2, QHeaderView.ResizeToContents)
header.setSectionResizeMode(3, QHeaderView.Stretch)
[self.tableWidget.setItem(i, 0, QTableWidgetItem(f"{shap_add_des['index'][i]}")) for i in range(len(shap_add_des['index']))]
[self.tableWidget.setItem(i, 1, QTableWidgetItem(f"{round(shap_add_des['probability'][i],2)}%")) for i in range(len(shap_add_des['probability']))]
[self.tableWidget.setItem(i, 2, QTableWidgetItem(f"{shap_add_des['describe'][i]}")) for i in range(len(shap_add_des['describe']))]
[self.tableWidget.setItem(i, 3, QTableWidgetItem(f"{shap_add_des['system'][i]}")) for i in range(len(shap_add_des['system']))]
delegate = AlignDelegate(self.tableWidget)
self.tableWidget.setItemDelegate(delegate)
def show_table(self):
self.worker.shap.connect(self.explain_result)
        # Because the signal is delivered through the thread on click, some buffering (about 2 seconds?) occurs. Consider loading this at start-up instead.
self.tableWidget.show()
def plotting(self, symptom_db):
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# -- scatter --
# time = []
# value1, value2, value3 = [], [], []
# time.append(symptom_db[0])
# value1.append(round(symptom_db[1].iloc[1]['ZVCT'],2))
# value2.append(round(symptom_db[1].iloc[1]['BPORV'],2))
# value3.append(round(symptom_db[1].iloc[1]['UPRZ'],2))
# self.plotting_1 = self.plot_1.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_2 = self.plot_2.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_3 = self.plot_3.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# -- Line plotting --
# self.plotting_1 = self.plot_1.plot(pen='w')
# self.plotting_2 = self.plot_2.plot(pen='w')
# self.plotting_3 = self.plot_3.plot(pen='w')
# self.plotting_4 = self.plot_4.plot(pen='w')
self.plot_1.showGrid(x=True, y=True, alpha=0.3)
self.plot_2.showGrid(x=True, y=True, alpha=0.3)
self.plot_3.showGrid(x=True, y=True, alpha=0.3)
self.plot_4.showGrid(x=True, y=True, alpha=0.3)
self.plotting_1 = self.plot_1.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_2 = self.plot_2.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_3 = self.plot_3.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_4 = self.plot_4.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['BPORV'])
self.plot_1.setTitle('PORV open state')
self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRZN'])
self.plot_2.setTitle('Pressurizer pressure')
self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['UPRT'])
self.plot_3.setTitle('PRT temperature')
self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRT'])
self.plot_4.setTitle('PRT pressure')
        # red_range = display_db[display_db['probability'] >= 10] # variables with a probability of 10% or more
#
# print(bool(red_range["describe"].iloc[3]))
# try :
# self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]])
# if red_range["describe"].iloc[0] == None:
# self.plot_1.setTitle(self)
# else:
# self.plot_1.setTitle(f'{red_range["describe"].iloc[0]}')
# # self.plot_1.clear()
# except:
# print('plot1 fail')
# try:
# self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]])
# if red_range["describe"].iloc[1] == None:
# self.plot_2.setTitle(self)
# else:
# self.plot_2.setTitle(f'{red_range["describe"].iloc[1]}')
# # self.plot_2.clear()
# except:
# print('plot2 fail')
# try:
# self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]])
# if red_range["describe"].iloc[2] == None:
# self.plot_3.setTitle(self)
# else:
# self.plot_3.setTitle(f'{red_range["describe"].iloc[2]}')
# # self.plot_3.clear()
# except:
# print('plot3 fail')
# try:
# self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]])
# if red_range["describe"].iloc[3] == None:
# self.plot_4.setTitle(self)
# else:
# self.plot_4.setTitle(f'{red_range["describe"].iloc[3]}')
# # self.plot_4.clear()
# except:
# print('plot4 fail')
def display_explain(self, display_db, symptom_db, normal_db):
'''
        # display_db['index'] : variable name / display_db[0] : shap value
        # display_db['describe'] : description of the variable / display_db['probability'] : shap value converted into a probability
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
'''
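        # Thresholding used below (the example values are made up):
        #   probability >= 10%      -> red buttons / plots (main basis)
        #   1% < probability < 10%  -> orange buttons / plots (sub basis)
        # e.g. a row with probability 42.1 falls into red_range, a row with 3.2
        # into orange_range, and anything <= 1% is not displayed.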
red_range = display_db[display_db['probability'] >=10]
orange_range = display_db[[display_db['probability'].iloc[i]<10 and display_db['probability'].iloc[i]>1 for i in range(len(display_db['probability']))]]
convert_red = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4}
convert_orange = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12}
        # number of unused red / orange slots -> indices of the buttons to blank out
        red_del = list(range(len(red_range), 4))
        orange_del = list(range(len(orange_range), 12))
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i],2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i],2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
# [convert_orange[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: orange;') for i in range(len(orange_range))]
# [convert_orange[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in orange_del]
        # build the plotting data associated with each button
# Red1 Button
if self.red1.text().split()[0] != 'None':
self.red_plot_1.clear()
self.red_plot_1.setTitle(red_range['describe'].iloc[0])
self.red_plot_1.addLegend(offset=(-30,20))
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name = 'Real Data')
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name = 'Normal Data')
# Red2 Button
if self.red2.text().split()[0] != 'None':
self.red_plot_2.clear()
self.red_plot_2.setTitle(red_range['describe'].iloc[1])
self.red_plot_2.addLegend(offset=(-30, 20))
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red3 Button
if self.red3.text().split()[0] != 'None':
self.red_plot_3.clear()
self.red_plot_3.setTitle(red_range['describe'].iloc[2])
self.red_plot_3.addLegend(offset=(-30, 20))
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red4 Button
if self.red4.text().split()[0] != 'None':
self.red_plot_4.clear()
self.red_plot_4.setTitle(red_range['describe'].iloc[3])
self.red_plot_4.addLegend(offset=(-30, 20))
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange1 Button
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.clear()
self.orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.orange_plot_1.addLegend(offset=(-30, 20))
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange2 Button
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.clear()
self.orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.orange_plot_2.addLegend(offset=(-30, 20))
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange3 Button
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.clear()
self.orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.orange_plot_3.addLegend(offset=(-30, 20))
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange4 Button
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.clear()
self.orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.orange_plot_4.addLegend(offset=(-30, 20))
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange5 Button
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.clear()
self.orange_plot_5.setTitle(orange_range['describe'].iloc[4])
self.orange_plot_5.addLegend(offset=(-30, 20))
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange6 Button
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.clear()
self.orange_plot_6.setTitle(orange_range['describe'].iloc[5])
self.orange_plot_6.addLegend(offset=(-30, 20))
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange7 Button
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.clear()
self.orange_plot_7.setTitle(orange_range['describe'].iloc[6])
self.orange_plot_7.addLegend(offset=(-30, 20))
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange8 Button
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.clear()
self.orange_plot_8.setTitle(orange_range['describe'].iloc[7])
self.orange_plot_8.addLegend(offset=(-30, 20))
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange9 Button
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.clear()
self.orange_plot_9.setTitle(orange_range['describe'].iloc[8])
self.orange_plot_9.addLegend(offset=(-30, 20))
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange10 Button
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.clear()
self.orange_plot_10.setTitle(orange_range['describe'].iloc[9])
self.orange_plot_10.addLegend(offset=(-30, 20))
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange11 Button
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.clear()
self.orange_plot_11.setTitle(orange_range['describe'].iloc[10])
self.orange_plot_11.addLegend(offset=(-30, 20))
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange12 Button
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.clear()
self.orange_plot_12.setTitle(orange_range['describe'].iloc[11])
self.orange_plot_12.addLegend(offset=(-30, 20))
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
[convert_red[i].setCheckable(True) for i in range(4)]
[convert_orange[i].setCheckable(True) for i in range(12)]
def red1_plot(self):
if self.red1.isChecked():
if self.red1.text().split()[0] != 'None':
self.red_plot_1.show()
self.red1.setCheckable(False)
def red2_plot(self):
if self.red2.isChecked():
if self.red2.text().split()[0] != 'None':
self.red_plot_2.show()
self.red2.setCheckable(False)
def red3_plot(self):
if self.red3.isChecked():
if self.red3.text().split()[0] != 'None':
self.red_plot_3.show()
self.red3.setCheckable(False)
def red4_plot(self):
if self.red4.isChecked():
if self.red4.text().split()[0] != 'None':
self.red_plot_4.show()
self.red4.setCheckable(False)
def orange1_plot(self):
if self.orange1.isChecked():
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.show()
self.orange1.setCheckable(False)
def orange2_plot(self):
if self.orange2.isChecked():
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.show()
self.orange2.setCheckable(False)
def orange3_plot(self):
if self.orange3.isChecked():
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.show()
self.orange3.setCheckable(False)
def orange4_plot(self):
if self.orange4.isChecked():
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.show()
self.orange4.setCheckable(False)
def orange5_plot(self):
if self.orange5.isChecked():
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.show()
self.orange5.setCheckable(False)
def orange6_plot(self):
if self.orange6.isChecked():
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.show()
self.orange6.setCheckable(False)
def orange7_plot(self):
if self.orange7.isChecked():
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.show()
self.orange7.setCheckable(False)
def orange8_plot(self):
if self.orange8.isChecked():
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.show()
self.orange8.setCheckable(False)
def orange9_plot(self):
if self.orange9.isChecked():
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.show()
self.orange9.setCheckable(False)
def orange10_plot(self):
if self.orange10.isChecked():
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.show()
self.orange10.setCheckable(False)
def orange11_plot(self):
if self.orange11.isChecked():
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.show()
self.orange11.setCheckable(False)
def orange12_plot(self):
if self.orange12.isChecked():
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.show()
self.orange12.setCheckable(False)
def show_another_result(self):
self.other = another_result_explain()
self.worker.another_shap_table.connect(self.other.show_another_result_table)
self.worker.another_shap.connect(self.other.show_shap)
self.other.show()
class another_result_explain(QWidget):
def __init__(self):
super().__init__()
# Initial setup of the sub-interface
self.setWindowTitle('Another Result Explanation')
self.setGeometry(300, 300, 800, 500)
self.selected_para = pd.read_csv('./DataBase/Final_parameter_200825.csv')
# Layout composition
combo_layout = QVBoxLayout()
self.title_label = QLabel("<b>Result interpretation for the scenarios that were not selected</b>")
self.title_label.setAlignment(Qt.AlignCenter)
self.blank = QLabel(self) # label used as a blank spacer line
self.show_table = QPushButton("Show Table")
self.cb = QComboBox(self)
self.cb.addItem('Normal')
self.cb.addItem('Ab21-01: Pressurizer pressure channel failure (High)')
self.cb.addItem('Ab21-02: Pressurizer pressure channel failure (Low)')
self.cb.addItem('Ab20-04: Pressurizer level channel failure (Low)')
self.cb.addItem('Ab15-07: Steam generator level channel failure (High)')
self.cb.addItem('Ab15-08: Steam generator level channel failure (Low)')
self.cb.addItem('Ab63-04: Control rod fall')
self.cb.addItem('Ab63-02: Continuous insertion of control rod')
self.cb.addItem('Ab21-12: Pressurizer PORV opening')
self.cb.addItem('Ab19-02: Pressurizer safety valve failure')
self.cb.addItem('Ab21-11: Pressurizer spray valve failed opening')
self.cb.addItem('Ab23-03: Leakage from CVCS to RCS')
self.cb.addItem('Ab60-02: Rupture of the front end of the regenerative heat exchanger')
self.cb.addItem('Ab59-02: Leakage at the rear end of the charging flow control valve')
self.cb.addItem('Ab23-01: Leakage from CVCS to CCW')
self.cb.addItem('Ab23-06: Steam generator u-tube leakage')
# Explanation Alarm implementation
cb_red_alarm = QGroupBox('Main basis for diagnosis')
cb_red_alarm_layout = QGridLayout()
cb_orange_alarm = QGroupBox('Sub basis for diagnosis')
cb_orange_alarm_layout = QGridLayout()
# Create the display buttons
self.cb_red1 = QPushButton(self)
self.cb_red2 = QPushButton(self)
self.cb_red3 = QPushButton(self)
self.cb_red4 = QPushButton(self)
self.cb_orange1 = QPushButton(self)
self.cb_orange2 = QPushButton(self)
self.cb_orange3 = QPushButton(self)
self.cb_orange4 = QPushButton(self)
self.cb_orange5 = QPushButton(self)
self.cb_orange6 = QPushButton(self)
self.cb_orange7 = QPushButton(self)
self.cb_orange8 = QPushButton(self)
self.cb_orange9 = QPushButton(self)
self.cb_orange10 = QPushButton(self)
self.cb_orange11 = QPushButton(self)
self.cb_orange12 = QPushButton(self)
# Insert widgets into the layouts
cb_red_alarm_layout.addWidget(self.cb_red1, 0, 0)
cb_red_alarm_layout.addWidget(self.cb_red2, 0, 1)
cb_red_alarm_layout.addWidget(self.cb_red3, 1, 0)
cb_red_alarm_layout.addWidget(self.cb_red4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange1, 0, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange2, 0, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange3, 1, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange5, 2, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange6, 2, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange7, 3, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange8, 3, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange9, 4, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange10, 4, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange11, 5, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange12, 5, 1)
cb_red_alarm.setLayout(cb_red_alarm_layout)
cb_orange_alarm.setLayout(cb_orange_alarm_layout)
combo_layout.addWidget(self.title_label)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.cb)
combo_layout.addWidget(self.blank)
# combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
combo_layout.addWidget(cb_red_alarm)
combo_layout.addWidget(cb_orange_alarm)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.show_table)
combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
self.setLayout(combo_layout)
self.combo_tableWidget = QTableWidget(0, 0)
self.combo_tableWidget.setFixedHeight(500)
self.combo_tableWidget.setFixedWidth(800)
# self.combo_tableWidget = QTableWidget(0, 0)
# Event handling section ########################################################
self.show_table.clicked.connect(self.show_anoter_table)
self.cb.activated[str].connect(self.show_another_result_table)
self.cb.activated[str].connect(self.show_shap)
##########################################################################
# Handle button-click events
convert_cb_red_btn = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4} # Red Button
convert_cb_red_plot = {0: self.cb_red1_plot, 1: self.cb_red2_plot, 2: self.cb_red3_plot, 3: self.cb_red4_plot}
convert_cb_orange_btn = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12} # Orange Button
convert_cb_orange_plot = {0: self.cb_orange1_plot, 1: self.cb_orange2_plot, 2: self.cb_orange3_plot, 3: self.cb_orange4_plot,
4: self.cb_orange5_plot, 5: self.cb_orange6_plot, 6: self.cb_orange7_plot, 7: self.cb_orange8_plot,
8: self.cb_orange9_plot, 9: self.cb_orange10_plot, 10: self.cb_orange11_plot, 11: self.cb_orange12_plot}
################################################################################################################
# Declare the plot widgets up front -> they must be created at init time so they persist without being interrupted.
# Red Button
[convert_cb_red_btn[i].clicked.connect(convert_cb_red_plot[i]) for i in range(4)]
self.cb_red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_cb_orange_btn[i].clicked.connect(convert_cb_orange_plot[i]) for i in range(12)]
self.cb_orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
################################################################################################################
self.show() # Sub UI show command
def show_shap(self, all_shap, symptom_db, compare_data):
# all_shap : holds the shap_value arrays for every scenario.
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
if self.cb.currentText() == 'Normal':
step1 = pd.DataFrame(all_shap[0], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()]
elif self.cb.currentText() == 'Ab21-01: Pressurizer pressure channel failure (High)':
step1 = pd.DataFrame(all_shap[1], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-02: Pressurizer pressure channel failure (Low)':
step1 = pd.DataFrame(all_shap[2], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab20-04: Pressurizer level channel failure (Low)':
step1 = pd.DataFrame(all_shap[3], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab15-07: Steam generator level channel failure (High)':
step1 = pd.DataFrame(all_shap[4], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab15-08: Steam generator level channel failure (Low)':
step1 = pd.DataFrame(all_shap[5], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab63-04: Control rod fall':
step1 = pd.DataFrame(all_shap[6], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab63-02: Continuous insertion of control rod':
step1 = pd.DataFrame(all_shap[7], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-12: Pressurizer PORV opening':
step1 = pd.DataFrame(all_shap[8], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab19-02: Pressurizer safety valve failure':
step1 = pd.DataFrame(all_shap[9], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-11: Pressurizer spray valve failed opening':
step1 = pd.DataFrame(all_shap[10], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-03: Leakage from CVCS to RCS':
step1 = pd.DataFrame(all_shap[11], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab60-02: Rupture of the front end of the regenerative heat exchanger':
step1 = pd.DataFrame(all_shap[12], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab59-02: Leakage at the rear end of the charging flow control valve':
step1 = pd.DataFrame(all_shap[13], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-01: Leakage from CVCS to CCW':
step1 = pd.DataFrame(all_shap[14], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-06: Steam generator u-tube leakage':
step1 = pd.DataFrame(all_shap[15], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
step2 = step1.sort_values(by=0, ascending=True, axis=1)
step3 = step2[step2.iloc[:] < 0].dropna(axis=1).T
self.step4 = step3.reset_index()
col = self.step4['index']
var = [self.selected_para['0'][self.selected_para['0'] == col_].index for col_ in col]
val_col = [self.selected_para['1'][var_].iloc[0] for var_ in var]
proba = [(self.step4[0][val_num] / sum(self.step4[0])) * 100 for val_num in range(len(self.step4[0]))]
val_system = [self.selected_para['2'][var_].iloc[0] for var_ in var]
self.step4['describe'] = val_col
self.step4['probability'] = proba
self.step4['system'] = val_system
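# Note: 'probability' above is each parameter's share of the total (negative) SHAP
# contribution, expressed as a percentage; parameters with >= 10% feed the red
# buttons and those between 1% and 10% feed the orange buttons below.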
red_range = self.step4[self.step4['probability'] >= 10]
orange_range = self.step4[
[self.step4['probability'].iloc[i] < 10 and self.step4['probability'].iloc[i] > 1 for i in
range(len(self.step4['probability']))]]
convert_red = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4}
convert_orange = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12}
if 4 - len(red_range) == 0:
red_del = []
elif 4 - len(red_range) == 1:
red_del = [3]
elif 4 - len(red_range) == 2:
red_del = [2, 3]
elif 4 - len(red_range) == 3:
red_del = [1, 2, 3]
elif 4 - len(red_range) == 4:
red_del = [0, 1, 2, 3]
if 12 - len(orange_range) == 0:
orange_del = []
elif 12 - len(orange_range) == 1:
orange_del = [11]
elif 12 - len(orange_range) == 2:
orange_del = [10, 11]
elif 12 - len(orange_range) == 3:
orange_del = [9, 10, 11]
elif 12 - len(orange_range) == 4:
orange_del = [8, 9, 10, 11]
elif 12 - len(orange_range) == 5:
orange_del = [7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 6:
orange_del = [6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 7:
orange_del = [5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 8:
orange_del = [4, 5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 9:
orange_del = [3, 4, 5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 10:
orange_del = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 11:
orange_del = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 12:
orange_del = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
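# Equivalent sketch (not part of the original logic): the two if/elif ladders above
# just enumerate the unused button slots, which for the 0-4 / 0-12 cases handled
# here could also be written as
#   red_del = list(range(len(red_range), 4))
#   orange_del = list(range(len(orange_range), 12))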
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i], 2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i], 2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
#####################################################################################################################################
# Build the plotting data associated with each button
# Red1 Button
if self.cb_red1.text().split()[0] != 'None':
self.cb_red_plot_1.clear()
self.cb_red_plot_1.setTitle(red_range['describe'].iloc[0])
self.cb_red_plot_1.addLegend(offset=(-30,20))
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red2 Button
if self.cb_red2.text().split()[0] != 'None':
self.cb_red_plot_2.clear()
self.cb_red_plot_2.setTitle(red_range['describe'].iloc[1])
self.cb_red_plot_2.addLegend(offset=(-30, 20))
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red3 Button
if self.cb_red3.text().split()[0] != 'None':
self.cb_red_plot_3.clear()
self.cb_red_plot_3.setTitle(red_range['describe'].iloc[2])
self.cb_red_plot_3.addLegend(offset=(-30, 20))
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_3.plot(x=symptom_db[0], y=
|
pd.DataFrame(compared_db)
|
pandas.DataFrame
|
import pandas as pd
from authentication.models import Profile
from broker.models import Broker
from restapi.helper_api import get_broker_user_data
from employee.models import Employee
from sme.models import Sme, SmeTaskEmail
from utils.models import AahoOffice, Bank
def update_aaho_office():
data = []
for sme in Sme.objects.all():
offices = sme.mb_bill_order_placed.all()
if not offices.exists():
offices = sme.mb_bill_paid_by.all()
if offices.count() > 0:
aaho_source_offices = list(offices.values_list('source_office__id', flat=True))
data = {}
for office in set(aaho_source_offices):
data[office] = aaho_source_offices.count(office)
office_id = max(data.keys(), key=lambda k: data[k])
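# office_id is the Aaho office that occurs most often among this customer's orders,
# so the customer is mapped to its most frequent source office.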
aaho_office = AahoOffice.objects.get(id=office_id)
sme.aaho_office = aaho_office
sme.save()
def retrieve_sme():
data = []
for sme in Sme.objects.all():
print(sme)
data.append([
sme.id,
sme.get_name(),
sme.company_code,
sme.name.profile.email,
sme.aaho_office.branch_name if sme.aaho_office else '',
sme.city.name if sme.city else '',
sme.credit_period
])
df = pd.DataFrame(data=data, columns=['ID', 'Name', 'Code', 'Email', 'Branch', 'City', 'Credit Period'])
df.to_excel('customers.xlsx', index=False)
def check_wrong_customer():
df = pd.read_excel('../../data/Customer Name Cleanup 11.04.18.xlsx')
data = []
for i, row in df.iterrows():
try:
sme = Sme.objects.get(company_code=row['Customer Code'])
if row['Final Name'] != sme.get_name():
profile = Profile.objects.get(user=sme.name)
profile.name = row['Final Name']
profile.save()
data.append([
row['Customer Code'],
])
except Sme.DoesNotExist:
print(row, i)
def update_sme_email():
for sme in Sme.objects.exclude(company_code__in=['IDL', 'IDS', 'IDR', 'IDH', 'IDK']):
for sme_task in SmeTaskEmail.objects.all():
sme.email_tasks.add(sme_task)
def get_broker_data(user):
try:
broker = Broker.objects.get(name=user)
except Broker.DoesNotExist:
return {'status': 'failure', 'msg': 'User Broker does not exist', 'data': {}}
try:
profile = Profile.objects.get(user=user)
except Profile.DoesNotExist:
profile = Profile.objects.create(user=user, name=user.first_name, email=user.email)
accounts = Bank.objects.filter(user=user)
accounts_data = []
for ac in accounts:
ac_data = ac.to_json()
accounts_data.append(ac_data)
data = {
'user': get_broker_user_data(user, profile, broker=broker),
'accounts_data': accounts_data,
'aaho_office': broker.aaho_office.to_json() if broker.aaho_office else {},
}
return {'status': 'success', 'data': data}
def broker_intital_data_test():
for broker in Broker.objects.all():
print(get_broker_data(broker.name))
def update_sme_material():
df = pd.read_excel('/Users/aaho/Downloads/Customer Thu, Sep 6, 2018, 216 PM.xlsx')
df = df.fillna('')
for i, row in df.iterrows():
if row['Material']:
try:
sme = Sme.objects.get(company_code=row['Code'])
material = row['Material']
sme.material = material
sme.save()
except Sme.DoesNotExist:
print(row, i)
def update_sme_aaho_office():
df =
|
pd.read_excel('/Users/aaho/Downloads/Customer POC and Office Update.xlsx', sheet_name='Office Update')
|
pandas.read_excel
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
update_prices.update_prices()
update_companies_info.update_db()
cwd = os.getcwd()
conn = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = conn.cursor()
# %% Functions
class Ticker:
"""
Attributes and Methods to analyse stocks traded in B3 -BOLSA BRASIL BALCÃO
"""
def __init__(self, ticker, group="consolidated"):
"""
Creates a Ticker Class Object
Args:
ticker: string
string of the ticker
group: string
Financial statements group. Can be 'consolidated' or 'individual'
"""
self.ticker = ticker.upper()
df = pd.read_sql(
f"""SELECT cnpj, type, sector, subsector, segment, denom_comerc
FROM tickers
WHERE ticker = '{self.ticker}'""",
conn,
)
if len(df) == 0:
print('unknown ticker')
return
self.cnpj = df["cnpj"][0]
self.type = df["type"][0]
self.sector = df["sector"][0]
self.subsector = df["subsector"][0]
self.segment = df["segment"][0]
self.denom_comerc = df["denom_comerc"][0]
Ticker.set_group(self, group)
on_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'ON'",
conn,
)
on_ticker = on_ticker[on_ticker["ticker"].str[-1] == "3"]
self.on_ticker = on_ticker.values[0][0]
try:
self.pn_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'PN'",
conn,
).values[0][0]
except:
pass
def set_group(self, new_group):
"""
To change the financial statement group attribute of an object
Args:
new_group: string
can be 'consolidated' or 'individual'
"""
if new_group in ["individual", "consolidado", "consolidated"]:
if new_group == "individual":
self.grupo = "Individual"
else:
self.grupo = "Consolidado"
# Infer the frequency of the reports
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
if len(dates) == 0:
self.grupo = "Individual"
print(
f"The group of {self.ticker} was automatically switched to individual due to the lack of consolidated statements."
)
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
try:
freq = pd.infer_freq(dates["date"])
self.freq = freq[0]
except ValueError:
self.freq = "Q"
except TypeError:
dates["date"] = pd.to_datetime(dates["date"])
number_of_observations = len(dates)
period_of_time = (
dates.iloc[-1, 0] - dates.iloc[0, 0]
) / np.timedelta64(1, "Y")
if number_of_observations / period_of_time > 1:
self.freq = "Q"
else:
self.freq = "A"
if self.freq == "A":
print(
f"""
The {self.grupo} statements of {self.ticker} are only available on an annual basis.
Only YTD values will be available in the functions and many functions will not work.
Try setting the financial statements to individual:
Ticker.set_group(Ticker object, 'individual')
"""
)
else:
print("new_group needs to be 'consolidated' or 'individual'.")
def get_begin_period(self, function, start_period):
"""
Support method for other methods of the Class
"""
if start_period == "all":
begin_period = pd.to_datetime("1900-01-01")
return begin_period.date()
elif start_period not in ["all", "last"]:
try:
pd.to_datetime(start_period)
except:
print(
"start_period must be 'last', 'all', or a date formatted as 'YYYY-MM-DD'."
)
return
if start_period == "last":
if function in ["prices", "total_shares", "market_value"]:
last_date = pd.read_sql(
f"SELECT date FROM prices WHERE ticker = '{self.ticker}' ORDER BY date DESC LIMIT(1)",
conn,
)
else:
last_date = pd.read_sql(
f"SELECT dt_fim_exerc FROM dre WHERE cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' ORDER BY dt_fim_exerc DESC LIMIT(1)",
conn,
)
begin_period = pd.to_datetime(last_date.values[0][0])
else:
begin_period = pd.to_datetime(start_period)
return begin_period.date()
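# Illustrative usage (hypothetical ticker object t): Ticker.get_begin_period(t,
# function='prices', start_period='2020-01-01') returns datetime.date(2020, 1, 1);
# 'all' maps to 1900-01-01 and 'last' looks up the most recent date in the database.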
def create_pivot_table(df):
"""
Support method for other methods of the Class
"""
##### Creates a pivot table and add % change columns #####
# create columns with % change of the values
# value_types: ytd, quarter_value, ttm_value
first_type = df.columns.get_loc('ds_conta') + 1
value_types = list(df.columns[first_type:])
new_columns = [i + " % change" for i in value_types]
df[new_columns] = df[value_types].div(
df.groupby("cd_conta")[value_types].shift(1))
# the calculation of %change from ytd is different:
if 'ytd' in value_types:
shifted_values = df[['dt_fim_exerc', 'cd_conta', 'ytd']]
shifted_values = shifted_values.set_index(
[(pd.to_datetime(shifted_values['dt_fim_exerc']) + pd.DateOffset(years=1)), shifted_values['cd_conta']])
df = df.set_index([df['dt_fim_exerc'], df['cd_conta']])
df['ytd % change'] = df['ytd'] / shifted_values['ytd']
df[new_columns] = (df[new_columns] - 1) * 100
# reshape
df = df.pivot(
index=["cd_conta", "ds_conta"],
columns=["dt_fim_exerc"],
values=value_types + new_columns
)
# rename multiIndex column levels
df.columns = df.columns.rename("value", level=0)
df.columns = df.columns.rename("date", level=1)
# sort columns by date
df = df.sort_values([("date"), ("value")], axis=1, ascending=False)
# Sometimes the description of the same account differs slightly between periods
# (e.g. punctuation). The purpose of the df_index is to keep only one description
# per account, avoiding duplicated rows.
df_index = df.reset_index().iloc[:, 0:2]
df_index.columns = df_index.columns.droplevel(1)
df_index = df_index.groupby("cd_conta").first()
# This groupby sums the duplicated rows into a single row per account
df = df.groupby(level=0, axis=0).sum()
# The next two lines add the account description to the dataframe multiIndex
df["ds_conta"] = df_index["ds_conta"]
df = df.set_index("ds_conta", append=True)
# Reorder the multiIndex column levels
df = df.reorder_levels(order=[1, 0], axis=1)
# Due to the earlier line 'df = df.sort_values([("date"), ("value")],
# axis=1, ascending=False)'
# the columns are ordered by date descending and value descending. The purpose
# here is to set the order to: date descending and value ascending.
df_columns = df.columns.to_native_types()
new_order = []
for i in range(1, len(df_columns), 2):
new_order.append(df_columns[i])
new_order.append(df_columns[i - 1])
new_order = pd.MultiIndex.from_tuples(
new_order, names=("date", "value"))
df = df[new_order]
return df
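# Shape sketch (hypothetical values): create_pivot_table expects a long-format frame
# with 'dt_fim_exerc', 'cd_conta', 'ds_conta' plus one or more value columns
# ('ytd', 'quarter_value', 'ttm_value') and returns a frame indexed by
# (cd_conta, ds_conta) whose columns form a (date, value) MultiIndex, so a cell like
# pivot.loc[('3.01', 'Revenue'), ('2020-12-31', 'ytd')] holds one reported figure.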
def income_statement(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the income statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="income_statement", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
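# Worked example (made-up numbers): for one account with ytd = [10, 25, 45, 70] over
# Q1..Q4, the diff gives [NaN, 15, 20, 25] and the Q1 reset above turns it into
# quarter_value = [10, 15, 20, 25].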
if ttm:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if not quarter:
df = df.drop(["quarter_value"], axis=1)
if not ytd:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def balance_sheet(self, start_period="all", plot=False):
"""
Creates a dataframe with the balance sheet statement of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="bp", start_period=start_period
)
query = f"""SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpa
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
UNION ALL
SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpp
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, parse_dates=['dt_fim_exerc'])
df = Ticker.create_pivot_table(df)
if plot:
_plots.bs_plot(df, self.ticker, self.grupo)
return df
def cash_flow(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the cash flow statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="dfc", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if not quarter:
df = df.drop(["quarter_value"], axis=1)
if not ytd:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def prices(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="prices", start_period=start_period
)
prices = pd.read_sql(
f"""SELECT date, preult AS price
FROM prices
WHERE ticker = '{self.ticker}' AND date >= '{begin_period}'
ORDER BY date""",
conn,
index_col="date", parse_dates=['date']
)
return prices
def total_shares(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="total_shares", start_period=start_period
)
query = f"""SELECT date, number_shares AS on_shares
FROM prices
WHERE ticker = '{self.on_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_on = pd.read_sql(query, conn)
try:
query = f"""SELECT date, number_shares AS pn_shares
FROM prices
WHERE ticker = '{self.pn_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_pn = pd.read_sql(query, conn)
shares = nshares_on.merge(nshares_pn, how="left")
shares["total_shares"] = shares["on_shares"] + \
shares["pn_shares"].fillna(0)
except:
shares = nshares_on.rename({"on_shares": "total_shares"}, axis=1)
shares.index = shares["date"]
shares.index = pd.to_datetime(shares.index)
return shares[["total_shares"]]
def net_income(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the net income information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="net_income", start_period=start_period
)
begin_period = begin_period +
|
pd.DateOffset(months=-12)
|
pandas.DateOffset
|
import pandas as pd
import numpy as np
from datetime import timedelta
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
class spatial_mapping():
def __init__(self, data, gps, gps_utc=0):
df=pd.DataFrame(data)
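# 693962 is the MATLAB datenum of 1900-01-01, so the next line converts MATLAB serial
# date numbers to UTC timestamps (assuming the first data column holds MATLAB datenums).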
df[0]=pd.to_datetime(df[0]-693962,unit='D',origin=pd.Timestamp('1900-01-01'),utc=True)
df=df.rename(columns={0:'Time'})
self.data=df
if type(gps)==str:
if gps[-3:].lower()=='csv':
self.gps=
|
pd.read_csv(gps)
|
pandas.read_csv
|
"""
Plotting code for GPU hardware metrics (i.e., SM occupancy, SM efficiency),
and miscellaneous experiments with GPU utilization.
"""
from rlscope.profiler.rlscope_logging import logger
import argparse
import traceback
import bdb
import copy
import re
import sys
import itertools
import os
import csv
import textwrap
import pprint
import math
from io import StringIO
import json
import codecs
import pandas as pd
from rlscope.parser.plot_utils import setup_matplotlib
setup_matplotlib()
import matplotlib
import matplotlib.ticker
# matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import join as _j, abspath as _a, dirname as _d, exists as _e, basename as _b
from rlscope.profiler.util import pprint_msg
from rlscope.parser.stacked_bar_plots import get_x_env, get_x_algo, xfields_from_xtick_expression, get_capsize, OverlapStackedBarPlot, add_repetition, group_numeric_cols
from rlscope.parser.dataframe import UtilDataframeReader, RLScopeConfig
from rlscope import py_config
from rlscope.parser.common import *
from rlscope.parser import constants
from rlscope.parser.plot_utils import is_pdf, pdf2png
from rlscope.py_config import yes_as_bool
from typing import *
class IMLInvaidArgument(Exception):
pass
def maybe_number(x):
if type(x) != str:
return x
try:
num = int(x)
return num
except ValueError:
pass
try:
num = float(x)
return num
except ValueError:
pass
return x
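# Illustrative behaviour: maybe_number("68") -> 68, maybe_number("0.5") -> 0.5,
# maybe_number("yes") -> "yes", and non-string inputs are returned unchanged.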
def parse_filename_attrs(
path : str,
file_prefix : str,
file_suffix : str,
attrs : Iterable[str],
dflt_attrs : Optional[Dict[str, Any]] = None):
attr_name_regex = r'(?:{regex})'.format(
regex='|'.join(sorted(attrs, key=lambda attr: (-1*len(attr), attr)))
)
attr_string_regex = r'(?P<attr_name>{attr_name})_(?P<attr_value>[^\.]*)'.format(
attr_name=attr_name_regex
)
# e.g.
# path = 'GPUHwCounterSampler.thread_blocks_68.thread_block_size_1024.csv'
# e.g.
# ['GPUHwCounterSampler', 'thread_blocks_68', 'thread_block_size_1024', 'csv']
components = re.split(r'\.', _b(path))
assert components[0] == file_prefix
assert components[-1] == file_suffix
attr_strings = components[1:len(components)-1]
attr_vals = dict()
if dflt_attrs is not None:
attr_vals.update(dflt_attrs)
for attr_string in attr_strings:
m = re.fullmatch(attr_string_regex, attr_string)
if not m:
raise RuntimeError(f"""
Not sure how to parse attribute name/value from \"{attr_string}\" found in {_b(path)}.
Attributes we recognize = {attrs}
""")
attr_vals[m.group('attr_name')] = m.group('attr_value')
return attr_vals
def parse_path_attrs(
path : str,
attrs : Iterable[str],
dflt_attrs : Optional[Dict[str, Any]] = None,
attr_types : Optional[Dict[str, Any]] = None,
debug : bool = False,
):
attr_name_regex = r'(?:{regex})'.format(
regex='|'.join(sorted(attrs, key=lambda attr: (-1*len(attr), attr)))
)
attr_string_regex = r'(?P<attr_name>{attr_name})_(?P<attr_value>[^\.]*)\b'.format(
attr_name=attr_name_regex
)
# e.g.
# path = 'GPUHwCounterSampler.thread_blocks_68.thread_block_size_1024.csv'
if debug:
logger.info(f"attr_name_regex = {attr_name_regex}")
attr_vals = dict()
if dflt_attrs is not None:
attr_vals.update(dflt_attrs)
path_components = os.path.split(path)
for path_component in path_components:
# e.g.
# ['GPUHwCounterSampler', 'thread_blocks_68', 'thread_block_size_1024', 'csv']
attr_strings = re.split(r'\.', path_component)
for attr_string in attr_strings:
m = re.search(attr_string_regex, attr_string)
if m:
value = m.group('attr_value')
attr_name = m.group('attr_name')
if attr_types is not None and attr_name in attr_types:
value = attr_types[attr_name](value)
attr_vals[attr_name] = value
# if not m:
# raise RuntimeError(f"""
# Not sure how to parse attribute name/value from \"{attr_string}\" found in {path}.
# Attributes we recognize = {attrs}
# """)
missing_attrs = set(attrs).difference(attr_vals.keys())
if len(missing_attrs) > 0:
raise RuntimeError(f"""
Couldn't find all required attributes in {path}.
Attributes we are missing = {missing_attrs}
""")
return attr_vals
METRIC_NAME_CUPTI_TO_PROF = {
# Deprecated CUPTI metric API -- achieved_occupancy:
# Id = 1205
# Shortdesc = Achieved Occupancy
# Longdesc = Ratio of the average active warps per active cycle to the maximum number of warps supported on a multiprocessor
'achieved_occupancy': "sm__warps_active.avg.pct_of_peak_sustained_active",
# Deprecated CUPTI metric API -- sm_efficiency:
# Id = 1203
# Shortdesc = Multiprocessor Activity
# Longdesc = The percentage of time at least one warp is active on a multiprocessor averaged over all multiprocessors on the GPU
# See CUPTI documentation for mapping to new "Profiling API" metric name:
# https://docs.nvidia.com/cupti/Cupti/r_main.html#metrics_map_table_70
'sm_efficiency': "smsp__cycles_active.avg.pct_of_peak_sustained_elapsed",
# Deprecated CUPTI metric API -- inst_executed:
# Metric# 90
# Id = 1290
# Name = inst_executed
# Shortdesc = Instructions Executed
# Longdesc = The number of instructions executed
'inst_executed': "smsp__inst_executed.sum",
# Deprecated CUPTI metric API -- active_cycles:
# Event# 25
# Id = 2629
# Name = active_cycles
# Shortdesc = Active cycles
# Longdesc = Number of cycles a multiprocessor has at least one active warp.
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'active_cycles': "sm__cycles_active.sum",
# Deprecated CUPTI metric API -- active_warps:
# Event# 26
# Id = 2630
# Name = active_warps
# Shortdesc = Active warps
# Longdesc = Accumulated number of active warps per cycle. For every cycle it increments by the number of active warps in the cycle which can be in the range 0 to 64.
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'active_warps': "sm__warps_active.sum",
# Deprecated CUPTI metric API -- elapsed_cycles_sm:
# Event# 33
# Id = 2193
# Name = elapsed_cycles_sm
# Shortdesc = Elapsed clocks
# Longdesc = Elapsed clocks
# Category = CUPTI_EVENT_CATEGORY_INSTRUCTION
'elapsed_cycles_sm': "sm__cycles_elapsed.sum",
}
PROF_TO_METRIC_NAME_CUPTI = dict((v, k) for k, v in METRIC_NAME_CUPTI_TO_PROF.items())
# HACK: number of total SMs on the RTX 2080 GPU on the "eco" cluster machines
NUM_SMS = 68
SM_OCCUPANCY_TITLE = "SM occupancy: average percent of warps\nthat are in use within an SM"
SM_EFFICIENCY_TITLE = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
SM_EFFICIENCY_Y_LABEL = f"SM efficiency (%)\n# SMs = {NUM_SMS}"
SM_OCCUPANCY_Y_LABEL = "SM occupancy (%)\nmax threads per block = 1024"
SAMPLE_THROUGHPUT_Y_LABEL = "Throughput (samples/second)"
SAMPLE_LATENCY_Y_LABEL = "Minibatch latency (ms)"
CUPTI_METRIC_Y_LABEL = {
'sm_efficiency': SM_EFFICIENCY_Y_LABEL,
'achieved_occupancy': SM_OCCUPANCY_Y_LABEL,
}
CUPTI_METRIC_Y_LABEL_SHORT = {
'sm_efficiency': "SM efficiency (%)",
'achieved_occupancy': "SM occupancy (%)",
}
TRT_METRIC_YLABELS = {
'host_latency_throughput_qps': SAMPLE_THROUGHPUT_Y_LABEL,
'gpu_compute_mean_ms': "Mean GPU compute time (ms)",
'gpu_compute_percentile_99_ms': "99%-tile GPU compute time (ms)",
}
BATCH_SIZE_X_LABEL = "Batch size"
STREAMS_X_LABEL = "# of CUDA streams"
SIMULATOR_X_LABEL = "Simulator"
STEP_THROUGHPUT_Y_LABEL = "Simulation throughput (samples/sec)"
STEP_LATENCY_Y_LABEL = "Simulation latency (ms)"
RLSCOPE_X_LABEL = "(RL algorithm, Simulator)"
SM_ID_X_LABEL = f"SM ID\n# SMs = {NUM_SMS}"
GPU_UTIL_EXPERIMENT_ATTRS = {
'thread_blocks',
'thread_block_size',
'n_launches',
'iterations',
'num_threads',
'processes',
'hw_counters',
}
GPU_UTIL_EXPERIMENT_ATTR_TYPES = {
'thread_blocks': maybe_number,
'thread_block_size': maybe_number,
'n_launches': maybe_number,
'iterations': maybe_number,
'num_threads': maybe_number,
'processes': yes_as_bool,
'hw_counters': yes_as_bool,
}
MULTI_TASK_ATTRS = set(GPU_UTIL_EXPERIMENT_ATTRS)
MULTI_TASK_ATTRS.update({
## From directory attrs
# 'thread_blocks',
# 'thread_block_size',
# 'n_launches',
# 'iterations',
# 'num_threads',
'iterations_per_sched_sample',
# 'processes',
# 'hw_counters',
## GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
'thread_id',
'stream_id',
'trace_id',
})
MULTI_TASK_JSON_ATTRS = {
## From contents of: GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
"globaltimer_ns",
"kernel_id",
"lane_id",
"sm_id",
"stream_id",
"warp_id",
}
MULTI_TASK_ATTR_TYPES = dict(GPU_UTIL_EXPERIMENT_ATTR_TYPES)
MULTI_TASK_ATTR_TYPES.update({
## From directory attrs
# 'thread_blocks',
# 'thread_block_size',
# 'n_launches',
# 'iterations',
# 'num_threads',
'iterations_per_sched_sample': maybe_number,
# 'processes',
# 'hw_counters',
## GPUComputeSchedInfoKernel.thread_id_9.stream_id_9.trace_id_0.json
'thread_id': maybe_number,
'stream_id': maybe_number,
'trace_id': maybe_number,
})
MULTI_TASK_RAW_ATTR_TYPES = dict(MULTI_TASK_ATTR_TYPES)
MULTI_TASK_RAW_ATTR_TYPES.update({
'num_sms': maybe_number,
'sms_allocated': maybe_number,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': maybe_number,
})
# MULTI_TASK_RAW_ATTR_DFLTS = dict(MULTI_TASK)
MULTI_TASK_RAW_ATTR_DFLTS = {
'num_sms': None,
'sms_allocated': None,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': None,
}
MULTI_TASK_RAW_ATTRS = MULTI_TASK_ATTRS.union(MULTI_TASK_RAW_ATTR_TYPES.keys()).difference({
'stream_id',
'thread_id',
'trace_id',
})
# suffix=".num_sms_${NUM_SMS}.sms_allocated_${sms_allocated}.CUDA_MPS_ACTIVE_THREAD_PERCENTAGE_${CUDA_MPS_ACTIVE_THREAD_PERCENTAGE}"
# all_cycles:
# the metric is computed over all cycles on the GPU, including cycles where the GPU
# is idle and not executing any kernels.
# active_cycles:
# the metric is computed over active GPU cycles.
# Measurement periods where the GPU is idle result in a metric value of "0".
MEASUREMENT_PERIOD_ACTIVE_CYCLES = 'active_cycles'
MEASUREMENT_PERIOD_ALL_CYCLES = 'all_cycles'
CUPTI_METRIC_MEASUREMENT_PERIOD = {
'achieved_occupancy': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'sm_efficiency': MEASUREMENT_PERIOD_ALL_CYCLES,
'inst_executed': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'active_cycles': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'active_warps': MEASUREMENT_PERIOD_ACTIVE_CYCLES,
'elapsed_cycles_sm': MEASUREMENT_PERIOD_ALL_CYCLES,
}
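# Minimal sketch (not part of the original module): with the deprecated-CUPTI raw
# counters listed above, achieved occupancy can be approximated as the average number
# of active warps per active cycle divided by the warp capacity of an SM. The warp
# capacity of 64 is an assumption about this GPU generation, not a value taken from
# this file.
def approx_achieved_occupancy(active_warps, active_cycles, max_warps_per_sm=64):
    # Idle measurement periods report zero active cycles; treat them as zero occupancy.
    if active_cycles == 0:
        return 0.0
    # Fraction of the SM's warp slots that were occupied on average.
    return (active_warps / active_cycles) / max_warps_per_sm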
FLOAT_RE = r'(?:[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)'
UNIT_RE = r'(?:\b(?:ms|s|qps)\b)'
class TrtexecExperiment:
def __init__(self, args):
self.args = args
def run(self):
self.read_df()
self.plot_df()
def read_df(self):
self._read_trtexec_df()
self._read_tf_inference_df()
self._read_simulator_df()
self._read_mps_df()
"""
TODO: merge trtexec_df and tf_inference_df
trtexec_field tf_inference_field
host_latency_throughput_qps throughput_qps
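A possible approach (sketch, not implemented here): rename the TF throughput column
and concatenate, e.g.
    tf_df = self.tf_inference_result_df.rename(columns={'throughput_qps': 'host_latency_throughput_qps'})
    merged = pd.concat([self.trtexec_df, tf_df], ignore_index=True, sort=False)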
"""
def plot_df(self):
"""
Plot trtexec7 experiments.
:return:
"""
"""
batch_size = 1, 8, 16, 32, 64
streams = 1
plot:
throughput
sm_efficiency
sm_occupancy
"""
def _plot_batch_size_vs(streams, suffix=None):
self._plot_batch_size_vs_throughput(
title="Throughput with increasing batch size",
streams=streams,
suffix=suffix)
def filter_tensorflow(plot_df):
plot_df = plot_df[plot_df['config'] == 'TF']
return plot_df
self._plot_batch_size_vs_throughput(
title="Throughput with increasing batch size",
streams=streams,
filter_df=filter_tensorflow,
suffix=f"{or_empty(suffix)}.just_tensorflow")
self._plot_batch_size_vs_metric(
title=SM_EFFICIENCY_TITLE,
cupti_metric='sm_efficiency',
streams=streams,
suffix=suffix)
self._plot_batch_size_vs_metric(
title=SM_OCCUPANCY_TITLE,
cupti_metric='achieved_occupancy',
streams=streams,
suffix=suffix)
_plot_batch_size_vs(streams=1)
def _plot_streams_vs(batch_size, suffix=None):
def _title(title):
return f"{title}:\n(batch size = {batch_size})"
trt_metric_title = {
'host_latency_throughput_qps': _title("Throughput with increasing streams"),
'gpu_compute_mean_ms': _title("Mean GPU compute time with increasing streams"),
'gpu_compute_percentile_99_ms': _title("99-%tile GPU compute time with increasing streams"),
}
cuda_graph_dict = {
'host_latency_throughput_qps': None,
'gpu_compute_mean_ms': None,
'gpu_compute_percentile_99_ms': None,
}
for trt_metric in trt_metric_title.keys():
self._plot_streams_vs_trt_metric(
trt_metric, batch_size,
title=trt_metric_title[trt_metric],
cuda_graph=cuda_graph_dict.get(trt_metric, None))
# self._plot_streams_vs_throughput(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
# batch_size=batch_size,
# suffix=suffix)
self._plot_streams_vs_metric(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
title=SM_EFFICIENCY_TITLE,
cupti_metric='sm_efficiency',
batch_size=batch_size,
suffix=suffix)
self._plot_streams_vs_metric(
# title="Throughput with increasing streams\n(batch size = {batch_size})".format(batch_size=batch_size),
title=SM_OCCUPANCY_TITLE,
cupti_metric='achieved_occupancy',
batch_size=batch_size,
suffix=suffix)
"""
batch_size = 1
streams = 1, 2, 3, ..., 8
plot:
throughput
sm_efficiency
sm_occupancy
"""
_plot_streams_vs(batch_size=1)
if self.trtexec_df is not None:
"""
batch_size = (best batch size for streams == 1)
streams = 1, 2, 3, ..., 8
plot:
throughput
sm_efficiency
sm_occupancy
"""
best_batch_size = self._compute_best_batch_size()
_plot_streams_vs(batch_size=best_batch_size, suffix='.best_batch_size')
self._plot_simulator_vs_steptime()
self._plot_simulator_vs_throughput()
def _plot_multiprocess_inference(df, throughput_title=None, inference_title=None, filter_df=None, suffix=None):
# if throughput_title is None:
# throughput_title = 'Increasing inference throughput when slicing SMs with CUDA MPS processes'
# if inference_title is None:
# inference_title = 'Inference latency when slicing SMs with CUDA MPS processes'
self._plot_mps_batch_size_vs_metric_by_num_tasks(
df=self.mps_df,
metric='throughput_qps',
title=throughput_title,
xlabel=BATCH_SIZE_X_LABEL,
ylabel=SAMPLE_THROUGHPUT_Y_LABEL,
filter_df=filter_df,
suffix=suffix,
global_ymax=True,
)
self._plot_mps_batch_size_vs_metric_by_num_tasks(
df=self.mps_raw_df,
metric='inference_time_ms',
title=inference_title,
xlabel=BATCH_SIZE_X_LABEL,
ylabel=SAMPLE_LATENCY_Y_LABEL,
filter_df=filter_df,
suffix=suffix,
global_ymax=False,
)
"""
3 different graphs for multi-process experiment:
- Multi-process (CPU) / config_cpu
row['cpu']
assert not row['mps']
- Multi-process MPS (GPU) / config_mps_gpu_evenly
row['mps'] and row['sm_alloc_strategy'] == 'evenly'
assert not row['cpu']
- Multi-process MPS (GPU) / config_mps_gpu_evenly_x2
row['mps'] and row['sm_alloc_strategy'] == 'evenly_x2'
assert not row['cpu']
- Multi-process (GPU, no MPS) / config_gpu
not row['mps'] and not row['cpu']
"""
def is_config_cpu(row):
is_cpu = row['cpu']
if is_cpu:
assert not row['mps']
return is_cpu
# def is_config_mps_gpu_evenly(row):
# is_mps = row['mps']
# if is_mps:
# assert not row['cpu']
# return is_mps and row['sm_alloc_strategy'] == 'evenly'
#
# def is_config_mps_gpu_evenly_x2(row):
# is_mps = row['mps']
# if is_mps:
# assert not row['cpu']
# return is_mps and row['sm_alloc_strategy'] == 'evenly_x2'
def is_config_mps_gpu(row):
is_mps = row['mps']
if is_mps:
assert not row['cpu']
return is_mps
def is_config_gpu(row):
return not row['mps'] and not row['cpu']
def as_row_filter_func(is_config):
def row_filter_func(df):
df = df[df.apply(is_config, axis=1)]
return df
return row_filter_func
# throughput_ymax = self.mps_df['']
sm_alloc_strategies = self.mps_df[self.mps_df['mps']]['sm_alloc_strategy'].unique().tolist()
for sm_alloc_strategy in sm_alloc_strategies:
def _is_config(row):
return is_config_mps_gpu(row) and row['sm_alloc_strategy'] == sm_alloc_strategy
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (GPU) + CUDA MPS',
inference_title='Inference latency:\nmulti-process TF scripts (GPU) + CUDA MPS',
filter_df=as_row_filter_func(_is_config),
suffix=f".config_mps_gpu_{sm_alloc_strategy}")
# _plot_multiprocess_inference(self.mps_df, filter_df=as_row_filter_func(is_config_mps_gpu_evenly), suffix='.config_mps_gpu_evenly')
# _plot_multiprocess_inference(self.mps_df, filter_df=as_row_filter_func(is_config_mps_gpu_evenly_x2), suffix='.config_mps_gpu_evenly_x2')
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (CPU)',
inference_title='Inference latency:\nmulti-process TF scripts (CPU)',
filter_df=as_row_filter_func(is_config_cpu),
suffix='.config_cpu')
_plot_multiprocess_inference(
self.mps_df,
throughput_title='Inference throughput:\nmulti-process TF scripts (GPU)',
inference_title='Inference latency:\nmulti-process TF scripts (GPU)',
filter_df=as_row_filter_func(is_config_gpu),
suffix='.config_gpu')
def _compute_best_batch_size(self):
df = self.trtexec_df[self.trtexec_df['streams'] == 1]
max_throughput = df['host_latency_throughput_qps'].max()
batch_sizes = df[df['host_latency_throughput_qps'] == max_throughput]['batch_size'].unique()
assert len(batch_sizes) == 1
best_batch_size = batch_sizes[0]
return best_batch_size
def _plot_streams_vs_metric(self, title, cupti_metric, batch_size, ylabel=None, suffix=None):
if self.trtexec_gpu_hw_df is None:
return
df = copy.copy(self.trtexec_gpu_hw_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='trtexec')
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="streams", y="metric_value",
hue="config",
data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is None:
ylabel = CUPTI_METRIC_Y_LABEL[cupti_metric]
g.set_ylabels(ylabel)
g.set_xlabels(STREAMS_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_{cupti_metric}.batch_size_{batch_size}{suffix}.svg'))
def _plot_batch_size_vs_metric(self, title, cupti_metric, streams, ylabel=None, suffix=None):
if self.trtexec_gpu_hw_df is None:
return
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
plot_df = pd.DataFrame(columns=['batch_size', 'metric_value', 'config'])
if self.trtexec_gpu_hw_df is not None:
df = copy.copy(self.trtexec_gpu_hw_df)
df = df[df['streams'] == streams]
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='trtexec')
plot_df = plot_df.append(df[plot_df.columns])
if self.tf_inference_gpu_hw_df is not None:
df = copy.copy(self.tf_inference_gpu_hw_df)
df = df[df['range_name'] == 'inference_loop/inference']
df = keep_cupti_metric(df, cupti_metric)
add_gpu_hw_fields(df)
df = self._add_config(df, df_type='tf_inference')
plot_df = plot_df.append(df[plot_df.columns])
plot_df.sort_values(by=['config', 'batch_size'], inplace=True)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="batch_size", y="metric_value",
hue="config",
data=plot_df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is None:
ylabel = CUPTI_METRIC_Y_LABEL[cupti_metric]
g.set_ylabels(ylabel)
g.set_xlabels(BATCH_SIZE_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(plot_df, _j(self.args['trtexec_dir'], f'batch_size_vs_{cupti_metric}.streams_{streams}{suffix}.svg'))
def _plot_streams_vs_trt_metric(self, trt_metric, batch_size, title=None, ylabel=None, alias=None, cuda_graph=None, suffix=None):
if self.trtexec_df is None:
return
if alias is None:
alias = trt_metric
df = copy.copy(self.trtexec_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
df = self._add_config(df, df_type='trtexec')
sns.set(style="whitegrid")
plot_kwargs = dict(
x="streams",
y=trt_metric,
kind="bar",
palette="muted",
)
if cuda_graph is None:
plot_kwargs.update(dict(
hue="config",
))
elif cuda_graph:
df = df[df['cuda_graph']]
else:
df = df[~ df['cuda_graph']]
plot_kwargs.update(dict(
data=df,
))
g = sns.catplot(**plot_kwargs)
g.despine(left=True)
if ylabel is None:
ylabel = TRT_METRIC_YLABELS[trt_metric]
g.set_ylabels(ylabel)
# if xlabel is not None:
g.set_xlabels(STREAMS_X_LABEL)
if title is not None:
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
ss = StringIO()
if cuda_graph is None:
pass
elif cuda_graph:
ss.write(f".cuda_graph_yes")
else:
ss.write(f".cuda_graph_no")
if suffix is not None:
ss.write(f".{suffix}")
ss = ss.getvalue()
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_{alias}.batch_size_{batch_size}{ss}.svg'))
def _plot_mps_batch_size_vs_metric_by_num_tasks(self, df, metric, title=None, xlabel=None, ylabel=None, filter_df=None, suffix=None, global_ymax=False):
"""
Throughput graph:
Y-axis = throughput
X-axis (major) = batch-size (larger impact on throughput)
X-axis (minor) = num_tasks (lesser impact on throughput)
Latency graph:
Y-axis = latency samples (mean/std across all processes)
X-axis (major) = batch-size (larger impact on latency)
X-axis (minor) = num_tasks (lesser impact on latency)
"""
if df is None:
return
df = copy.copy(df)
assert metric in df
# df = self._add_config(df, df_type='trtexec')
global_df = df
if filter_df is not None:
df = filter_df(df)
sns.set(style="whitegrid")
g = sns.catplot(x="batch_size",
y=metric,
# data=df,
hue="config",
data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
if ylabel is not None:
g.set_ylabels(ylabel)
if xlabel is not None:
g.set_xlabels(xlabel)
if title is not None:
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if global_ymax:
new_ymax = global_df[metric].max()
ymin, ymax = g.ax.get_ylim()
g.ax.set_ylim((ymin, max(ymax, new_ymax)))
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['mps_dir'], f'mps_batch_size_vs_{metric}_by_num_tasks{suffix}.svg'))
def _plot_streams_vs_throughput(self, title, batch_size, suffix=None):
if self.trtexec_df is None:
return
df = copy.copy(self.trtexec_df)
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
df = df[df['batch_size'] == batch_size]
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
df = self._add_config(df, df_type='trtexec')
sns.set(style="whitegrid")
g = sns.catplot(x="streams", y="host_latency_throughput_qps",
# data=df,
hue="config", data=df,
# hue="num_threads", data=df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
g.set_ylabels(SAMPLE_THROUGHPUT_Y_LABEL)
g.set_xlabels(STREAMS_X_LABEL)
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_throughput.batch_size_{batch_size}{suffix}.svg'))
def _add_config(self, df, df_type):
assert df_type in {'trtexec', 'tf_inference'}
if df_type == 'trtexec':
def _config(row):
if row['cuda_graph']:
return 'TensorRT - CUDA graph ON'
return 'TensorRT'
df['config'] = df.apply(_config, axis=1)
elif df_type == 'tf_inference':
def _config(row):
if row['xla']:
return 'TF - XLA ON'
return 'TF'
df['config'] = df.apply(_config, axis=1)
else:
raise NotImplementedError()
return df
def _plot_batch_size_vs_throughput(self, title, streams, filter_df=None, suffix=None):
if self.trtexec_df is None:
return
"""
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
"""
plot_df = pd.DataFrame(columns=['batch_size', 'throughput_qps', 'config'])
if self.trtexec_df is not None:
df = copy.copy(self.trtexec_df)
df = df[df['streams'] == streams]
df.rename(columns={
'host_latency_throughput_qps': 'throughput_qps',
}, inplace=True)
df = self._add_config(df, df_type='trtexec')
plot_df = plot_df.append(df[plot_df.columns])
if self.tf_inference_result_df is not None:
df = copy.copy(self.tf_inference_result_df)
df = self._add_config(df, df_type='tf_inference')
plot_df = plot_df.append(df[plot_df.columns])
plot_df.sort_values(by=['config', 'batch_size'], inplace=True)
if filter_df is not None:
plot_df = filter_df(plot_df)
# df = keep_cupti_metric(df, cupti_metric)
# titled_df = copy.copy(df)
# col_titles = {
# 'num_threads': 'Number of threads',
# }
# titled_df.rename(columns=col_titles, inplace=True)
sns.set(style="whitegrid")
# df = df[["thread_blocks", "metric_value", "num_threads"]]
g = sns.catplot(x="batch_size", y="throughput_qps",
# data=df,
hue="config", data=plot_df,
# hue=col_titles["num_threads"], data=titled_df,
# height=6,
kind="bar",
palette="muted"
)
g.despine(left=True)
g.set_ylabels(SAMPLE_THROUGHPUT_Y_LABEL)
g.set_xlabels(BATCH_SIZE_X_LABEL)
# title = "SM efficiency: percent of SMs\nthat are in use across the entire GPU"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
if suffix is None:
suffix = ""
save_plot(plot_df, _j(self.args['trtexec_dir'], f'batch_size_vs_throughput.streams_{streams}{suffix}.svg'))
def parse_trtexec_logs_as_df(self, logs):
def each_field_value(log):
for section in log:
for attr, value in log[section].items():
field = f"{section}_{attr}"
yield field, value
all_fields = set()
if len(logs) > 0:
all_fields = set([field for field, value in each_field_value(logs[0])])
data = dict()
for log in logs:
for field, value in each_field_value(log):
if field not in all_fields:
raise RuntimeError(f"Saw unexpected field={field}; expected one of {all_fields}")
if field not in data:
data[field] = []
data[field].append(value)
df = pd.DataFrame(data)
return df
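    # Example (values made up for illustration): each parsed log is a nested dict and
    # every (section, attr) pair becomes one flat column, e.g.
    #   logs = [{'host_latency': {'mean_ms': 1.2, 'throughput_qps': 850.0}}]
    #   parse_trtexec_logs_as_df(logs)
    #   -> columns ['host_latency_mean_ms', 'host_latency_throughput_qps']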
def parse_trtexec_log(self, trtexec_log_path):
"""
{
'host_latency': {
'min_ms': 0.123,
'mean_ms': 0.123,
...
}
}
:param trtexec_log_path:
:return:
"""
with open(trtexec_log_path) as f:
section = None
data = dict()
def strip_log_prefix(line):
line = re.sub(r'^\[[^\]]+\]\s+\[I\]\s+', '', line)
return line
def as_attr(section):
attr = section
attr = re.sub(' ', '_', attr)
attr = attr.lower()
return attr
def parse_section(line):
m = re.search(r'(?P<section>Host Latency|GPU Compute|Enqueue Time)$', line, flags=re.IGNORECASE)
if m:
section = as_attr(m.group('section'))
return section
return None
def parse_e2e_metric(line):
            # NOTE: end-to-end time = endOutput - startInput
            # non-end-to-end = (endInput - startInput) + (endCompute - startCompute) + (endOutput - startOutput)
            # So, "end-to-end" includes some time spent host-side between stages, whereas
            # non-end-to-end only counts time spent GPU side (the transfers, the kernel running).
m = re.search(r'(?P<name>min|max|mean|median): (?P<value>{float}) {unit} \(end to end (?P<e2e_value>{float}) (?P<unit>{unit})\)'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
# Just ignore this value...
value = float(m.group('value'))
e2e_value = float(m.group('e2e_value'))
name = "{name}_{unit}".format(name=m.group('name'), unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': e2e_value,
}
return None
def parse_metric_with_unit(line):
m = re.search(r'(?P<name>[a-zA-Z][a-zA-Z ]+): (?P<value>{float}) (?P<unit>{unit})'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{unit}".format(name=m.group('name'), unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def parse_percentile(line):
m = re.search(r'(?P<name>percentile): (?P<value>{float}) (?P<unit>{unit}) at (?P<percent>\d+)%'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{percent}_{unit}".format(
name=m.group('name'),
percent=m.group('percent'),
unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def parse_e2e_percentile(line):
m = re.search(r'(?P<name>percentile): [^(]+\(end to end (?P<value>{float}) (?P<unit>{unit}) at (?P<percent>\d+)%\)'.format(
float=FLOAT_RE,
unit=UNIT_RE), line)
if m:
value = float(m.group('value'))
name = "{name}_{percent}_{unit}".format(
name=m.group('name'),
percent=m.group('percent'),
unit=m.group('unit'))
name = as_attr(name)
return {
'name': name,
'value': value,
}
return None
def _add_parsed_value(dic):
if section not in data:
data[section] = dict()
data[section][dic['name']] = dic['value']
for lineno, line in enumerate(f, start=1):
line = line.rstrip()
ret = parse_section(line)
if ret:
section = ret
continue
if section is None:
continue
line = strip_log_prefix(line)
ret = parse_e2e_metric(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_e2e_percentile(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_percentile(line)
if ret:
_add_parsed_value(ret)
continue
ret = parse_metric_with_unit(line)
if ret:
_add_parsed_value(ret)
continue
if self.debug:
logger.info("Skip {path}:{lineno}: {line}".format(
path=trtexec_log_path,
lineno=lineno,
line=line,
))
return data
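    # Example (sketch; the exact trtexec log formatting is an assumption inferred from
    # the regexes above, and the numbers are made up):
    #   [08/12/2021-10:00:00] [I] Host Latency
    #   [08/12/2021-10:00:00] [I] mean: 1.234 ms (end to end 1.456 ms)
    #   [08/12/2021-10:00:00] [I] throughput: 850.0 qps
    # would yield roughly
    #   {'host_latency': {'mean_ms': 1.456, 'throughput_qps': 850.0}}
    # (for metrics that report an end-to-end value, the end-to-end number is the one kept).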
@property
def debug(self):
return self.args['debug']
def _read_mps_df(self):
self.mps_df = None
self.mps_raw_df = None
if self.args['mps_dir'] is None:
return
"""
/home/jgleeson/clone/rlscope/output/microbench_inference_multiprocess/batch_size_128.num_tasks_1.env_id_BreakoutNoFrameskip-v4.num_sms_68.sms_allocated_68.CUDA_MPS_ACTIVE_THREAD_PERCENTAGE_100.0
"""
mps_dflt_attrs = {
'num_sms': None,
'sms_allocated': None,
'sm_alloc_strategy': None,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': None,
}
mps_attr_types = {
'mps': yes_as_bool,
'cpu': yes_as_bool,
'batch_size': maybe_number,
'num_tasks': maybe_number,
'env_id': str,
'num_sms': maybe_number,
'sms_allocated': maybe_number,
'sm_alloc_strategy': str,
'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': maybe_number,
}
mps_attrs = set(mps_attr_types.keys())
dfs = []
raw_dfs = []
for path in each_file_recursive(self.args['mps_dir']):
if not re.search(r'^mode_microbench_inference_multiprocess\.merged\.json$', _b(path)):
continue
js = load_json(path)
df = pd.DataFrame(
dict((k, [v]) for k, v in js['summary_metrics'].items())
)
attr_dict = parse_path_attrs(
path,
mps_attrs,
mps_dflt_attrs,
mps_attr_types,
)
for attr_name, attr_value in attr_dict.items():
df[attr_name] = attr_value
dfs.append(df)
# Q: Should we discard outliers...?
raw_df = pd.DataFrame(data=js['raw_samples'])
for attr_name, attr_value in attr_dict.items():
raw_df[attr_name] = attr_value
raw_dfs.append(raw_df)
self.mps_df = pd.concat(dfs)
self.mps_raw_df = pd.concat(raw_dfs)
def _add_config(df):
def _config(row):
if row['mps']:
assert row['CUDA_MPS_ACTIVE_THREAD_PERCENTAGE'] is not None
return multitask_title('process MPS', 'processes', n_tasks=row['num_tasks'], sep=' ')
assert row['CUDA_MPS_ACTIVE_THREAD_PERCENTAGE'] is None
return multitask_title('process', 'processes', n_tasks=row['num_tasks'], sep=' ')
df['config'] = df.apply(_config, axis=1)
return df
def _sort(df):
df = df.sort_values(by=['batch_size', 'num_tasks'])
return df
def _prepare_df(df):
df = _add_config(df)
df = _sort(df)
return df
self.mps_df = _prepare_df(self.mps_df)
self.mps_raw_df = _prepare_df(self.mps_raw_df)
self.mps_raw_df['inference_time_ms'] = self.mps_raw_df['inference_time_sec'] * 1000
logger.info("mps dataframe:\n{msg}".format(
msg=txt_indent(DataFrame.dataframe_string(self.mps_df), indent=1),
))
logger.info("mps_raw dataframe:\n{msg}".format(
msg=txt_indent(DataFrame.dataframe_string(self.mps_raw_df), indent=1),
))
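    # Example (sketch): for the directory name shown in the docstring above,
    # parse_path_attrs is assumed to yield roughly
    #   batch_size=128, num_tasks=1, env_id='BreakoutNoFrameskip-v4',
    #   num_sms=68, sms_allocated=68, CUDA_MPS_ACTIVE_THREAD_PERCENTAGE=100.0
    # and each of these values is attached as a column of that run's dataframe.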
def _read_simulator_df(self):
self.simulator_df = None
self.simulator_raw_df = None
if self.args['simulator_dir'] is None:
return
"""
/home/jgleeson/clone/rlscope/output/simulator/batch_size_8.xla_no/GPUHwCounterSampler.csv
"""
simulator_dflt_attrs = {
}
simulator_attrs = {
'env_id',
}
simulator_attr_types = {
'env_id': str,
}
dfs = []
raw_dfs = []
for path in each_file_recursive(self.args['simulator_dir']):
if not re.search(r'^mode_microbench_simulator\.json$', _b(path)):
continue
js = load_json(path)
df = pd.DataFrame(
dict((k, [v]) for k, v in js['summary_metrics'].items())
)
sm_attrs = parse_path_attrs(
path,
simulator_attrs,
simulator_dflt_attrs,
simulator_attr_types,
)
for attr_name, attr_value in sm_attrs.items():
df[attr_name] = attr_value
dfs.append(df)
# Q: Should we discard outliers...?
raw_df = pd.DataFrame(data={
'step_time_sec': js['raw_samples']['step_time_sec']
})
for attr_name, attr_value in sm_attrs.items():
raw_df[attr_name] = attr_value
raw_dfs.append(raw_df)
self.simulator_df = pd.concat(dfs)
self.simulator_raw_df = pd.concat(raw_dfs)
self.simulator_df = self._add_simulator(self.simulator_df)
self.simulator_raw_df = self._add_simulator(self.simulator_raw_df)
logger.info("simulator dataframe:\n{msg}".format(
msg=txt_indent(DataFrame.dataframe_string(self.simulator_df), indent=1),
))
logger.info("simulator_raw dataframe:\n{msg}".format(
msg=txt_indent(DataFrame.dataframe_string(self.simulator_raw_df), indent=1),
))
def _plot_simulator_vs_steptime(self):
"""
x = simulator
y = mean step time (seconds)
:return:
"""
if self.simulator_raw_df is None:
return
plot_df = pd.DataFrame(columns=['simulator', 'step_time_ms'])
if self.simulator_raw_df is not None:
df = copy.copy(self.simulator_raw_df)
df['step_time_ms'] = df['step_time_sec'] * 1000
plot_df = plot_df.append(df[plot_df.columns])
plot_df.sort_values(by=['simulator', 'step_time_ms'], inplace=True)
sns.set(style="whitegrid")
# Boxplot?
# CDF?
g = sns.catplot(x="simulator", y="step_time_ms",
data=plot_df,
kind="bar",
palette="muted")
g.despine(left=True)
g.set_ylabels(STEP_LATENCY_Y_LABEL)
g.set_xlabels(SIMULATOR_X_LABEL)
title = "Simulation latency"
g.fig.suptitle(title)
g.fig.subplots_adjust(top=0.90)
g.fig.axes[0].set_xticklabels(
g.fig.axes[0].get_xticklabels(),
rotation=self.arg('rotation', 15),
)
save_plot(plot_df, _j(self.args['simulator_dir'], f'simulator_vs_latency.svg'))
def arg(self, name, default=None):
if name not in self.args or self.args[name] is None:
return default
return self.args[name]
def _add_simulator(self, df):
def _simulator(row):
return get_x_env(row['env_id'])
df['simulator'] = df.apply(_simulator, axis=1)
return df
def _plot_simulator_vs_throughput(self):
"""
x = simulator
y = steps per second (samples/second)
:return:
"""
if self.simulator_df is None:
return
        plot_df = pd.DataFrame(columns=['simulator', 'throughput_step_per_sec'])
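        # Hedged completion sketch (assumed, not the original author's code): a plausible
        # body mirroring _plot_simulator_vs_steptime above.  The throughput column name
        # matches the empty frame created above; deriving throughput from the raw
        # per-step samples and the y-axis label text are assumptions.
        if self.simulator_raw_df is not None:
            df = copy.copy(self.simulator_raw_df)
            # steps/second from the per-step latency samples
            df['throughput_step_per_sec'] = 1.0 / df['step_time_sec']
            plot_df = plot_df.append(df[plot_df.columns])
        plot_df.sort_values(by=['simulator', 'throughput_step_per_sec'], inplace=True)
        sns.set(style="whitegrid")
        g = sns.catplot(x="simulator", y="throughput_step_per_sec",
                        data=plot_df,
                        kind="bar",
                        palette="muted")
        g.despine(left=True)
        g.set_ylabels("Throughput (steps/second)")
        g.set_xlabels(SIMULATOR_X_LABEL)
        g.fig.suptitle("Simulation throughput")
        g.fig.subplots_adjust(top=0.90)
        save_plot(plot_df, _j(self.args['simulator_dir'], 'simulator_vs_throughput.svg'))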
# Starter code for multiple regressors implemented by <NAME>
# Source code based on Forecasting Favorites, 1owl
# https://www.kaggle.com/the1owl/forecasting-favorites , version 10
# Part II
import numpy as np
import pandas as pd
from sklearn import preprocessing, linear_model, metrics
import gc; gc.enable()
import random
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import TheilSenRegressor, BayesianRidge
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
import time
np.random.seed(1122)
# record the start time (used to report the total processing time)
start_time = time.time()
tcurrent = start_time
print('Multiple regressors - Neural network (MLP), Bayesian Ridge, Bagging (4x) and XGBoost (2x)\n')
print('Datasets reading')
# read datasets
dtypes = {'id':'int64', 'item_nbr':'int32', 'store_nbr':'int8', 'onpromotion':str}
data = {
#R 'tra': pd.read_csv('../input/train.csv', dtype=dtypes, parse_dates=['date']),
'tra': pd.read_csv('../input/processed/train_4r.csv', dtype=dtypes, parse_dates=['date']),
'tes': pd.read_csv('../input/test.csv', dtype=dtypes, parse_dates=['date']),
    'ite': pd.read_csv('../input/items.csv')
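# Sketch (not part of the original kernel): the regressors imported above would
# typically be instantiated before training, e.g.
#   models = [
#       MLPRegressor(hidden_layer_sizes=(50,), random_state=1122),
#       BayesianRidge(),
#       BaggingRegressor(DecisionTreeRegressor(), n_estimators=10, random_state=1122),
#       GradientBoostingRegressor(random_state=1122),
#   ]
# The hyperparameters here are placeholders, not the kernel's actual settings.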
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
        assert not notnull(-np.inf)
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
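# Example: for a 2x2 DataFrame df, _generate_indices(df) yields the label pairs
# itertools.product(df.index, df.columns); with values=True it yields the positional
# pairs (0, 0), (0, 1), (1, 0), (1, 1) instead.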
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
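# Example (sketch): _get_result(df, 'indexer', 1, axis=0) maps the positional key 1 to
# the axis-0 label df.index[1] and then evaluates it through .ix, so the tests below can
# compare positional and label-based indexing results.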
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
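# Example: for a DataFrame (ndim == 2), _axify(df, [0, 2], axis=1) returns
# (slice(None), [0, 2]), i.e. "all rows, columns 0 and 2".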
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then it's ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitem calls on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a':
|
lrange(4)
|
pandas.compat.lrange
|
#!/usr/bin/env python
"""cmst.py: Climaproof model selection tool class for data preparation and transformation"""
__author__ = "<NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import cartopy.crs as ccrs
import os
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
class Cmst(object):
"""
Class for climaproof model selection tool data preparation
"""
def __init__(self, sel_bbox, sel_time_mean, sel_experiment, data_dir='data'):
"""
Args:
            * sel_bbox (dict):
bounding box for averaging: e.g. {"lat": np.array([32, 35]), "lon": np.array([12, 14])}
* sel_time_mean (string):
the temporal mean method of source dataset (e.g. summer, winter, annual)
* sel_experiment (string):
the experiment of source dataset (e.g. rcp26, rcp45, rcp85)
Kwargs:
* data_dir (path):
directory where intermediate files will be written
"""
self.var_selector = {
"tasmin":"climate change signal of daily minimum near-surface air temperature",
"tasmax":"climate change signal of daily maximum near-surface air temperature",
"rsds":"climate change signal of daily mean surface downwelling shortwave radiation",
"pr":"relative climate change signal of {} precipitation".format(sel_time_mean)
}
self.time_selector = {
"near":0,
"mid":1,
"far":2
}
self.sel_bbox = sel_bbox
self.sel_time_mean = sel_time_mean
self.sel_experiment = sel_experiment
self.data_dir = data_dir
self.cl_ccs = iris.load_raw(
os.path.join(data_dir,"ccs_{}_*_{}_*.nc".format(self.sel_time_mean, self.sel_experiment))
)
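    # Usage sketch (illustrative, not from the original source): assuming NetCDF files
    # matching "ccs_<time_mean>_*_<experiment>_*.nc" exist under ./data, a typical call is
    #   cmst = Cmst(sel_bbox={"lat": np.array([42.0, 46.5]), "lon": np.array([19.0, 23.0])},
    #               sel_time_mean="annual", sel_experiment="rcp45")
    #   df_near = cmst.get_pandas_df(sel_time_frame="near")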
def prepare(self, cl):
        cl_ccs_mean = iris.cube.CubeList()
ls_models = []
realization = 0
# Define a Cartopy 'ordinary' lat-lon coordinate reference system.
crs_latlon = ccrs.PlateCarree()
for i,c_ccs in enumerate(cl):
crs_cube = c_ccs.coord_system().as_cartopy_crs()
bbox_proj = crs_cube.transform_points(crs_latlon, self.sel_bbox['lon'], self.sel_bbox['lat'])
x_coord = c_ccs.coords(dim_coords=True, axis='X')[0]
y_coord = c_ccs.coords(dim_coords=True, axis='Y')[0]
bbox_const = iris.Constraint(
coord_values={x_coord.standard_name:lambda cell: bbox_proj[:,0][0] < cell < bbox_proj[:,0][1],
y_coord.standard_name:lambda cell: bbox_proj[:,1][0] < cell < bbox_proj[:,1][1]})
c_ccs = c_ccs.extract(bbox_const)
cl_ccs_mean.append(c_ccs.collapsed([x_coord.standard_name, y_coord.standard_name],
iris.analysis.MEAN))
# remove scalar dimensions and cell_methods (different names due to different projections)
cl_ccs_mean[i].remove_coord(x_coord.standard_name)
cl_ccs_mean[i].remove_coord(y_coord.standard_name)
cl_ccs_mean[i].cell_methods = c_ccs.cell_methods
# Add realization coordinate if it does not already exist
if not cl_ccs_mean[i].coords('realization'):
realization += 1
ensemble_coord = iris.coords.AuxCoord(realization, standard_name='realization')
cl_ccs_mean[i].add_aux_coord(ensemble_coord)
ls_models.append(c_ccs.attributes['model'])
return ls_models, cl_ccs_mean
def get_pandas_df(self, sel_time_frame = "near"):
ls_models = {}
cl_ccs_mean = {}
pds_ccs = {}
pds_perc = {}
pdf_ret =
|
pd.DataFrame()
|
pandas.DataFrame
|
import itertools as it
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklego.common import TrainOnlyTransformerMixin
from tests.conftest import np_types, n_vals, k_vals
class TrainOnlyTrainOnlyTransformer(TrainOnlyTransformerMixin, BaseEstimator):
def fit(self, X, y):
super().fit(X, y)
def transform_train(self, X, y=None):
return X + np.random.normal(0, 1, size=X.shape)
def test_hash_numpy():
"""Tests whether the hash function does not produce collisions on np arrays"""
hashes = []
for n, k, np_type in it.product(n_vals, k_vals, np_types):
X = np.random.normal(0, 2, (n, k)).astype(np_type)
hashes.append(TrainOnlyTransformerMixin._hash(X))
assert len(hashes) == len(set(hashes))
def test_hash_pandas():
"""Tests whether the hash function does not produce collisions on dataframes"""
hashes = []
for n, k, np_type in it.product(n_vals, k_vals, np_types):
X = pd.DataFrame(np.random.normal(0, 2, (n, k)).astype(np_type))
hashes.append(TrainOnlyTransformerMixin._hash(X))
assert len(hashes) == len(set(hashes))
def test_bare_trainonlytransformer(random_xy_dataset_regr):
"""Tests whether the trainonlytransformer will only transform train when used directly"""
X_train, X_test, y_train, y_test = train_test_split(*random_xy_dataset_regr)
trf = TrainOnlyTrainOnlyTransformer()
trf.fit(X_train, y_train)
assert np.all(trf.transform(X_train) != X_train)
assert np.all(trf.transform(X_test) == X_test)
def test_pipeline_trainonlytransformer(random_xy_dataset_regr):
"""Tests whether the trainonlytransformer will only transform train when used in a pipeline"""
X_train, X_test, y_train, y_test = train_test_split(*random_xy_dataset_regr)
trf = make_pipeline(TrainOnlyTrainOnlyTransformer())
trf.fit(X_train, y_train)
assert np.all(trf.transform(X_train) != X_train)
assert np.all(trf.transform(X_test) == X_test)
def test_bare_trainonlytransformer_pandas(random_xy_dataset_regr):
"""Tests whether the trainonlytransformer will only transform train when used directly"""
X, y = pd.DataFrame(random_xy_dataset_regr[0]), pd.DataFrame(random_xy_dataset_regr[1])
X_train, X_test, y_train, y_test = train_test_split(X, y)
trf = TrainOnlyTrainOnlyTransformer()
trf.fit(X_train, y_train)
assert np.all(trf.transform(X_train) != X_train)
assert np.all(trf.transform(X_test) == X_test)
assert isinstance(trf.transform(X_train), pd.DataFrame)
assert isinstance(trf.transform(X_test), pd.DataFrame)
def test_pipeline_trainonlytransformer_pandas(random_xy_dataset_regr):
"""Tests whether the trainonlytransformer will only transform train when used in a pipeline"""
X, y = pd.DataFrame(random_xy_dataset_regr[0]),
|
pd.DataFrame(random_xy_dataset_regr[1])
|
pandas.DataFrame
|
import collections
import functools
import gzip
import itertools
import os
import pathlib
import shlex
from collections import defaultdict
from functools import partial
from subprocess import run, PIPE, CalledProcessError
from typing import Union, List
import numpy as np
import pandas as pd
from ._doc import *
from ._open import open_allc
IUPAC_TABLE = {
'A': 'A',
'T': 'T',
'C': 'C',
'G': 'G',
'R': 'AG',
'Y': 'CT',
'S': 'GC',
'W': 'AT',
'K': 'GT',
'M': 'AC',
'B': 'CGT',
'D': 'AGT',
'H': 'ATC',
'V': 'ACG',
'N': 'ATCGN'
}
COMPLIMENT_BASE = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C',
'a': 't', 'c': 'g', 't': 'a', 'g': 'c',
'N': 'N', 'n': 'n'}
def reverse_compliment(seq):
return ''.join(map(lambda i: COMPLIMENT_BASE[i], seq[::-1]))
def get_allc_chroms(allc_path):
p = run(['tabix', '-l', allc_path],
check=True, stderr=PIPE, stdout=PIPE, encoding='utf8')
return p.stdout.strip('\n').split('\n')
@functools.lru_cache(maxsize=100)
def parse_mc_pattern(pattern: str) -> set:
"""
    Parse an mC context pattern written with IUPAC codes into the set of concrete contexts it covers.
"""
# IUPAC DNA abbr. table
all_pos_list = []
pattern = pattern.upper()
for base in pattern:
try:
all_pos_list.append(IUPAC_TABLE[base])
except KeyError:
raise KeyError(f'Base {base} is not in IUPAC table.')
context_set = set([''.join(i) for i in itertools.product(*all_pos_list)])
return context_set
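# Illustrative example (not part of the original module): expanding an IUPAC pattern
#   parse_mc_pattern('CGN') -> {'CGA', 'CGC', 'CGG', 'CGT', 'CGN'}
# because 'N' expands to A/T/C/G/N in IUPAC_TABLE.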
@functools.lru_cache(maxsize=10)
def parse_chrom_size(path, remove_chr_list=None):
"""
    Parse a simple UCSC chrom size file, or a .fai index (whose first two columns match the chrom size format),
    and return an ordered chrom -> length dict.
"""
if remove_chr_list is None:
remove_chr_list = []
with open(path) as f:
chrom_dict = collections.OrderedDict()
for line in f:
# *_ for other format like fadix file
chrom, length, *_ = line.strip('\n').split('\t')
if chrom in remove_chr_list:
continue
chrom_dict[chrom] = int(length)
return chrom_dict
def chrom_dict_to_id_index(chrom_dict, bin_size):
sum_id = 0
index_dict = {}
for chrom, chrom_length in chrom_dict.items():
index_dict[chrom] = sum_id
sum_id += chrom_length // bin_size + 1
return index_dict
def get_bin_id(chrom, chrom_index_dict, bin_start, bin_size) -> int:
chrom_index_start = chrom_index_dict[chrom]
n_bin = bin_start // bin_size
return chrom_index_start + n_bin
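# Worked example (illustrative): with chrom_dict = {'chr1': 2500, 'chr2': 1000} and
# bin_size = 1000, chrom_dict_to_id_index() returns {'chr1': 0, 'chr2': 3} (chr1 spans
# bins 0-2), so get_bin_id('chr2', index_dict, bin_start=1500, bin_size=1000) == 3 + 1 == 4.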
def genome_region_chunks(chrom_size_path: str,
bin_length: int = 10000000,
combine_small: bool = True) -> List[str]:
"""
Split the whole genome into bins, where each bin is {bin_length} bp. Used for tabix region query
Parameters
----------
chrom_size_path
Path of UCSC genome size file
bin_length
length of each bin
combine_small
whether combine small regions into one record
Returns
-------
list of records in tabix query format
"""
chrom_size_dict = parse_chrom_size(chrom_size_path)
cur_chrom_pos = 0
records = []
record_lengths = []
for chrom, chrom_length in chrom_size_dict.items():
while cur_chrom_pos + bin_length <= chrom_length:
# tabix region is 1 based and inclusive
records.append(f'{chrom}:{cur_chrom_pos}-{cur_chrom_pos + bin_length - 1}')
cur_chrom_pos += bin_length
record_lengths.append(bin_length)
else:
records.append(f'{chrom}:{cur_chrom_pos}-{chrom_length}')
            record_lengths.append(chrom_length - cur_chrom_pos)
            cur_chrom_pos = 0
    # merge small records (when bin is larger than chrom length)
final_records = []
if combine_small:
temp_records = []
cum_length = 0
for record, record_length in zip(records, record_lengths):
temp_records.append(record)
cum_length += record_length
if cum_length >= bin_length:
final_records.append(' '.join(temp_records))
temp_records = []
cum_length = 0
if len(temp_records) != 0:
final_records.append(' '.join(temp_records))
else:
for record in records:
final_records.append(record)
return final_records
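# Usage sketch (illustrative; 'hg38.chrom.sizes' is a hypothetical UCSC chrom size file):
#   regions = genome_region_chunks('hg38.chrom.sizes', bin_length=10000000)
#   # the first entry is 'chr1:0-9999999' when the first chromosome is longer than 10 Mb,
#   # ready to pass to `tabix <allc.tsv.gz> <region>`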
def parse_file_paths(input_file_paths: Union[str, list]) -> list:
if isinstance(input_file_paths, list) and (len(input_file_paths) == 1):
input_file_paths = input_file_paths[0]
if isinstance(input_file_paths, str):
if '*' in input_file_paths:
import glob
file_list = glob.glob(input_file_paths)
else:
file_list = []
with open(input_file_paths) as f:
for line in f:
file_list.append(line.strip('\n'))
_file_list = file_list
elif isinstance(input_file_paths, list):
_file_list = input_file_paths
else:
raise TypeError('File paths input is neither str nor list.')
final_file_list = []
for path in _file_list:
real_path = pathlib.Path(path).resolve()
if not real_path.exists():
            raise FileNotFoundError(f'{path} provided does not exist.')
final_file_list.append(str(real_path))
    return final_file_list
def get_md5(file_path):
file_md5 = run(shlex.split(f'md5sum {file_path}'), stdout=PIPE, encoding='utf8', check=True).stdout
file_md5 = file_md5.split(' ')[0]
return file_md5
def check_tbi_chroms(file_path, genome_dict, same_order=False):
file_tabix_path = pathlib.Path(str(file_path) + '.tbi')
if not file_tabix_path.exists():
        print(f'{file_path} does not have a .tbi index. Use tabix to index it.')
return False
tbi_time = os.path.getmtime(file_tabix_path)
file_time = os.path.getmtime(file_path)
if file_time > tbi_time:
        # the tabix index was created before the ALLC file was last modified; the ALLC may have changed.
return False
try:
chroms = run(['tabix', '-l', file_path],
stdout=PIPE,
encoding='utf8',
check=True).stdout.strip().split('\n')
if len(set(chroms) - genome_dict.keys()) != 0:
return False
if same_order:
ref_order = list(genome_dict.keys())
cur_pos = -1
for chrom in chroms:
chrom_pos = ref_order.index(chrom)
if chrom_pos < cur_pos:
return False
else:
cur_pos = chrom_pos
except CalledProcessError:
return False
return True
def generate_chrom_bin_bed_dataframe(chrom_size_path: str,
window_size: int,
step_size: int = None) -> pd.DataFrame:
"""
Generate BED format dataframe based on UCSC chrom size file and window_size
    Return a dataframe with 3 columns (chrom, bin_start, bin_end); the index is a continuous 0-based bin index.
"""
if step_size is None:
step_size = window_size
chrom_size_dict = parse_chrom_size(chrom_size_path)
records = []
for chrom, chrom_length in chrom_size_dict.items():
bin_start = np.array(list(range(0, chrom_length, step_size)))
bin_end = bin_start + window_size
bin_end[np.where(bin_end > chrom_length)] = chrom_length
chrom_df = pd.DataFrame(dict(bin_start=bin_start, bin_end=bin_end))
chrom_df['chrom'] = chrom
records.append(chrom_df)
total_df = pd.concat(records)[['chrom', 'bin_start', 'bin_end']].reset_index(drop=True)
return total_df
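# Usage sketch (illustrative; the file name is hypothetical):
#   bins = generate_chrom_bin_bed_dataframe('hg38.chrom.sizes', window_size=100000)
#   # columns: chrom, bin_start, bin_end; the index is the global 0-based bin id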
@doc_params(allc_path_doc=allc_path_doc)
def profile_allc(allc_path, drop_n=True, n_rows=1000000, output_path=None):
"""\
Generate some summary statistics of 1 ALLC.
1e8 rows finish in about 5 min.
Parameters
----------
allc_path
{allc_path_doc}
drop_n
Whether to drop context that contain N, such as CCN.
This is usually very rare and need to be dropped.
n_rows
Number of rows to calculate the profile from.
        The default number of rows is usually sufficient for a fairly precise estimate.
output_path
Path of the output file. If None, will save the profile next to input ALLC file.
Returns
-------
"""
# TODO write test
# Find best default value
if 'gz' in allc_path:
opener = partial(gzip.open, mode='rt')
else:
opener = partial(open, mode='r')
# initialize count dict
mc_sum_dict = defaultdict(int)
cov_sum_dict = defaultdict(int)
cov_sum2_dict = defaultdict(int) # sum of square, for calculating variance
rate_sum_dict = defaultdict(float)
rate_sum2_dict = defaultdict(float) # sum of square, for calculating variance
context_count_dict = defaultdict(int)
with opener(allc_path) as f:
n = 0
for line in f:
chrom, pos, strand, context, mc, cov, p = line.split('\t')
if drop_n and 'N' in context:
continue
# mc and cov
mc_sum_dict[context] += int(mc)
cov_sum_dict[context] += int(cov)
cov_sum2_dict[context] += int(cov) ** 2
# raw base rate
rate = int(mc) / int(cov)
rate_sum_dict[context] += rate
rate_sum2_dict[context] += rate ** 2
# count context finally
context_count_dict[context] += 1
n += 1
if (n_rows is not None) and (n >= n_rows):
break
# overall count
profile_df = pd.DataFrame({'partial_mc': mc_sum_dict,
'partial_cov': cov_sum_dict})
profile_df['base_count'] = pd.Series(context_count_dict)
profile_df['overall_mc_rate'] = profile_df['partial_mc'] / profile_df['partial_cov']
# cov base mean and base std.
# assume that base cov follows normal distribution
cov_sum_series = pd.Series(cov_sum_dict)
cov_sum2_series = pd.Series(cov_sum2_dict)
profile_df['base_cov_mean'] = cov_sum_series / profile_df['base_count']
profile_df['base_cov_std'] = np.sqrt(
(cov_sum2_series / profile_df['base_count']) - profile_df['base_cov_mean'] ** 2)
# assume that base rate follow beta distribution
# so that observed rate actually follow joint distribution of beta (rate) and normal (cov) distribution
# here we use the observed base_rate_mean and base_rate_var to calculate
# approximate alpha and beta value for the base rate beta distribution
rate_sum_series = pd.Series(rate_sum_dict)
rate_sum2_series =
|
pd.Series(rate_sum2_dict)
|
pandas.Series
|
from jug import TaskGenerator, bvalue
from jug.hooks import exit_checks
exit_checks.exit_if_file_exists('jug.exit')
M6_NAMES = 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore'.split()
def use_name(na):
import hashlib
return hashlib.sha256(na.encode('ascii')).hexdigest()[:3] == 'fff'
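# Note (added for clarity): use_name() keeps a deterministic ~1/4096 subsample of query
# names (three fixed hex digits of the sha256 digest), so the same genes are selected
# on every run and on every worker.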
@TaskGenerator
def basic_stats(fname):
import pandas as pd
t = pd.read_table(fname, header=None, names=M6_NAMES)
res = {
'total_qs': len(set(t.qseqid)),
}
no_self_hits = t.query('qseqid != sseqid')
allqs = set(no_self_hits.qseqid)
res['any_hit'] = len(allqs)
res['use_name'] = sum(use_name(q) for q in allqs)
no_self_hits = no_self_hits.query('pident >= 90')
res['any_hit_90'] = len(set(no_self_hits.qseqid))
no_self_hits = no_self_hits.query('pident >= 92')
res['any_hit_92'] = len(set(no_self_hits.qseqid))
no_self_hits = no_self_hits.query('pident >= 95')
res['any_hit_95'] = len(set(no_self_hits.qseqid))
return pd.Series(res)
def iter_fasta(fname):
header = None
seq = []
for line in open(fname):
line = line.strip()
if line[0] == '>':
if header is not None:
if len(seq) == 1:
[seq] = seq
else:
seq = ''.join([s for s in seq])
yield (header, seq)
header = line
seq = []
        else:
            seq.append(line)
    # yield the final record; the loop above only yields when it sees the next header
    if header is not None:
        if len(seq) == 1:
            [seq] = seq
        else:
            seq = ''.join(seq)
        yield (header, seq)
@TaskGenerator
def split_fasta(fname, base, split_size_in_mb):
from os import path
splits = []
next_i = 0
split_size = split_size_in_mb * 1000 * 1000
cur = split_size + 1
for h,seq in iter_fasta(fname):
if len(seq) + cur > split_size:
oname = path.join(base, 'split.{}.fa'.format(next_i))
splits.append(oname)
out = open(oname, 'wt')
next_i += 1
cur = 0
cur += len(seq)
out.write('{}\n{}\n'.format(h, seq))
return splits
@TaskGenerator
def run_diamond(s):
oname = s.replace('.fa', '.m6')
from jug.utils import jug_execute
jug_execute.f([
'/g/scb2/bork/mocat/software/diamond/0.9.24/diamond',
'blastp',
'--threads', '12',
'-d', 'GMGC.95nr.faa',
'-q', s,
'--outfmt', '6',
'-o', oname])
return oname
@TaskGenerator
def post_process1(m6):
import skbio.alignment
from skbio.sequence import DNA,Protein
import pandas as pd
import numpy as np
from fasta_reader import IndexedFastaReader
ix = IndexedFastaReader('GMGC.95nr.fna', use_mmap=True)
cache_ix = {}
def get_ix(n):
if n not in cache_ix:
if len(cache_ix) > 1000_000:
cache_ix.clear()
cache_ix[n] = ix.get(n).decode('ascii') \
.replace('N','A') \
.replace('K','A') \
.replace('S','A') \
.replace('M','A') \
.replace('W','A') \
.replace('D','A') \
.replace('R','A') \
.replace('Y','A')
return cache_ix[n]
t = pd.read_table(m6, header=None, names=M6_NAMES)
print("Loaded table...: ({} entries)".format(len(t)))
no_self_hits = t.query('qseqid != sseqid').query('pident >= 90')
print("Filtered table... ({} entries)".format(len(no_self_hits)))
no_self_hits = no_self_hits[no_self_hits.qseqid.map(use_name)]
print("Re-filtered table... ({} entries)".format(len(no_self_hits)))
data = []
n = 0
for _,row in no_self_hits.iterrows():
na = row['qseqid']
#if not use_name(na):
# continue
#print(na)
nb = row['sseqid']
sa = get_ix(na)
sb = get_ix(nb)
sw_dna = skbio.alignment.local_pairwise_align_ssw(DNA(sa),DNA(sb))
[(sa_start, sa_end), (sb_start, sb_end)] = sw_dna[2]
a = sw_dna[0]
al_id = np.mean(a.conservation() == 1.0)
data.append((na, nb, al_id, sa_start, sa_end, sb_start, sb_end, len(sa), len(sb)))
#print(data[-1])
n += 1
if n % 10 == 0:
print(n, len(no_self_hits), '{:.1%}'.format(n/len(no_self_hits)), m6)
data = pd.DataFrame(data, columns=['name_a', 'name_b', 'align_id', 'a_start', 'a_end', 'b_start', 'b_end', 'a_len', 'b_len'])
oname = m6.replace('.m6','.tsv')
data.to_csv(oname, sep='\t')
return oname
def cover_full(row):
return (row['a_start'] == 0 and row['a_end']+1 == row['a_len']) \
or (row['b_start'] == 0 and row['b_end']+1 == row['b_len'])
def ends_overlap(row):
[a_start, a_end, b_start, b_end, a_len, b_len] = row[['a_start', 'a_end', 'b_start', 'b_end', 'a_len', 'b_len']]
MARGIN = 5
return ((a_len - a_end - 1 <= MARGIN) and (b_start <= MARGIN)) or ((b_len - b_end - 1 <= MARGIN) and (a_start <= MARGIN))
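# Worked example (illustrative): for a row with a_len=300, a_start=0, a_end=299 the
# alignment covers sequence A end to end, so cover_full(row) is True; ends_overlap(row)
# is True when one alignment reaches within MARGIN=5 bp of its sequence end while the
# other starts within 5 bp of its own start, i.e. the two genes overlap at their ends.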
@TaskGenerator
def get_overlapped(t):
import pandas as pd
final =
|
pd.read_table(t)
|
pandas.read_table
|
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import pytest
from IPython.display import Image
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from sklearn.exceptions import NotFittedError
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import KFold, cross_val_score
from causalnex.structure import data_generators as dg
from causalnex.structure.pytorch import DAGClassifier, DAGRegressor
class TestDAGSklearn:
""" Tests aspects common to both DAGRegressor and DAGClassifier """
@pytest.mark.parametrize("model", [DAGRegressor, DAGClassifier])
@pytest.mark.parametrize(
"val, msg, error",
[
({"alpha": "0.0"}, "alpha should be numeric", TypeError),
({"beta": "0.0"}, "beta should be numeric", TypeError),
({"fit_intercept": 0}, "fit_intercept should be a bool", TypeError),
({"threshold": "0.0"}, "threshold should be numeric", TypeError),
],
)
def test_input_type_assertion(self, val, msg, error, model):
with pytest.raises(error, match=msg):
model(**val)
@pytest.mark.parametrize("model", [DAGRegressor, DAGClassifier])
def test_notfitted_error(self, model):
m = model()
X = np.random.normal(size=(100, 2))
with pytest.raises(NotFittedError):
m.predict(X)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_tabu_parent_nodes(self, model, y):
X = np.random.normal(size=(100, 2))
X, y = pd.DataFrame(X), pd.Series(y, name="test")
m = model(dependent_target=True, tabu_parent_nodes=["test"])
assert "test" in m.tabu_parent_nodes
m = model(dependent_target=True, tabu_parent_nodes=[])
m.fit(X, y)
assert "test" not in m.tabu_parent_nodes
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_numpy_fit(self, model, y):
m = model()
X = np.random.normal(size=(100, 2))
m.fit(X, y)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_pandas_fit(self, model, y):
m = model()
X = np.random.normal(size=(100, 2))
X, y = pd.DataFrame(X), pd.Series(y)
m.fit(X, y)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
@pytest.mark.parametrize("enforce_dag", [True, False])
def test_plot_dag(self, enforce_dag, model, y):
m = model()
X = np.random.normal(size=(100, 2))
m.fit(X, y)
# plot with no passed axes
plot = m.plot_dag(enforce_dag=enforce_dag)
assert isinstance(plot, tuple)
assert isinstance(plot[0], Figure)
assert isinstance(plot[1], Axes)
# plot with passed axes
_, ax = plt.subplots()
plot = m.plot_dag(enforce_dag=enforce_dag, ax=ax)
assert isinstance(plot, tuple)
assert plot[0] is None
assert isinstance(plot[1], Axes)
# plot with no passed axes
plot = m.plot_dag(enforce_dag=enforce_dag)
assert isinstance(plot, tuple)
assert isinstance(plot[0], Figure)
assert isinstance(plot[1], Axes)
# plot with Ipython
plot = m.plot_dag(enforce_dag=enforce_dag, use_mpl=False)
assert isinstance(plot, Image)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
@pytest.mark.parametrize(
"hidden_layer_units", [None, [], [0], [1], (0,), (1,), [1, 1], (1, 1)]
)
def test_hidden_layer_units(self, hidden_layer_units, model, y):
m = model(hidden_layer_units=hidden_layer_units)
X = np.random.normal(size=(100, 2))
m.fit(X, y)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_enforce_dag(self, model, y):
m = model(enforce_dag=True)
X = np.random.normal(size=(100, 2))
X, y = pd.DataFrame(X), pd.Series(y)
m.fit(X, y)
assert nx.algorithms.is_directed_acyclic_graph(m.graph_)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_container_predict_type(self, model, y):
m = model()
X = np.random.normal(size=(100, 2))
m.fit(X, y)
assert isinstance(m.predict(X), np.ndarray)
m = model()
X = np.random.normal(size=(100, 2))
X, y =
|
pd.DataFrame(X)
|
pandas.DataFrame
|
from itertools import product
import epimargin.plots as plt
import numpy as np
import pandas as pd
import seaborn as sns
from epimargin.estimators import analytical_MPVS
from epimargin.models import SIR
from epimargin.policy import PrioritizedAssignment, RandomVaccineAssignment, VaccinationPolicy
from studies.age_structure.commons import *
sns.set(style = "whitegrid")
num_sims = 1000
def save_results(model, data, dVx_adm, dVx_eff, dVx_imm, tag):
print(":::: serializing results")
if "novaccination" in tag:
pd.DataFrame(model.dT).to_csv(data/f"latest_sims/dT_{tag}.csv")
|
pd.DataFrame(model.dD)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from joblib import parallel_backend
from multiprocessing import cpu_count
import os, gc, joblib
from tqdm import tqdm
from collections import defaultdict
import torch
import warnings
warnings.filterwarnings('ignore')
pd.set_option("display.max_colwidth", 100)
pd.set_option("display.max_rows", 20)
osj = os.path.join; osl = os.listdir
n_cpus = cpu_count()
class ViralDataset(torch.utils.data.Dataset):
def __init__(self, df: pd.DataFrame, feat_cols: list, mode: str):
self.X = df[feat_cols].values # [:,np.newaxis,:]
self.mode = mode
if mode != 'test':
self.targets = df['virality'].values # [:,np.newaxis] # - 1
# assert np.sum(~df['virality'].isin(list(range(5))))==0
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
if self.mode=='test':
return torch.tensor(self.X[idx], dtype=torch.float32)
else:
return (torch.tensor(self.X[idx], dtype=torch.float32),
torch.tensor(self.targets[idx], dtype=torch.long)) # long))
class ExtractFeatsDataset(torch.utils.data.Dataset):
def __init__(self, df: pd.DataFrame, feat_cols: list, target_cols: list, mode: str):
self.X = df[feat_cols].values # [:,np.newaxis,:]
# self.target_cols = target_cols
self.mode = mode
if mode != 'test':
if len(target_cols)==1:
self.targets = df[target_cols[0]].values # [:,np.newaxis] # - 1
self.target_dtype = torch.long
else:
self.targets = df[target_cols].values # [:,np.newaxis] # - 1
self.target_dtype = torch.float32
# assert np.sum(~df['virality'].isin(list(range(5))))==0
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
if self.mode=='test':
return torch.tensor(self.X[idx], dtype=torch.float32)
else:
return (torch.tensor(self.X[idx], dtype=torch.float32),
torch.tensor(self.targets[idx], dtype=self.target_dtype)) # long))
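# Usage sketch (illustrative; assumes `train_df` holds the feature columns plus 'virality'):
#   train_ds = ViralDataset(train_df, feat_cols, mode='train')
#   train_loader = torch.utils.data.DataLoader(train_ds, batch_size=256, shuffle=True)
#   xb, yb = next(iter(train_loader))  # float32 features, long targets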
def to_binary_categories(df, cat_col='tweet_language_id'):
df.loc[:, cat_col] = (df[cat_col]!=0).astype(np.int8)
return df
def freq_encoding(df, freq_cols: list, main_col='tweet_id'):
for c in freq_cols:
count_df = df.groupby([c])[main_col].count().reset_index()
count_df.columns = [c, '{}_freq'.format(c)]
df = df.merge(count_df, how='left', on=c)
return df
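# Illustrative example (hypothetical frame): freq_encoding adds a '<col>_freq' count column
#   df = pd.DataFrame({'tweet_id': [1, 2, 3], 'lang': ['en', 'en', 'de']})
#   freq_encoding(df, ['lang'])['lang_freq'].tolist()  # -> [2, 2, 1]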
def bin_feats(df, feats=[], n_bins_default=20):
bin_counts = defaultdict(lambda: n_bins_default)
bin_counts['user_tweet_count'] = 20
for feature in feats:
if '_binned' in feature:
continue
n_bins = bin_counts[feature]
if n_bins:
bins = np.unique(df[feature].quantile(np.linspace(0, 1, n_bins)).values)
df[feature + '_binned'] = pd.cut(
df[feature], bins=bins, duplicates='drop'
).cat.codes
return df
def to_categorical(df):
cat_cols = ['tweet_has_attachment', 'user_has_location', 'user_has_url', 'user_verified', ]
df[cat_cols] = df[cat_cols].astype('category')
return df
def change2float32(df):
float_cols = df.select_dtypes('float64').columns
df[float_cols] = df[float_cols].astype(np.float32)
return df
def merge_df2media(df, df_media):
num_media = (df_media.groupby('tweet_id')['media_id']
.nunique()
.reset_index())
df_media.drop('media_id', axis=1, inplace=True)
num_media.columns = ['tweet_id', 'num_media']
df_media = df_media.merge(num_media, how='left', on='tweet_id')
media_cols = [col for col in df_media if col not in ['tweet_id','media_id']]
df_media = df_media.groupby('tweet_id')[media_cols].mean().reset_index()
# df_media = mean_feats.merge(df_media[['tweet_id']], how='left', on='tweet_id')
# del mean_feats; _ = gc.collect()
df_media['tweet_has_media'] = True
df = df.merge(df_media, how='left', on='tweet_id')
# fillna False if tweet has no media
df['tweet_has_media'] = df['tweet_has_media'].fillna(False)
# the same for the count of number of media per tweet
df['num_media'] = df['num_media'].fillna(0).astype(np.int8)
return df
# def add_num_media_user(df):
# # todo when not debug: df['num_media'].equals(df['num_media_user'])
# num_media_user = df.groupby('tweet_id')['num_media'].sum().reset_index()
# num_media_user.columns = ['tweet_id','num_media_user']
# df = df.merge(num_media_user, how='left', on='tweet_id')
# df['num_media_user'] = df['num_media_user'].astype(np.int8)
# return df
def tweets_user_created_date(df):
for feat_ in ['tweet_created_at_year', 'tweet_created_at_month', 'tweet_created_at_day',
'tweet_created_at_hour']:
# counts_df_cols = ['tweet_user_id']+[f"tweets_in_{feat_.split('_')[-1]}_{time_}" for time_ in np.sort(df[feat_].unique())]
# tweet_user_ids = np.sort(df['tweet_user_id'].unique())
# counts_df = pd.DataFrame(index=range(tweet_user_ids), columns=counts_df_cols)
# counts_df['tweet_user_id'] = tweet_user_ids
counts_map = df.groupby('tweet_user_id')[feat_].apply(lambda x: x.value_counts())
counts_map = counts_map.unstack(level=1)
counts_map.columns = [f"tweets_in_{feat_.split('_')[-1]}_"+str(col) for col in counts_map.columns]
counts_map = counts_map.fillna(0).reset_index()
df = df.merge(counts_map, how='left', on='tweet_user_id')
return df
# n_tweets_time_user = df.groupby('tweet_user_id')[feat_].count().reset_index()
# n_tweets_time_user.columns = ['tweet_user_id', f"n_tweets_{feat_.split('_')[-1]}_user_count"]
# df = df.merge(n_tweets_time_user, how='left', on='tweet_user_id')
def create_date_col(df):
tweet_date_cols = ['tweet_created_at_year', 'tweet_created_at_month', 'tweet_created_at_day']
df['date'] = df[tweet_date_cols].apply(lambda x:
str(x['tweet_created_at_month']).strip() + '/' +
str(x['tweet_created_at_day']).strip() + '/' +
str(x['tweet_created_at_year']).strip(), axis=1)
df['date'] =
|
pd.to_datetime(df['date'])
|
pandas.to_datetime
|
import pandas as pd
import sys
# from urllib import urlopen # python2
from urllib.request import urlopen
#try:
# from rpy2.robjects.packages import importr
# try:
# biomaRt = importr("biomaRt")
# except:
# print "rpy2 could be loaded but 'biomaRt' could not be found.\nIf you want to use 'biomaRt' related functions please install 'biomaRt' in R.\n\n$ R\n> source('http://bioconductor.org/biocLite.R')\n> biocLite()\n> biocLite('biomaRt')\n> quit()"
# sys.stdout.flush()
#except:
# print "Failed to import rpy2 module.\nPlease make sure you are using the same version of R you had when AGEpy was installed."
# sys.stdout.flush()
import biomart
from biomart import BiomartServer
def organismsKEGG():
"""
Lists all organisms present in the KEGG database.
:returns: a dataframe containing one organism per row.
"""
organisms=urlopen("http://rest.kegg.jp/list/organism").read()
organisms=organisms.decode().split("\n")
#for o in organisms:
# print o
# sys.stdout.flush()
organisms=[ s.split("\t") for s in organisms ]
organisms=pd.DataFrame(organisms)
return organisms
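# Usage sketch (illustrative; requires network access to rest.kegg.jp):
#   orgs = organismsKEGG()
#   # column 1 holds the KEGG organism codes, e.g. 'hsa' for human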
def databasesKEGG(organism,ens_ids):
"""
Finds KEGG database identifiers for a respective organism given example ensembl ids.
:param organism: an organism as listed in organismsKEGG()
    :param ens_ids: a list of ensembl ids of the respective organism
:returns: nothing if no database was found, or a string if a database was found
"""
all_genes=urlopen("http://rest.kegg.jp/list/"+organism).read()
all_genes=all_genes.decode().split("\n")
dbs=[]
while len(dbs) == 0:
for g in all_genes:
if len(dbs) == 0:
kid = g.split("\t")[0]
gene=urlopen("http://rest.kegg.jp/get/"+kid).read()
DBLINKS=gene.decode().split("\n")
DBLINKS=[ s for s in DBLINKS if ":" in s ]
for d in DBLINKS:
test=d.split(" ")
test=test[len(test)-1]
if test in ens_ids:
DBLINK=[ s for s in DBLINKS if test in s ]
DBLINK=DBLINK[0].split(":")
DBLINK=DBLINK[len(DBLINK)-2]
dbs.append(DBLINK)
else:
break
ens_db=dbs[0].split(" ")
ens_db=ens_db[len(ens_db)-1]
test_db=urlopen("http://rest.genome.jp/link/"+ens_db+"/"+organism).read()
test_db=test_db.decode().split("\n")
if len(test_db) == 1:
print("For "+organism+" the following db was found: "+ens_db)
print("This database does not seem to be valid KEGG-linked database identifier")
print("For \n'hsa' use 'ensembl-hsa'\n'mmu' use 'ensembl-mmu'\n'cel' use 'EnsemblGenomes-Gn'\n'dme' use 'FlyBase'")
sys.stdout.flush()
ens_db = None
else:
print("For "+organism+" the following db was found: "+ens_db)
sys.stdout.flush()
return ens_db
def ensembl_to_kegg(organism,kegg_db):
"""
Looks up KEGG mappings of KEGG ids to ensembl ids
:param organism: an organisms as listed in organismsKEGG()
:param kegg_db: a matching KEGG db as reported in databasesKEGG
    :returns: a Pandas dataframe with 'KEGGid' and 'ENSid' columns.
"""
print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism)
sys.stdout.flush()
kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read()
kegg_ens=kegg_ens.decode().split("\n")
final=[]
for i in kegg_ens:
final.append(i.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1]
df=pd.concat([df,ens_id],axis=1)
df.columns=['KEGGid','ensDB','ENSid']
df=df[['KEGGid','ENSid']]
return df
def ecs_idsKEGG(organism):
"""
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
:param organism: an organisms as listed in organismsKEGG()
    :returns: a Pandas dataframe with 'ec' and 'KEGGid' columns.
"""
kegg_ec=urlopen("http://rest.kegg.jp/link/"+organism+"/enzyme").read()
kegg_ec=kegg_ec.decode().split("\n")
final=[]
for k in kegg_ec:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['ec','KEGGid']
return df
def idsKEGG(organism):
"""
Uses KEGG to retrieve all ids for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
    :returns: a Pandas dataframe with 'gene_name' and 'KEGGid' columns.
"""
ORG=urlopen("http://rest.kegg.jp/list/"+organism).read()
ORG=ORG.decode().split("\n")
final=[]
for k in ORG:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['KEGGid','description']
field = pd.DataFrame(df['description'].str.split(';',1).tolist())[0]
field = pd.DataFrame(field)
df = pd.concat([df[['KEGGid']],field],axis=1)
df.columns=['KEGGid','gene_name']
df=df[['gene_name','KEGGid']]
return df
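# Usage sketch (illustrative; requires network access to rest.kegg.jp):
#   ids = idsKEGG('hsa')
#   # returns 'gene_name' / 'KEGGid' pairs, with KEGGid values such as 'hsa:7157'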
def pathwaysKEGG(organism):
"""
Retrieves all pathways for a given organism.
:param organism: an organism as listed in organismsKEGG()
:returns df: a Pandas dataframe with the columns 'KEGGid','pathIDs', and 'pathName'.
    :returns df_: a Pandas dataframe with a column for 'KEGGid', and one column for each pathway listing the corresponding gene ids
"""
print("KEGG API: http://rest.kegg.jp/list/pathway/"+organism)
sys.stdout.flush()
kegg_paths=urlopen("http://rest.kegg.jp/list/pathway/"+organism).read()
kegg_paths=kegg_paths.decode().split("\n")
final=[]
for k in kegg_paths:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['pathID','pathName']
print("KEGG API: http://rest.kegg.jp/link/"+organism+"/pathway/")
sys.stdout.flush()
kegg_paths_genes=urlopen("http://rest.kegg.jp/link/"+organism+"/pathway/").read()
kegg_paths_genes=kegg_paths_genes.decode().split("\n")
kegg_paths_genes=[ s.split("\t") for s in kegg_paths_genes ]
kegg_paths_genes=pd.DataFrame(kegg_paths_genes)
kegg_paths_genes.columns=['pathID','KEGGid']
df=pd.merge(kegg_paths_genes,df,on=["pathID"],how="outer")
def CombineAnn(df):
return pd.Series(dict(KEGGid = ', '.join([ s for s in list(set(df['KEGGid'])) if str(s) != "nan" ] ) ,
pathIDs = ', '.join([ s for s in list(set(df['pathID'])) if str(s) != "nan" ]),
pathName = ', '.join([ s for s in list(set(df['pathName'])) if str(s) != "nan" ] ) ) )
df=df.groupby('KEGGid',as_index=True).apply(CombineAnn)
df.reset_index(inplace=True, drop=True)
df_=kegg_paths_genes[['KEGGid']].drop_duplicates()
for c in list(set(kegg_paths_genes["pathID"].tolist())):
tmp=kegg_paths_genes[kegg_paths_genes["pathID"]==c][["KEGGid"]].drop_duplicates().dropna()
tmp.columns=[c]
df_=pd.merge(df_,tmp,left_on=["KEGGid"],right_on=[c],how="outer")
return df, df_
def biomaRtTOkegg(df):
"""
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
"""
df=df.dropna()
ECcols=df.columns.tolist()
df.reset_index(inplace=True,drop=True)
# field = ECsb[['kegg_enzyme']]
field = pd.DataFrame(df['kegg_enzyme'].str.split('+',1).tolist())[1]
field = pd.DataFrame(field)
df=pd.concat([df[['ensembl_gene_id']],field],axis=1)
df.columns=ECcols
df.drop_duplicates(inplace=True)
df.reset_index(inplace=True,drop=True)
plus=df['kegg_enzyme'].tolist()
plus=[ s for s in plus if "+" in s ]
noPlus=df[~df['kegg_enzyme'].isin(plus)]
plus=df[df['kegg_enzyme'].isin(plus)]
noPlus.reset_index(inplace=True, drop=True)
plus.reset_index(inplace=True, drop=True)
for p in range(0,len(plus)):
enz=plus.ix[p]['kegg_enzyme']
enz=enz.split("+")
enz=pd.DataFrame(enz)
        enz.columns=['kegg_enzyme']
        enz['ensembl_gene_id']=plus.ix[p]['ensembl_gene_id']
noPlus=pd.concat([noPlus,enz])
noPlus=noPlus.drop_duplicates()
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
noPlus['fake']='ec:'
noPlus['kegg_enzyme']=noPlus['fake']+noPlus['kegg_enzyme']
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
return noPlus
def expKEGG(organism,names_KEGGids):
"""
Gets all KEGG pathways for an organism
:param organism: an organism as listed in organismsKEGG()
:param names_KEGGids: a Pandas dataframe with the columns 'gene_name': and 'KEGGid' as reported from idsKEGG(organism) (or a subset of it).
:returns df: a Pandas dataframe with 'KEGGid','pathID(1):pathNAME(1)', 'pathID(n):pathNAME(n)'
:returns paths: a list of retrieved KEGG pathways
"""
#print "KEGG API: http://rest.kegg.jp/list/pathway/"+organism
#sys.stdout.flush()
kegg_paths=urlopen("http://rest.kegg.jp/list/pathway/"+organism).read()
kegg_paths=kegg_paths.decode().split("\n")
final=[]
for k in kegg_paths:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['pathID','pathName']
print("Collecting genes for pathways")
sys.stdout.flush()
df_pg=pd.DataFrame()
for i in df['pathID'].tolist():
print(i)
sys.stdout.flush()
path_genes=urlopen("http://rest.kegg.jp/link/genes/"+i).read()
path_genes=path_genes.decode().split("\n")
final=[]
for k in path_genes:
final.append(k.split("\t"))
if len(final[0]) > 1:
df_tmp=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df_tmp.columns=['pathID','KEGGid']
df_pg=pd.concat([df_pg,df_tmp])
df=pd.merge(df,df_pg,on=["pathID"], how="outer")
df=df[df['KEGGid'].isin(names_KEGGids['KEGGid'].tolist())]
df=pd.merge(df,names_KEGGids,how='left',on=['KEGGid'])
df_fA=pd.DataFrame(columns=['KEGGid'])
paths=[]
for k in df[['pathID']].drop_duplicates()['pathID'].tolist():
df_tmp=df[df['pathID']==k]
pathName=df_tmp['pathName'].tolist()[0]
pathName=" : ".join([k,pathName])
keggIDs_in_path=df_tmp[['KEGGid']].drop_duplicates()['KEGGid'].tolist()
a={pathName:keggIDs_in_path}
a=pd.DataFrame(a,index=range(len(keggIDs_in_path)))
a['KEGGid']=a[pathName].copy()
df_fA=pd.merge(df_fA,a,how='outer',on=['KEGGid'])
paths.append(pathName)
return df_fA, paths
rbiomart_host="www.ensembl.org"
biomart_host="http://www.ensembl.org/biomart"
def KEGGmatrix(organism, dataset, query_attributes=['ensembl_gene_id','kegg_enzyme'], host=biomart_host,links=True,dfexp=None,kegg_db=None, database=None ):
"""
    Looks for all KEGG annotations of an organism in biomaRt and the respective pathways in KEGG. It can also retrieve links to pathway figures with the genes provided in a dataframe labeled in red.
:param organism: a KEGG organism identifier
:param dataset: a biomaRt dataset
:param query_attributes: biomaRt query attributes, the name can change but the output should stay in the same order ie. 'ensembl_gene_id','kegg_enzyme'
:param host: biomaRt_host
:param links: if True, returns df_links
:param dfexp: a Pandas dataframe with the following columns: 'ensembl_gene_id', 'log2FC'
:param kegg_db: a KEGG database as recovered by the databasesKEGG function
    :param database: a biomaRt database, deprecated, default=None.
:returns df: a Pandas dataframe with the 'KEGGid','pathsIDs','pathName','ensembl_gene_id','kegg_enzyme'
    :returns df_: a matrix with a column for each KEGG pathway of the organism, filled with the expression values from the dfexp parameter
:returns fullmatrix: a matrix with a column for each KEGG pathway for a given organism
    :returns df_links: a dataframe with a link for each pathway, with the genes from dfexp highlighted in red (only returned if links=True).
"""
try:
# Get all ensembl gene ids and keeg enzyme labels from biomaRt
#biomaRt = importr("biomaRt")
#ensemblMart=biomaRt.useMart(database, host=host)
#ensembl=biomaRt.useDataset(dataset, mart=ensemblMart)
#biomaRt_output=biomaRt.getBM(attributes=query_attributes,mart=ensembl)
#biomaRt_output = [tuple([biomaRt_output[j][i] for j in range(biomaRt_output.ncol)]) for i in range(biomaRt_output.nrow)]
#biomaRt_output = pd.DataFrame(biomaRt_output)
#biomaRt_output.columns = ['ensembl_gene_id','kegg_enzyme']
def QueryBioMart(dataset,attributes,host=host):
server = BiomartServer( host )
organism=server.datasets[dataset]
response=organism.search({'attributes':attributes})
response=response.content.split("\n")
response=[s.split("\t") for s in response ]
response=pd.DataFrame(response,columns=attributes)
return response
biomaRt_output=QueryBioMart(dataset,query_attributes,host=host)
biomaRt_output = biomaRt_output[biomaRt_output['kegg_enzyme']!='']
biomaRt_output.reset_index(inplace=True,drop=True)
biomaRt_output=biomaRtTOkegg(biomaRt_output)
except:
# Do it wiht KEGG
ec_KEGGid=ecs_idsKEGG(organism)
KEGGid_ENSid=ensembl_to_kegg(organism,kegg_db)
biomaRt_output=pd.merge(ec_KEGGid,KEGGid_ENSid,on=['KEGGid'],how="outer")
biomaRt_output=biomaRt_output.drop(['KEGGid'],axis=1)
biomaRt_output=biomaRt_output[['ENSid','ec']]
biomaRt_output.columns=['ensembl_gene_id','kegg_enzyme']
    # Get all pathways
df, df_=pathwaysKEGG(organism)
fullmatrix=df_.copy()
# Get all KEGG ecs from KEGG
ecs=ecs_idsKEGG(organism)
biomaRt_output=
|
pd.merge(biomaRt_output,ecs,left_on=['kegg_enzyme'],right_on=['ec'],how="outer")
|
pandas.merge
|
"""Run unit tests.
Use this to run tests and understand how tasks.py works.
Setup::
mkdir -p test-data/input
mkdir -p test-data/output
mysql -u root -p
CREATE DATABASE testdb;
CREATE USER 'testusr'@'localhost' IDENTIFIED BY 'test<PASSWORD>';
GRANT ALL PRIVILEGES ON testdb.* TO 'testusr'@'%';
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
* test_combine_() test the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import d6tstack.utils
import math
import pandas as pd
# import pyarrow as pa
# import pyarrow.parquet as pq
import ntpath
import shutil
import dask.dataframe as dd
import sqlalchemy
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class DebugLogger(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = DebugLogger('combiner')
# sample data
def create_files_df_clean():
# create sample data
df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
return df1, df2, df3
def create_files_df_clean_combine():
df1,df2,df3 = create_files_df_clean()
df_all = pd.concat([df1,df2,df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_clean_combine_with_filename(fname_list):
df1, df2, df3 = create_files_df_clean()
df1['filename'] = os.path.basename(fname_list[0])
df2['filename'] = os.path.basename(fname_list[1])
df3['filename'] = os.path.basename(fname_list[2])
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine(cfg_col_common,allstr=True):
df1, df2, df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
if allstr:
df_all = df_all[df_all.columns].astype(str)
return df_all
def check_df_colmismatch_combine(dfg,is_common=False, convert_date=True):
dfg = dfg.drop(['filepath','filename'],1).sort_values('date').reset_index(drop=True)
if convert_date:
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
dfchk = create_files_df_colmismatch_combine(is_common,False).reset_index(drop=True)[dfg.columns]
assert dfg.equals(dfchk)
return True
def create_files_df_colmismatch_combine2(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
# csv standard
@pytest.fixture(scope="module")
def create_files_csv():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-clean-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch():
df1,df2,df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch2():
df1,df2,df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch2-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colreorder():
df1,df2,df3 = create_files_df_clean()
cfg_col = [ 'date', 'sales','cost','profit']
cfg_col2 = [ 'date', 'sales','profit','cost']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
# save files
cfg_fname = cfg_fname_base_in+'input-csv-reorder-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan',index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb',index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_noheader():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-noheader-csv-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False, header=False)
df2.to_csv(cfg_fname % 'feb',index=False, header=False)
df3.to_csv(cfg_fname % 'mar',index=False, header=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_col_renamed():
df1, df2, df3 = create_files_df_clean()
df3 = df3.rename(columns={'sales':'revenue'})
cfg_col = ['date', 'sales', 'profit', 'cost']
cfg_col2 = ['date', 'revenue', 'profit', 'cost']
cfg_fname = cfg_fname_base_in + 'input-csv-renamed-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan', index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb', index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar', index=False)
return [cfg_fname % 'jan', cfg_fname % 'feb', cfg_fname % 'mar']
def create_files_csv_dirty(cfg_sep=",", cfg_header=True):
df1,df2,df3 = create_files_df_clean()
df1.to_csv(cfg_fname_base_in+'debug.csv',index=False, sep=cfg_sep, header=cfg_header)
return cfg_fname_base_in+'debug.csv'
# excel single-tab
def create_files_xls_single_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
df1.to_excel(cfg_fname % 'jan',index=False)
df2.to_excel(cfg_fname % 'feb',index=False)
df3.to_excel(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx')
def write_file_xls(dfg, fname, startrow=0,startcol=0):
writer = pd.ExcelWriter(fname)
dfg.to_excel(writer, 'Sheet1', index=False,startrow=startrow,startcol=startcol)
dfg.to_excel(writer, 'Sheet2', index=False,startrow=startrow,startcol=startcol)
writer.save()
# excel multi-tab
def create_files_xls_multiple_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
write_file_xls(df1,cfg_fname % 'jan')
write_file_xls(df2,cfg_fname % 'feb')
write_file_xls(df3,cfg_fname % 'mar')
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx')
#************************************************************
# tests - helpers
#************************************************************
def test_file_extensions_get():
fname_list = ['a.csv','b.csv']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.csv','.csv']
fname_list = ['a.xls','b.xls']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.xls','.xls']
def test_file_extensions_all_equal():
ext_list = ['.csv']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.xls']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.csv','.xls']
assert not file_extensions_all_equal(ext_list)
def test_file_extensions_valid():
ext_list = ['.csv']*2
assert file_extensions_valid(ext_list)
ext_list = ['.xls']*2
assert file_extensions_valid(ext_list)
ext_list = ['.exe','.xls']
assert not file_extensions_valid(ext_list)
#************************************************************
#************************************************************
# scan header
#************************************************************
#************************************************************
def test_csv_sniff(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
with pytest.raises(ValueError) as e:
c = CombinerCSV([])
# clean
combiner = CombinerCSV(fname_list=create_files_csv)
combiner.sniff_columns()
assert combiner.is_all_equal()
assert combiner.is_column_present().all().all()
assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit']
assert combiner.sniff_results['columns_common'] == combiner.sniff_results['columns_all']
assert combiner.sniff_results['columns_unique'] == []
# extra column
combiner = CombinerCSV(fname_list=create_files_csv_colmismatch)
combiner.sniff_columns()
assert not combiner.is_all_equal()
assert not combiner.is_column_present().all().all()
assert combiner.is_column_present().all().values.tolist()==[True, True, True, True, False]
assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit', 'profit2']
assert combiner.sniff_results['columns_common'] == ['date', 'sales', 'cost', 'profit']
assert combiner.is_column_present_common().columns.tolist() == ['date', 'sales', 'cost', 'profit']
assert combiner.sniff_results['columns_unique'] == ['profit2']
assert combiner.is_column_present_unique().columns.tolist() == ['profit2']
# mixed order
combiner = CombinerCSV(fname_list=create_files_csv_colreorder)
combiner.sniff_columns()
assert not combiner.is_all_equal()
assert combiner.sniff_results['df_columns_order']['profit'].values.tolist() == [3, 3, 2]
def test_csv_selectrename(create_files_csv, create_files_csv_colmismatch):
# rename
df = CombinerCSV(fname_list=create_files_csv).preview_rename()
assert df.empty
df = CombinerCSV(fname_list=create_files_csv, columns_rename={'notthere':'nan'}).preview_rename()
assert df.empty
df = CombinerCSV(fname_list=create_files_csv, columns_rename={'cost':'cost2'}).preview_rename()
assert df.columns.tolist()==['cost']
assert df['cost'].unique().tolist()==['cost2']
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_rename={'profit2':'profit3'}).preview_rename()
assert df.columns.tolist()==['profit2']
assert df['profit2'].unique().tolist()==[np.nan, 'profit3']
# select
l = CombinerCSV(fname_list=create_files_csv).preview_select()
assert l == ['date', 'sales', 'cost', 'profit']
l2 = CombinerCSV(fname_list=create_files_csv, columns_select_common=True).preview_select()
assert l2==l
l = CombinerCSV(fname_list=create_files_csv, columns_select=['date', 'sales', 'cost']).preview_select()
assert l == ['date', 'sales', 'cost']
l = CombinerCSV(fname_list=create_files_csv_colmismatch).preview_select()
assert l == ['date', 'sales', 'cost', 'profit', 'profit2']
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).preview_select()
assert l == ['date', 'sales', 'cost', 'profit']
# rename+select
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}).preview_select()
assert l==['date', 'profit3']
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}).preview_select()
assert l==['date', 'profit3']
def test_to_pandas(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
df = CombinerCSV(fname_list=create_files_csv).to_pandas()
assert df.shape == (30, 6)
df = CombinerCSV(fname_list=create_files_csv_colmismatch).to_pandas()
assert df.shape == (30, 6+1)
assert df['profit2'].isnull().unique().tolist() == [True, False]
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_pandas()
assert df.shape == (30, 6)
assert 'profit2' not in df.columns
# rename+select
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
assert df.shape == (30, 2)
assert 'profit3' in df.columns and not 'profit2' in df.columns
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
assert df.shape == (30, 2)
assert 'profit3' in df.columns and not 'profit2' in df.columns
def test_combinepreview(create_files_csv_colmismatch):
df = CombinerCSV(fname_list=create_files_csv_colmismatch).combine_preview()
assert df.shape == (9, 6+1)
assert df.dtypes.tolist() == [np.dtype('O'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
def apply(dfg):
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
return dfg
df = CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).combine_preview()
assert df.shape == (9, 6+1)
assert df.dtypes.tolist() == [np.dtype('<M8[ns]'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
def test_tocsv(create_files_csv_colmismatch):
fname = 'test-data/output/combined.csv'
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_combine(filename=fname)
assert fname == fnameout
df = pd.read_csv(fname)
dfchk = df.copy()
assert df.shape == (30, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
assert check_df_colmismatch_combine(df)
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_csv_combine(filename=fname)
df = pd.read_csv(fname)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'filepath', 'filename']
assert check_df_colmismatch_combine(df,is_common=True)
def helper(fdir):
fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir=fdir)
for fname in fnamesout:
df = pd.read_csv(fname)
assert df.shape == (10, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
helper('test-data/output')
helper('test-data/output/')
df = dd.read_csv('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.csv')
df = df.compute()
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
assert df.reset_index(drop=True).equals(dfchk)
assert check_df_colmismatch_combine(df)
# check creates directory
try:
shutil.rmtree('test-data/output-tmp')
except:
pass
_ = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir='test-data/output-tmp')
try:
shutil.rmtree('test-data/output-tmp')
except:
pass
def test_topq(create_files_csv_colmismatch):
fname = 'test-data/output/combined.pq'
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_combine(filename=fname)
assert fname == fnameout
df = pd.read_parquet(fname, engine='fastparquet')
assert df.shape == (30, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
df2 = pd.read_parquet(fname, engine='pyarrow')
assert df2.equals(df)
assert check_df_colmismatch_combine(df)
df = dd.read_parquet(fname)
df = df.compute()
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
df2 = pd.read_parquet(fname, engine='fastparquet')
assert df2.equals(df)
df3 =
|
pd.read_parquet(fname, engine='pyarrow')
|
pandas.read_parquet
|
# +
import random
import datetime
import pandas as pd
from sqlalchemy import (Column, TEXT, FLOAT, BOOLEAN,
JSON, VARCHAR, DATETIME)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class _TestsExampleTable(Base):
"""
Example table compatible with Postgres, SQLite and MySQL for testing.
"""
__tablename__ = 'pangres_example'
# use VARCHAR for the pk so MySQL doesn't complain...
# MySQL does not want variable length text as index
profileid = Column(VARCHAR(10), primary_key=True)
email = Column(TEXT)
timestamp = Column(DATETIME(timezone=True))
size_in_meters = Column(FLOAT)
likes_pizza = Column(BOOLEAN)
favorite_colors = Column(JSON)
@classmethod
def create_example_df(cls, nb_rows):
emails = ['foo', 'bar', 'baz', 'test', 'abc', 'foobar', 'foobaz']
domains = ['gmail.com', 'yahoo.fr', 'yahoo.com', 'outlook.fr']
email_choices = []
for i in range(nb_rows):
email = random.choice(emails)
domain = random.choice(domains)
email_choices.append(f'{email}@{domain}')
timestamps = [(datetime.datetime
.fromtimestamp(random.randint(1000000000,1300000000))
.astimezone(datetime.timezone.utc))
for i in range(nb_rows)]
colors = ['yellow', 'blue', 'pink', 'red', 'orange', 'brown']
favorite_colors = []
for i in range(nb_rows):
l = [random.choice(colors) for i in range(random.randint(1,3))]
favorite_colors.append(l)
data = {# IMPORTANT! for our tests make profileid incremental!
# it's not an integer (see table definition for an explanation why not)
# but we just add a prefix or something to an incremented number
'profileid':[str(f'abc{i}') for i in range(nb_rows)],
'email':email_choices,
'timestamp':timestamps,
'size_in_meters':[random.uniform(1.5,2.3) for i in range(nb_rows)],
'likes_pizza':[random.choice([True, False]) for i in range(nb_rows)],
'favorite_colors':favorite_colors}
df = pd.DataFrame(data).set_index('profileid')
return df
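# Minimal usage sketch (values are random, shown only for orientation):
# >>> df = _TestsExampleTable.create_example_df(nb_rows=3)
# >>> df.index.tolist()
# ['abc0', 'abc1', 'abc2']
# >>> sorted(df.columns)
# ['email', 'favorite_colors', 'likes_pizza', 'size_in_meters', 'timestamp']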
class DocsExampleTable():
"""
Example DataFrames for the docs.
"""
# create some test data
_data = {'full_name':['<NAME>', '<NAME>', '<NAME>'],
'likes_sport':[True, True, False],
'updated':[pd.Timestamp('2020-02-01', tz='UTC'),
pd.Timestamp('2020-04-01', tz='UTC'), pd.NaT],
'size_in_meters':[1.77, 1.96, None]}
# create DataFrame using this test data
df = pd.DataFrame(_data).set_index('full_name')
# create test data for showing an INSERT UPDATE
_new_data = {'full_name':['<NAME>', '<NAME>'],
'likes_sport':[True, True],
'updated':[pd.Timestamp('2020-04-04', tz='UTC'), pd.NaT],
'size_in_meters':[1.88, 1.88]}
new_df =
|
pd.DataFrame(_new_data)
|
pandas.DataFrame
|
import pandas as pd
import configparser
import json
import glob
import os
from loguru import logger
from vectorizer import Vectorizer
import pickle
import urllib.request
import re
class DataIO(object):
def __init__(self, config_path="config.cfg", autoload=True):
self.config_path = config_path
self.autoload = autoload
config = configparser.ConfigParser()
config.read(config_path)
self.DATA_DIR = config.get("DATA", "DATA_DIR")
self.DATA_URL = config.get("DATA", "DATA_URL")
self.df = pd.DataFrame()
if autoload:
self._load_metadata()
def __str__(self):
return f"DATA_DIR:{self.DATA_DIR}, df_loaded:{not self.df.empty}, df_shape:{self.df.shape}, autoload:{self.autoload}, config_path:{self.config_path}"
def __repr__(self):
return self.__str__()
def update(self):
if self.DATA_URL:
logger.info("Downloading Data... Please Wait")
f_name, htm = urllib.request.urlretrieve(
self.DATA_URL, f"{self.DATA_DIR}/metadata.csv"
)
logger.info(f"Data Downloaded!\n {htm.items()}")
logger.info("Processing Data... Please Wait")
df = pd.read_csv(f_name)
df = df[
[
"title",
"abstract",
"publish_time",
"authors",
"journal",
"source_x",
"url",
]
].fillna(" ")
self.df = self.get_data()
df = self.df[~self.df["title"].isin(df["title"])]
if df.empty:
logger.info("No New data to update")
else:
logger.info(f"New Data after last run: {df.shape}")
df["title_vect"] = df["title"].apply(Vectorizer.vectorize_sent)
self.df = pd.concat([self.df, df], ignore_index=True)
logger.info(f"Data Processed\n")
self._write_pickle(
filename=self.DATA_DIR + "/processed_metadata.pickle"
)
logger.info("Updated Processed File Created!")
return self.df
else:
logger.warning("Update Failed")
return
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray
class TestSparseArrayConcat:
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_basic(self, kind):
a = SparseArray([1, 0, 0, 2], kind=kind)
b = SparseArray([1, 0, 2, 2], kind=kind)
result = SparseArray._concat_same_type([a, b])
# Can't make any assertions about the sparse index itself
        # since we don't merge sparse blocks across arrays
# in to_concat
expected = np.array([1, 2, 1, 2, 2], dtype="int64")
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_uses_first_kind(self, kind):
other = "integer" if kind == "block" else "block"
a = SparseArray([1, 0, 0, 2], kind=kind)
b = SparseArray([1, 0, 2, 2], kind=other)
result = SparseArray._concat_same_type([a, b])
expected = np.array([1, 2, 1, 2, 2], dtype="int64")
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.parametrize(
"other, expected_dtype",
[
# compatible dtype -> preserve sparse
(pd.Series([3, 4, 5], dtype="int64"), pd.SparseDtype("int64", 0)),
# (pd.Series([3, 4, 5], dtype="Int64"), pd.SparseDtype("int64", 0)),
# incompatible dtype -> Sparse[common dtype]
(pd.Series([1.5, 2.5, 3.5], dtype="float64"), pd.SparseDtype("float64", 0)),
# incompatible dtype -> Sparse[object] dtype
(pd.Series(["a", "b", "c"], dtype=object), pd.SparseDtype(object, 0)),
# categorical with compatible categories -> dtype of the categories
(pd.Series([3, 4, 5], dtype="category"), np.dtype("int64")),
(pd.Series([1.5, 2.5, 3.5], dtype="category"), np.dtype("float64")),
# categorical with incompatible categories -> object dtype
(pd.Series(["a", "b", "c"], dtype="category"), np.dtype(object)),
],
)
def test_concat_with_non_sparse(other, expected_dtype):
# https://github.com/pandas-dev/pandas/issues/34336
s_sparse = pd.Series([1, 0, 2], dtype=pd.SparseDtype("int64", 0))
result = pd.concat([s_sparse, other], ignore_index=True)
expected = pd.Series(list(s_sparse) + list(other)).astype(expected_dtype)
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
import pandas
from collections import defaultdict
import logging
from pnl import AverageCostProfitAndLoss
def _select_prices(reporting_currency, prices):
"""
:param reporting_currency:
:param prices:
:return:
"""
all_prices = prices.set_index('date')
filter_columns = [column for column in all_prices.columns if column.endswith(reporting_currency)]
prices_selection = all_prices[filter_columns]
prices_selection.columns = [column.split('/')[0] for column in prices_selection.columns]
return prices_selection
def _include_indices(target_df, source_df):
"""
Adds missing indices from source_df into target_df.
:param target_df:
:param source_df:
:return:
"""
complete_index = target_df.index.append(source_df.index)
reindexed = target_df.reindex(complete_index)
return reindexed.sort_index()
def compute_balances(flows):
"""
Balances by currency.
:param flows:
:return:
"""
flows = flows.set_index('date')
flows_by_asset = flows.pivot(columns='asset', values='amount').apply(pandas.to_numeric)
balances = flows_by_asset.fillna(0).cumsum()
return balances
def extend_balances(reporting_currency, balances, prices):
"""
:param balances:
:param reporting_currency:
:param prices:
:return:
"""
prices_selection = _select_prices(reporting_currency, prices)
# removes duplicates (TODO: find bug)
prices_selection = prices_selection[~prices_selection.index.duplicated(keep='first')]
prices_selection = _include_indices(prices_selection, balances).ffill()
extended_balances = _include_indices(balances, prices_selection).ffill()
# removing duplicates
extended_balances = extended_balances.groupby('date').first()
return extended_balances, prices_selection
def compute_trades_pnl(reporting_currency, prices, trades):
"""
Trades P&L by asset expressed in the reporting currency.
:param reporting_currency:
:param prices:
:param trades:
:return: DataFrame (<index 'date'>, list of asset codes) containing pnl history for each asset
"""
logging.debug('loaded orders:\n{}'.format(trades))
if trades.empty:
result = pandas.DataFrame({'asset': [], 'date': [], 'realized_pnl': [], 'total_pnl': [], 'unrealized_pnl': []})
else:
trades = trades.set_index('date')
prices_selection = _select_prices(reporting_currency, prices)
prices_selection[reporting_currency] = 1
prices_selection = _include_indices(prices_selection, trades).ffill()
pnl_tracker = defaultdict(AverageCostProfitAndLoss)
pnl_data = list()
for timestamp, price_row in prices_selection.iterrows():
if timestamp in trades.index:
current_trades = trades.loc[timestamp]
for trade_ts, trade_row in current_trades.iterrows():
fees = trade_row['fee']
asset = trade_row['asset']
fill_qty = float(trade_row['amount'])
fill_price = price_row[asset]
pnl_tracker[asset].add_fill(fill_qty, fill_price, fees)
pnl_asset_data = {
'date': trade_ts,
'asset': asset,
'unrealized_pnl': pnl_tracker[asset].get_unrealized_pnl(fill_price),
'realized_pnl': pnl_tracker[asset].realized_pnl,
'total_pnl': pnl_tracker[asset].get_total_pnl(fill_price),
}
pnl_data.append(pnl_asset_data)
logging.info('*trade* added pnl data: {}'.format(pnl_asset_data))
else:
for asset in pnl_tracker:
pnl_asset_data = {
'date': timestamp,
'asset': asset,
'unrealized_pnl': pnl_tracker[asset].get_unrealized_pnl(price_row[asset]),
'realized_pnl': pnl_tracker[asset].realized_pnl,
'total_pnl': pnl_tracker[asset].get_total_pnl(price_row[asset]),
}
pnl_data.append(pnl_asset_data)
logging.info('added pnl data: {}'.format(pnl_asset_data))
result =
|
pandas.DataFrame(pnl_data)
|
pandas.DataFrame
|
import warnings
import numpy as np
import pandas as pd
import torch
from seqeval.metrics import classification_report as classification_report_seqeval
from sklearn.metrics import classification_report as classification_report_sklearn
from sklearn.metrics import confusion_matrix as confusion_matrix_sklearn
from typing import List, Dict, Union, Tuple, Any, Optional
from nerblackbox.modules.ner_training.metrics.ner_metrics import NerMetrics
from nerblackbox.modules.ner_training.annotation_tags.tags import Tags
from nerblackbox.modules.ner_training.annotation_tags.annotation import Annotation
class NerModelEvaluation:
def __init__(
self,
current_epoch: int,
annotation: Annotation,
default_logger,
logged_metrics,
):
"""
Args:
current_epoch: e.g. 1
annotation: [Annotation]
default_logger:
logged_metrics:
"""
self.current_epoch = current_epoch
self.annotation = annotation
self.annotation_plain = annotation.change_scheme(new_scheme="plain")
self.default_logger = default_logger
self.logged_metrics = logged_metrics
####################################################################################################################
# 2. VALIDATE / COMPUTE METRICS
####################################################################################################################
def execute(
self, phase: str, outputs: List[torch.Tensor]
) -> Tuple[Dict[str, np.array], str, str, float]:
"""
- validate on all batches of one epoch, i.e. whole val or test dataset
Args:
phase: [str], 'val', 'test'
outputs: [list] of [lists] = [batch_loss, batch_tag_ids, batch_logits] with 3 torch tensors for each batch
Returns:
epoch_metrics [dict] w/ keys 'all_acc', 'fil_f1_micro', .. & values = [np array]
classification_report: [str]
confusion_matrix: [str]
            epoch_loss: [float] mean of all batch losses
"""
print()
np_batch: Dict[str, List[np.array]] = self._convert_output_to_np_batch(outputs)
np_epoch: Dict[
str, Union[np.number, np.array]
] = self._combine_np_batch_to_np_epoch(np_batch)
# epoch metrics
epoch_metrics, epoch_tags = self._compute_metrics(phase, np_epoch)
# classification report
if phase == "test":
confusion_matrix = self._get_confusion_matrix_str(
epoch_tags,
phase=phase,
epoch=self.current_epoch,
)
classification_report = self._get_classification_report(
epoch_tags,
phase=None,
epoch=None,
)
else:
classification_report = ""
confusion_matrix = ""
return epoch_metrics, classification_report, confusion_matrix, np_epoch["loss"]
@staticmethod
def _convert_output_to_np_batch(
outputs: List[torch.Tensor],
) -> Dict[str, List[np.array]]:
"""
- converts pytorch lightning output to np_batch dictionary
Args:
outputs: [list] = [batch_loss, batch_tag_ids, batch_logits] with 3 torch tensors for each batch
Returns:
np_batch: [dict] w/ key-value pairs:
'loss': [list] of <batch_size> x [1D np array]s of length <seq_length>
'tag_ids': [list] of <batch_size> x [1D np array]s of length <seq_length>
'logits' [list] of <batch_size> x [2D np array]s of size <seq_length x num_tags>
"""
return {
"loss": [output[0].detach().cpu().numpy() for output in outputs],
"tag_ids": [
output[1].detach().cpu().numpy() for output in outputs
], # [batch_size, seq_length]
"logits": [
output[2].detach().cpu().numpy() for output in outputs
], # [batch_size, seq_length, num_tags]
}
@staticmethod
def _combine_np_batch_to_np_epoch(
np_batch: Dict[str, List[np.array]]
) -> Dict[str, Union[np.number, np.array]]:
"""
- combine np_batch to np_epoch
Args:
np_batch: [dict] w/ key-value pairs:
'loss': [list] of <batch_size> x [1D np array]s of length <seq_length>
'tag_ids': [list] of <batch_size> x [1D np array]s of length <seq_length>
'logits' [list] of <batch_size> x [2D np array]s of size [<seq_length>, <num_tags>]
Returns:
np_epoch: [dict] w/ key-value pairs:
'loss': [np value]
'tag_ids': [1D np array] of length <batch_size> x <seq_length>
            'logits' [2D np array] of shape [<batch_size> x <seq_length>, <num_tags>]
"""
return {
"loss": np.stack(np_batch["loss"]).mean(),
"tag_ids": np.concatenate(
np_batch["tag_ids"]
), # shape: [dataset_size, seq_length]
"logits": np.concatenate(
np_batch["logits"]
), # shape: [dataset_size, seq_length, num_tags]
}
####################################################################################################################
# 1. COMPUTE METRICS ###############################################################################################
####################################################################################################################
def _compute_metrics(
self, phase: str, _np_epoch: Dict[str, Union[np.number, np.array]]
) -> Tuple[Dict[str, np.array], Dict[str, np.array]]:
"""
- compute loss, acc, f1 scores for size/phase = batch/train or epoch/val-test
Args:
phase: [str], 'train', 'val', 'test'
_np_epoch: [dict] w/ key-value pairs:
'loss': [np value]
'tag_ids': [1D np array] of length <batch_size> x <seq_length>
            'logits': [2D np array] of shape [<batch_size> x <seq_length>, <num_tags>]
Returns:
_epoch_metrics [dict] w/ keys 'all_acc', 'fil_f1_micro', .. & values = [np array]
_epoch_tags [dict] w/ keys 'true', 'pred' & values = [np array]
"""
# batch / dataset
tag_ids = dict()
tag_ids["true"], tag_ids["pred"] = self._reduce_and_flatten(
_np_epoch["tag_ids"], _np_epoch["logits"]
)
tags = {
field: self._convert_tag_ids_to_tags(tag_ids[field])
for field in ["true", "pred"]
}
_epoch_tags = self._get_rid_of_special_tag_occurrences(tags)
self.default_logger.log_debug("phase:", phase)
self.default_logger.log_debug(
"true:", np.shape(tags["true"]), list(set(tags["true"]))
)
self.default_logger.log_debug(
"pred:", np.shape(tags["pred"]), list(set(tags["pred"]))
)
# batch / dataset metrics
_epoch_metrics = {"token_all_loss": _np_epoch["loss"]}
for tag_subset in [
"all",
"fil",
] + self.annotation_plain.classes:
_epoch_metrics.update(
self._compute_metrics_for_tags_subset(
_epoch_tags, phase, tag_subset=tag_subset
)
)
return _epoch_metrics, _epoch_tags
@staticmethod
def _reduce_and_flatten(
_np_tag_ids: np.array, _np_logits: np.array
) -> Tuple[np.array, np.array]:
"""
helper method for _compute_metrics()
reduce _np_logits (3D -> 2D), flatten both np arrays (2D -> 1D)
Args:
_np_tag_ids: [np array] of shape [batch_size, seq_length]
_np_logits: [np array] of shape [batch_size, seq_length, num_tags]
Returns:
true_flat: [np array] of shape [batch_size * seq_length], _np_tag_ids flattened
pred_flat: [np array] of shape [batch_size * seq_length], _np_logits reduced and flattened
"""
true_flat = _np_tag_ids.flatten()
pred_flat = np.argmax(_np_logits, axis=-1).flatten()
return true_flat, pred_flat
def _convert_tag_ids_to_tags(self, _tag_ids: np.array) -> np.array:
"""
helper method for _compute_metrics()
convert tag_ids (int) to tags (str)
special tags [*] have tag_id = -100 and are converted to [S]
Args:
_tag_ids: [np array] of shape [batch_size * seq_length] with [int] elements
Returns:
_tags: [np array] of shape [batch_size * seq_length] with [str] elements
"""
return np.array(
[
self.annotation.classes[int(tag_id)] if tag_id >= 0 else "[S]"
for tag_id in _tag_ids
]
)
@staticmethod
def _get_rid_of_special_tag_occurrences(
_tags: Dict[str, np.array]
) -> Dict[str, np.array]:
"""
helper method for _compute_metrics()
get rid of all elements where '[S]' occurs in true array
Args:
_tags: [dict] w/ keys = 'true', 'pred' and
values = [np array] of shape [batch_size * seq_length]
Returns:
_tags_new: [dict] w/ keys = 'true', 'pred' and
values = [np array] of shape [batch_size * seq_length - # of spec. token occurrences]
"""
pad_indices = np.where(_tags["true"] == "[S]")
return {key: np.delete(_tags[key], pad_indices) for key in ["true", "pred"]}
def _compute_metrics_for_tags_subset(
self, _tags: Dict[str, np.array], _phase: str, tag_subset: str
) -> Dict[str, float]:
"""
helper method for _compute_metrics()
compute metrics for tags subset (e.g. 'all', 'fil')
Args:
_tags: [dict] w/ keys 'true', 'pred' & values = [np array]
_phase: [str], 'train', 'val'
tag_subset: [str], e.g. 'all', 'fil', 'PER'
Returns:
_metrics [dict] w/ keys = metric (e.g. 'all_precision_micro') and value = [float]
"""
_tags_plain = {
field: Tags(_tags[field]).convert_scheme(
source_scheme=self.annotation.scheme,
target_scheme="plain",
)
for field in ["true", "pred"]
}
classes, class_index = self._get_filtered_classes(tag_subset, _tags_plain)
required_tag_groups = (
[tag_subset] if tag_subset in ["all", "fil", "O"] else ["ind"]
)
required_phases = [_phase]
if tag_subset == "O":
levels = ["token"]
else:
levels = ["token", "entity"]
_metrics = dict()
for level in levels:
required_levels = [level]
metrics_to_compute = self.logged_metrics.get_metrics(
required_tag_groups=required_tag_groups,
required_phases=required_phases,
required_levels=required_levels,
)
if len(metrics_to_compute):
ner_metrics = NerMetrics(
_tags["true"] if level == "entity" else _tags_plain["true"],
_tags["pred"] if level == "entity" else _tags_plain["pred"],
level=level,
scheme=self.annotation.scheme if level == "entity" else "plain",
classes=classes if level == "token" else None,
class_index=class_index if level == "entity" else None,
)
ner_metrics.compute(metrics_to_compute)
results = ner_metrics.results_as_dict()
else:
results = dict()
# simple
for metric_type in self.logged_metrics.get_metrics(
required_tag_groups=required_tag_groups,
required_phases=required_phases,
required_levels=required_levels,
required_averaging_groups=["simple"],
exclude=["numberofclasses", "loss"],
):
_metrics[f"{level}_{tag_subset}_{metric_type}"] = results[metric_type]
# micro
for metric_type in self.logged_metrics.get_metrics(
required_tag_groups=required_tag_groups,
required_phases=required_phases,
required_levels=required_levels,
required_averaging_groups=["micro"],
exclude=["numberofclasses"],
):
if required_tag_groups in [["O"], ["ind"]]:
_metrics[f"{level}_{tag_subset}_{metric_type}"] = results[
f"{metric_type}_micro"
]
else:
_metrics[f"{level}_{tag_subset}_{metric_type}_micro"] = results[
f"{metric_type}_micro"
]
# macro
for metric_type in self.logged_metrics.get_metrics(
required_tag_groups=required_tag_groups,
required_phases=required_phases,
required_levels=required_levels,
required_averaging_groups=["macro"],
):
_metrics[f"{level}_{tag_subset}_{metric_type}_macro"] = results[
f"{metric_type}_macro"
]
return _metrics
def _get_filtered_classes(
self, _tag_subset: str, _tags_plain: Optional[Dict[str, np.array]] = None
) -> Tuple[List[str], Optional[int]]:
"""
helper method for _compute_metrics()
get list of filtered tags corresponding to _tag_subset name
Args:
_tag_subset: [str], e.g. 'all', 'fil', 'PER'
_tags_plain: [dict] w/ keys 'true', 'pred' & values = [np array]
Returns:
_filtered_classes: list of filtered tags
_filtered_class_index: filtered tags index in case of single _filtered_class, ignoring "O"
"""
if _tag_subset == "all":
_filtered_classes = self.annotation_plain.classes
_filtered_class_index = None
elif _tag_subset == "fil":
_filtered_classes = [
tag for tag in self.annotation_plain.classes if tag != "O"
]
_filtered_class_index = None
else:
assert _tags_plain is not None, f"ERROR! need to provide _tags_plain"
classes_plain_filtered = [
elem
for elem in self.annotation_plain.classes
if (elem in _tags_plain["true"] or elem in _tags_plain["pred"])
and elem != "O"
]
_filtered_classes = [_tag_subset]
try:
_filtered_classes_index_list = [
classes_plain_filtered.index(_tag_subset)
]
assert len(_filtered_classes_index_list) == 1
_filtered_class_index = _filtered_classes_index_list[0]
except ValueError:
_filtered_class_index = None
return _filtered_classes, _filtered_class_index
####################################################################################################################
# 2. CLASSIFICATION REPORT #########################################################################################
####################################################################################################################
def _get_classification_report(
self,
epoch_tags: Dict[str, np.array],
phase: Optional[str] = None,
epoch: Optional[int] = None,
) -> str:
"""
- get token-based (sklearn) & chunk-based (seqeval) classification report
Args:
epoch_tags: [dict] w/ keys 'true', 'pred' & values = [np array]
phase: [str], 'train', 'val', 'test'
epoch: [int]
Returns:
classification_report: [str]
"""
warnings.filterwarnings("ignore")
epoch_tags_plain = {
field: Tags(epoch_tags[field]).convert_scheme(
source_scheme=self.annotation.scheme,
target_scheme="plain",
)
for field in ["true", "pred"]
}
# token-based classification report, plain tags
classes_filtered, _ = self._get_filtered_classes("fil")
classification_report: str = ""
if phase is not None and epoch is not None:
classification_report += f"\n>>> Phase: {phase} | Epoch: {epoch}"
classification_report += (
"\n--- token-based, plain tag (sklearn) classification report on fil ---\n"
)
classification_report += classification_report_sklearn(
epoch_tags_plain["true"], epoch_tags_plain["pred"], labels=classes_filtered
)
# chunk-based classification report
epoch_tags_chunk = dict()
for field in ["true", "pred"]:
epoch_tags_chunk[field] = Tags(epoch_tags[field]).convert_scheme(
source_scheme=self.annotation.scheme,
target_scheme=self.annotation.scheme
if self.annotation.scheme in ["bio", "bilou"]
else "bio",
)
self.default_logger.log_debug("> annotation.scheme:", self.annotation.scheme)
self.default_logger.log_debug(
"> epoch_tags_chunk[true]:", list(set(epoch_tags_chunk["true"]))
)
self.default_logger.log_debug(
"> epoch_tags_chunk[pred]:", list(set(epoch_tags_chunk["pred"]))
)
classification_report += (
"\n--- entity-based (seqeval) classification report on fil ---\n"
)
classification_report += classification_report_seqeval(
[epoch_tags_chunk["true"]], [epoch_tags_chunk["pred"]], suffix=False
)
warnings.resetwarnings()
return classification_report
def _get_confusion_matrix_str(
self,
epoch_tags: Dict[str, np.array],
phase: Optional[str] = None,
epoch: Optional[int] = None,
) -> str:
"""
- get token-based (sklearn) confusion matrix
Args:
epoch_tags: [dict] w/ keys 'true', 'pred' & values = [np array]
phase: [str], 'train', 'val', 'test'
epoch: [int]
Returns:
confusion_matrix_str: [str] with confusion matrix as pd dataframe
"""
warnings.filterwarnings("ignore")
epoch_tags_plain = {
field: Tags(epoch_tags[field]).convert_scheme(
source_scheme=self.annotation.scheme,
target_scheme="plain",
)
for field in ["true", "pred"]
}
# token-based confusion matrix, plain tags
confusion_matrix = confusion_matrix_sklearn(
epoch_tags_plain["true"],
epoch_tags_plain["pred"],
labels=self.annotation_plain.classes,
)
df_confusion_matrix =
|
pd.DataFrame(confusion_matrix)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 14 15:28:36 2020
@author: Khmira
"""
# import necessary modules
import pandas as pd
#import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from sklearn import linear_model
import statsmodels.api as sm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
#from sklearn.preprocessing import OneHotEncoder
#from sklearn.tree import DecisionTreeRegressor
# load the data set
data = pd.read_csv('data csv.csv')
# print info about columns in the dataframe
print(data.info())
#what kinds of data it contains
print(data.keys())
#a description of what the features are:
#print(data.DESCR)
# creating dummies for GAME
dummies_GAME = pd.get_dummies(data.GAME)
dummies_GAME.columns=['DICE','DICE2','FREE','FREE2']
rows = dummies_GAME.shape[0]
i = 0
while i < rows:
if dummies_GAME['DICE'][i] < dummies_GAME['DICE2'][i]:
dummies_GAME['DICE'][i]=dummies_GAME['DICE2'][i]
i+=1
i = 0
while i < rows:
if dummies_GAME['FREE'][i] < dummies_GAME['FREE2'][i]:
dummies_GAME['FREE'][i]=dummies_GAME['FREE2'][i]
i+=1
data =
|
pd.concat([data,dummies_GAME],axis='columns')
|
pandas.concat
|
import sys
import os
import logging
import datetime
import pandas as pd
from job import Job, Trace
from policies import ShortestJobFirst, FirstInFirstOut, ShortestRemainingTimeFirst, QuasiShortestServiceFirst
sys.path.append('..')
def simulate_vc(trace, vc, placement, log_dir, policy, logger, start_ts, *args):
if policy == 'sjf':
scheduler = ShortestJobFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'fifo':
scheduler = FirstInFirstOut(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'srtf':
scheduler = ShortestRemainingTimeFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'qssf':
scheduler = QuasiShortestServiceFirst(
trace, vc, placement, log_dir, logger, start_ts, args[0])
scheduler.simulate()
logger.info(f'Finish {vc.vc_name}')
return True
def get_available_schedulers():
return ['fifo', 'sjf', 'srtf', 'qssf']
def get_available_placers():
return ['random', 'consolidate', 'consolidateFirst']
def trace_process(dir, date_range):
start = '2020-04-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['job_id', 'user', 'vc', 'jobname', 'gpu_num',
'cpu_num', 'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
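# Illustrative offset arithmetic for the slicing above (assumed date_range): with
# start = '2020-04-01 00:00:00' and date_range = ('2020-04-02 00:00:00', '2020-04-03 00:00:00'),
# begin = 86400.0 and end = 172800.0 seconds, so only jobs submitted inside that window are kept.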
def trace_philly_process(dir, date_range):
start = '2017-10-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['user', 'vc', 'jobname', 'gpu_num',
'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
df['state'] = df['state'].replace('Pass', 'COMPLETED')
df['state'] = df['state'].replace('Failed', 'FAILED')
df['state'] = df['state'].replace('Killed', 'CANCELLED')
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
def trace_parser(df):
trace = Trace()
for _, series in df.iterrows():
trace.append_job(Job(series))
trace.sort_jobs('submit_time')
return trace
def logger_init(file):
logger = logging.getLogger()
handler_file = logging.FileHandler(f'{file}.log', 'w')
handler_stream = logging.StreamHandler() # sys.stdout
logger.setLevel(logging.INFO)
handler_file.setLevel(logging.INFO)
handler_stream.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s | %(processName)s | %(message)s', datefmt='%Y %b %d %H:%M:%S')
handler_file.setFormatter(formatter)
handler_stream.setFormatter(formatter)
logger.addHandler(handler_file)
logger.addHandler(handler_stream)
return logger
def cluster_concatenate(policy, placer, log_dir, dir):
prefix = f'{policy}_{placer}'
if not os.path.exists(log_dir+'/all'):
os.mkdir(log_dir+'/all')
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vcs = list(vc_dict.keys())
'''Log'''
cluster_log = pd.DataFrame()
for vc in vcs:
vc_log = pd.read_csv(f'{log_dir}/{vc}/{prefix}_{vc}_log.csv')
cluster_log =
|
pd.concat([cluster_log, vc_log])
|
pandas.concat
|
# import bibtexparser
from fixtex import fix_bib
import utool as ut
import numpy as np
import pandas as pd
pd.options.display.max_rows = 20
pd.options.display.max_columns = 40
pd.options.display.width = 160
pd.options.display.float_format = lambda x: '%.4f' % (x,)
# PARSE DATABASE
# full_bibman = fix_bib.BibMan('FULL.bib', doc='thesis')
bibman = fix_bib.BibMan('final-bib.bib', doc='thesis')
bibman.sort_entries()
bibman.write_testfile()
bibman.printdiff()
bibman.save()
print('bibman.unregistered_pubs = {}'.format(ut.repr4(bibman.unregistered_pubs)))
for pub in bibman.unregistered_pubs:
if 'None' in str(pub):
print(ut.repr4(pub.entry))
df = pd.DataFrame.from_dict(bibman.cleaned, orient='index')
del df['abstract']
# want = text.count('@')
want = len(df)
# paged_items = df[~pd.isnull(df['pub_abbrev'])]
# has_pages = ~pd.isnull(paged_items['pages'])
# print('have pages {} / {}'.format(has_pages.sum(), len(has_pages)))
# print(ut.repr4(paged_items[~has_pages]['title'].values.tolist()))
df.loc[
|
pd.isnull(df['pub_type'])
|
pandas.isnull
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
import pathlib
import numpy as np
import pandas as pd
sys.path.insert(0, "../../scripts")
from utils import load_data
from pycytominer.cyto_utils import infer_cp_features
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from sklearn.decomposition import PCA
from tensorflow import keras
from vae import VAE
from tensorflow.keras.models import Model, Sequential
import seaborn
import random as python_random
import tensorflow as tf
import umap
import seaborn as sns
# In[2]:
data_splits = ["train", "test", "valid", "complete"]
data_dict = load_data(data_splits)
# In[3]:
# Prepare data for training
meta_features = infer_cp_features(data_dict["train"], metadata=True)
cp_features = infer_cp_features(data_dict["train"])
train_features_df = data_dict["train"].reindex(cp_features, axis="columns")
train_meta_df = data_dict["train"].reindex(meta_features, axis="columns")
test_features_df = data_dict["test"].reindex(cp_features, axis="columns")
test_meta_df = data_dict["test"].reindex(meta_features, axis="columns")
valid_features_df = data_dict["valid"].reindex(cp_features, axis="columns")
valid_meta_df = data_dict["valid"].reindex(meta_features, axis="columns")
complete_features_df = data_dict["complete"].reindex(cp_features, axis="columns")
complete_meta_df = data_dict["complete"].reindex(meta_features, axis="columns")
# In[4]:
decoder_beta = keras.models.load_model("models/level4Decoder_beta")
encoder_beta = keras.models.load_model("models/level4Encoder_beta")
decoder_vanilla = keras.models.load_model("models/level4Decoder_vanilla")
encoder_vanilla = keras.models.load_model("models/level4Encoder_vanilla")
decoder_mmd = keras.models.load_model("models/level4Decoder_mmd")
encoder_mmd = keras.models.load_model("models/level4Encoder_mmd")
# In[5]:
reconstruction_beta = pd.DataFrame(decoder_beta.predict(encoder_beta.predict(test_features_df)[2]))
reconstruction_beta['label'] = 'β-VAE reconstruction'
reconstruction_vanilla = pd.DataFrame(decoder_vanilla.predict(encoder_vanilla.predict(test_features_df)[2]))
reconstruction_vanilla['label'] = 'Vanilla VAE reconstruction'
reconstruction_mmd = pd.DataFrame(decoder_mmd.predict(encoder_mmd.predict(test_features_df)[2]))
reconstruction_mmd['label'] = 'MMD-VAE reconstruction'
# In[6]:
simulated_test_df = pd.DataFrame(np.random.normal(size=(5030, 90)), columns=np.arange(0,90))
reconstruction_of_simulated_test_beta = pd.DataFrame(decoder_beta.predict(simulated_test_df))
reconstruction_of_simulated_test_beta['label'] = 'β-VAE simulation'
reconstruction_of_simulated_test_vanilla = pd.DataFrame(decoder_vanilla.predict(simulated_test_df))
reconstruction_of_simulated_test_vanilla['label'] = 'Vanilla VAE simulation'
reconstruction_of_simulated_test_mmd = pd.DataFrame(decoder_mmd.predict(simulated_test_df))
reconstruction_of_simulated_test_mmd['label'] = 'MMD-VAE simulation'
test_features_df.columns = np.arange(0,685)
test_features_df['label'] = 'Original'
# In[7]:
beta_df = pd.concat([test_features_df, reconstruction_beta,reconstruction_of_simulated_test_beta])
mmd_df = pd.concat([test_features_df,reconstruction_mmd,reconstruction_of_simulated_test_mmd])
vanilla_df =
|
pd.concat([test_features_df,reconstruction_vanilla,reconstruction_of_simulated_test_vanilla])
|
pandas.concat
|
import json
from collections import OrderedDict
from pathlib import Path
import numpy as np
from pipeline.DatasetTransformer import Transformer
from utils.shared_names import *
import pandas as pd
import os
from holders.Dataset import Dataset
def get_files_recursively(path_to_dir, contain_filter):
if not os.path.isdir(path_to_dir):
return [path_to_dir]
paths = []
for r, d, f in os.walk(path_to_dir):
for file in f:
if contain_filter in file:
paths.append(os.path.join(r, file))
return paths
def extract_optimal_features(dataset_path):
df =
|
pd.read_csv(dataset_path)
|
pandas.read_csv
|
from financeAPI.financeAPI_lib import FinanceAPI as fapi
from pathlib import Path
import json
import pandas as pd
class Stock:
n_stocks = 0
API = fapi()
def __init__(self, symbol:str):
self.symbol = symbol
self.fundamentalsAnnual = pd.DataFrame(columns=['time', 'year', 'revenue', 'grossProfit', 'operatingIncome', 'netProfit',
'grossMargin', 'operatingMargin', 'profitMargin'])
self.fundamentalsQuarter = pd.DataFrame(columns=['time', 'quarter', 'revenue', 'grossProfit', 'operatingIncome', 'netProfit',
'grossMargin', 'operatingMargin', 'profitMargin'])
self.growthAnnual = pd.DataFrame(columns=['time', 'revGrowth', 'profitGrowth'])
self.growthQuarter =
|
pd.DataFrame(columns=['time', 'revGrowth', 'profitGrowth'])
|
pandas.DataFrame
|
import dash
from dash import dcc, dash_table
from dash import html
from dash.dependencies import Output, Input, State
import plotly.express as px
import plotly.graph_objects as go
from plotly.validators.scatter.marker import SymbolValidator
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import requests
import time
import base64
from io import BytesIO
# Data reading with path information (val)
df_val = pd.read_csv('C:\\Users\\vince\\OneDrive\\Documents\\data_scientist\\python_work\\projets\\07_loan_customer_scoring\\production\\savefig\\final_model\\cleaning\\df_val_cleaned.csv',sep=',')
id_lst = list(df_val['SK_ID_CURR'])
options=[{'label': i, 'value': i} for i in id_lst]
#Functions___________________________________________________________________________________________
def DisplayImagePIL(image, **kwargs):
encoded_image = pil_to_b64(image, enc_format='png')
def pil_to_b64(im, enc_format="png", **kwargs):
"""
Converts a PIL Image into base64 string for HTML displaying
:param im: PIL Image object
:param enc_format: The image format for displaying. If saved the image will have that extension.
:return: base64 encoding
"""
buff = BytesIO()
im.save(buff, format=enc_format, **kwargs)
encoded = base64.b64encode(buff.getvalue()).decode("utf-8")
return encoded
#Options___________________________________________________________________________________________
# columns number:
columns_number = df_val.shape[1]
# sample row for the initialisation of the table
df_sample = pd.DataFrame([np.zeros(columns_number)], columns=df_val.columns)
df_val =
|
pd.concat([df_sample,df_val], ignore_index=True)
|
pandas.concat
|
# coding: utf-8
__author__ = 'ersh'
__email__ = '<EMAIL>'
__version__ = '1.1113'
# There is a link to the group's GitHub where you can find the manoelgadi12 library,
# all the files, and instructions:
#https://github.com/ersh24/manoelgadi12
####################
#L Automated data cleaning
####################
import pandas as pd
import numpy as np
import re
####################
#L Automated data cleaning
####################
def Faa1():
import pandas as pd
import numpy as np
import re
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
np.seterr(invalid='ignore')
print("Original Data Frame\n", data)
#==============================================================================
# GOAL: Clean files trying to obtain numerical columns:
# - Usually NaN means 0, as there is no value.
# - Whitespace, which can appear when copying data, is noisy because it turns
#   numbers into strings that cannot be operated on.
# - Outliers are usually errors that can distort average values, so it is
#   better to substitute them with more reasonable values.
#==============================================================================
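# Worked example of the outlier limits computed below (illustrative numbers):
# with P25 = 10 and P75 = 20, IQR = 10, lower = 10 - 1.5*10 = -5 and
# upper = 20 + 1.5*10 = 35; values outside [-5, 35] are clipped to these limits.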
# Replace all NaN with 0
data.fillna(0, inplace=True)
# If all the values in a column are numbers or whitespace (one or several),
# replace the whitespace-only values with 0.
# Remove whitespace before or after the numbers.
for column in data.columns:
if data[column].dtypes in ["object"]:
change = True
# The column will only be changed if all its values (ignoring whitespace)
# match numbers. Numbers need an integer part, though this could easily be
# changed to accept numbers like .35 as 0.35
for i in range (0,len(data)):
if (re.match(r"[-+]?\d+(\.\d+)?$", str(data[column][i]).strip()) is None):
if (not pd.isnull(data[column][i]) and data[column][i].strip() != ''):
change = False
if change:
# If a value is only whitespace it is replaced by 0; otherwise whitespace
# is removed, and finally the column type is changed to numeric
data[column]= data[column].replace(r"^\s+$", '0', regex=True)
data[column]= data[column].replace(r"\s+", '', regex=True)
data[column] = pd.to_numeric(data[column], errors='coerce')
# Replace outliers with the border values
# For each column, several values that describe it are computed
# Values outside the upper and lower limits are replaced with the limit values
datadict = {}
for column in data.columns:
if (data[column].dtypes in ["int64", "float64"]):
max = np.max(data[column])
p75 = data[column].quantile(0.75)
p50 = data[column].quantile(0.5)
p25 = data[column].quantile(0.25)
min = np.min(data[column])
mean = data[column].mean()
iqr = p75 - p25
lower = p25-1.5*iqr
upper = p75 + 1.5*iqr
valueslist = [lower, min, p25, p50, mean, p75, max, upper]
tagslist = ["LOWER", "MIN", "P25", "P50", "Mean", "P75", "MAX", "UPPER"]
datadict.update({column : pd.Series([data[column].dtypes]+valueslist, index=["Type"]+tagslist)})
# If it is binary don't detect outliers
if (set(data[column]) == {0,1}):
continue
# Loop over the values in a column looking for extreme values
# When extreme values are found, substitute them with the limit values
for i in range (0,len(data)):
if (data[column][i] > upper):
data.set_value(i, column, upper)
if (data[column][i] < lower):
data.set_value(i, column, lower)
print ("\nInfo about the columns to transform:\n", pd.DataFrame(datadict),"\n")
print("Transformed Data Frame\n", data)
data.to_csv("transformed.csv", index=False)
####################
#L Human assisted data cleaning
####################
# Human assisted data cleaning
def HAdatacleaning():
####################
#L Human assisted data cleaning
####################
import pandas as pd
import numpy as np
import re
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
data =
|
pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Author:
<NAME>.
Date:
December 18, 2017.
Content:
Use of clustering algorithms with different libraries.
Business Intelligence.
Degree in Computer Engineering.
Universidad de Granada.
"""
'''
Documentation on clustering in Python:
http://scikit-learn.org/stable/modules/clustering.html
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
http://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html
https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
'''
import time
import os
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from scipy.cluster.hierarchy import dendrogram, linkage
import pylab
import numpy as np
import sklearn.cluster as cluster
# Extract the dendrogram using scipy, which actually re-runs the hierarchical clustering
from scipy.cluster import hierarchy
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from imblearn.metrics import geometric_mean_score
from sklearn import preprocessing
from sklearn import metrics
from math import floor
##########################################################################################################
# Function to extract the means and standard deviations of each cluster's values for each variable #
##########################################################################################################
def calcularMediaStd(cluster):
vars = list(cluster)
vars.remove('cluster')
return dict(np.mean(cluster[vars],axis=0)), dict(np.std(cluster[vars],axis=0))
def DFClusterConMedias(dataFrame):
listaClusters = list(set(dataFrame['cluster']))
DFMedia = pd.DataFrame()
DFStd = pd.DataFrame()
for cluster_n in listaClusters:
cluster_i = dataFrame[dataFrame['cluster'] == cluster_n]
DicMedia, DicStd = calcularMediaStd(cluster=cluster_i)
auxDFMedia = pd.DataFrame(DicMedia,index=[str(cluster_n)])
auxDFStd = pd.DataFrame(DicStd,index=[str(cluster_n)])
DFMedia = pd.concat([DFMedia, auxDFMedia])
DFStd = pd.concat([DFStd, auxDFStd])
return DFMedia, DFStd
##########################################################################################################
# Function to rename the columns of the dataframes with the means and the standard deviations #
##########################################################################################################
def DFClusterColumnasMediasNuevas(dataFrame):
listaColumnas = list(dataFrame.columns.values)
nuevasColumnas = []
for nombre in listaColumnas:
nombreNuevo = nombre.replace('TOT_','') + '_MED'
nuevasColumnas.append(nombreNuevo)
dataFrame.columns = nuevasColumnas
def DFClusterColumnasStdNuevas(dataFrame):
listaColumnas = list(dataFrame.columns.values)
nuevasColumnas = []
for nombre in listaColumnas:
nombreNuevo = nombre.replace('TOT_','') + '_STD'
nuevasColumnas.append(nombreNuevo)
dataFrame.columns = nuevasColumnas
############################################################################################################
# Function to concatenate the dataframes with the means and the standard deviations so everything is kept together #
############################################################################################################
def DFClusterConcatMediasStd(dataframe1, dataframe2):
DFNuevo = pd.concat([dataframe1, dataframe2], axis=1)
return DFNuevo
###############################################################################################
# Function to store all the metrics produced by the algorithms in a single dataframe #
###############################################################################################
def DFValoresAlgoritmos(algoritmo, tiempo, nClusters, CH, SC, DFTodosDatos):
df1 = pd.DataFrame({'Algoritmo':[algoritmo],
'N.Clusters':[int(nClusters)],
'Tiempo':[tiempo],
'CH':[CH],
'SH':[SC]})
return df1
#######################################
#Function to plot a scatter matrix#
#######################################
def PintarScatterMatrix(DFclusterSinOutliersAux, scatter_dir, nombreAlgoritmo, casoEstudio):
plt.figure()
variables = list(DFclusterSinOutliersAux)
variables.remove('cluster')
sns_plot = sns.pairplot(DFclusterSinOutliersAux, vars=variables, hue="cluster", palette='Paired',
plot_kws={"s": 25},
                            diag_kind="hist") # hue indicates that the 'cluster' column defines the colors
sns_plot.fig.subplots_adjust(wspace=.03, hspace=.03);
plt.savefig(scatter_dir + nombreAlgoritmo+ "Variacion-1-" + casoEstudio)
plt.close()
################################
#Function to plot a heatmap#
################################
def PintarHeatmap(DFMediasNormal, heatmap_dir, nombreAlgoritmo, casoEstudio, clusters_restantes):
plt.figure()
    # Set the figure size so that all the X-axis variables fit.
plt.subplots(figsize=(20, 10))
sns.heatmap(data=DFMediasNormal, annot=True, linewidths=0.5, yticklabels=clusters_restantes, cmap='YlGn')
plt.xticks(rotation=0)
plt.yticks(rotation=0)
plt.savefig(heatmap_dir + nombreAlgoritmo+ "Variacion-1-" + casoEstudio)
plt.close()
###############################################
#Function to plot a heatmap with a dendrogram#
###############################################
def PintarHeatmapConDendograma(DFMediasNormal, dendograma_dir, nombreAlgoritmo, casoEstudio, clusters_restantes):
plt.figure()
linkage_array = hierarchy.ward(DFMediasNormal)
    # Laid out horizontally to compare it with the one generated by seaborn.
hierarchy.dendrogram(linkage_array, orientation='left')
sns.clustermap(DFMediasNormal, method='ward', col_cluster=False, figsize=(20, 10),
yticklabels=clusters_restantes, linewidths=0.5, cmap='YlGn')
plt.savefig(dendograma_dir + nombreAlgoritmo + casoEstudio)
plt.close()
#################################################
#Function to plot the Elbow-method chart#
#################################################
def PintarGraficaElbow(numeroClusters, errorClusters, scatter_dir):
fig, ax = plt.subplots()
ax.plot(numeroClusters, errorClusters)
ax.set(xlabel='Número de Clusters', ylabel='Inertia de Cluster',
title='Comparativa K-means')
ax.grid()
plt.savefig(scatter_dir + "ComparativaKMeans.png")
plt.close()
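#Illustrative sketch (assumed usage, not in the original script): PintarGraficaElbow
#expects the lists that the main loop fills when comparativaKMeans is enabled, i.e. one
#entry per K-means run with its number of clusters and the resulting inertia.
def _ejemploGraficaElbow(X_normal, scatter_dir):
    numeroClusters, errorClusters = [], []
    for k in range(4, 10):
        km = cluster.KMeans(init='k-means++', n_clusters=k, n_init=5)
        km.fit(X_normal)
        numeroClusters.append(k)
        errorClusters.append(km.inertia_)
    PintarGraficaElbow(numeroClusters, errorClusters, scatter_dir)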
def PintarDendograma(DFMediasNormal, dendograma_dir, nombreAlgoritmo, casoEstudio):
linkage_array = hierarchy.ward(DFMediasNormal)
plt.figure()
plt.clf()
    hierarchy.dendrogram(linkage_array, orientation='left') # horizontal, to compare it with the one generated by seaborn
plt.savefig(dendograma_dir + nombreAlgoritmo + "DendogramaSolo" + casoEstudio)
plt.close()
####################
#Main program#
####################
if __name__ == "__main__":
accidentes = pd.read_csv('accidentes_2013.csv')
#___________________________________________________________________________________
    #DATASET: Accidents that occurred in the autonomous community of Madrid. 14,000
# AccidentesMadrid
#__________________________________________________________________________________
'''
subset = accidentes.loc[
accidentes.COMUNIDAD_AUTONOMA.str.contains("Madrid")
]
'''
#___________________________________________________________________________________
    #DATASET: Accidents that occurred in Andalucía. 13,944
# AccidentesAndalucia
#__________________________________________________________________________________
'''
subset = accidentes.loc[
accidentes.COMUNIDAD_AUTONOMA.str.contains("Andalucía")
]
'''
#___________________________________________________________________________________
    #DATASET: Accidents involving collisions between vehicles during December. 4,047
# ColisionVehiculosDiciembre
#__________________________________________________________________________________
    '''DO NOT RE-RUN.
subset = accidentes.loc[
(accidentes.TIPO_ACCIDENTE.str.contains("Colisión de vehículos"))
&(accidentes['MES'] == 12)
]
'''
#_____________________________________________________________________________________
    #DATASET: Accidents involving collisions between vehicles during August. 4,275
# ColisionVehiculosAgosto
#____________________________________________________________________________________
    '''DO NOT RE-RUN.
subset = accidentes.loc[
(accidentes.TIPO_ACCIDENTE.str.contains("Colisión de vehículos"))
&(accidentes['MES'] == 6)
]
'''
#_____________________________________________________________________________________
    #DATASET: Pedestrian (atropello) accidents in urban zones and on urban roads. 10,000
# ZonaUrbanaAtropellos
#_____________________________________________________________________________
'''
subset = accidentes.loc[
(accidentes.TIPO_ACCIDENTE.str.contains("Atropello"))
&(accidentes.ZONA_AGRUPADA.str.contains("VÍAS URBANAS"))
&(accidentes.ZONA.str.contains("ZONA URBANA"))
]
'''
#_____________________________________________________________________________
    #DATASET: Accidents involving collisions between vehicles in urban zones and on urban roads. 30,705
# ZonaUrbanaVehiculos
#_____________________________________________________________________________
'''
subset = accidentes.loc[
(accidentes.TIPO_ACCIDENTE.str.contains("Colisión de vehículos"))
&(accidentes.ZONA_AGRUPADA.str.contains("VÍAS URBANAS"))
&(accidentes.ZONA.str.contains("ZONA URBANA"))
]
'''
#_____________________________________________________________________________
    #DATASET: Accidents involving collisions between vehicles on a gentle-curve layout. 2,542
# TrazadoCurvaSuave.png
#_____________________________________________________________________________
'''
subset = accidentes.loc[
(accidentes.TIPO_ACCIDENTE.str.contains("Colisión de vehículos"))
&(accidentes.TRAZADO_NO_INTERSEC.str.contains("CURVA SUAVE"))
]
'''
#_____________________________________________________________________________
    #DATASET: Accidents involving collisions between vehicles on a sharp-curve layout. 1,143
# TrazadoCurvaFuerte.png
#_______________________________________________________________________________
subset = accidentes.loc[
(accidentes.TIPO_ACCIDENTE.str.contains("Colisión de vehículos"))
&(accidentes.TRAZADO_NO_INTERSEC.str.contains("CURVA FUERTE"))
]
#_____________________________________________________________________________
    #DATASET: Accidents involving collisions between vehicles on a straight layout. 20,000
# TrazadoRecta.png
#_______________________________________________________________________________
'''
subset = accidentes.loc[
(accidentes.TIPO_ACCIDENTE.str.contains("Colisión de vehículos"))
&(accidentes.TRAZADO_NO_INTERSEC.str.contains("RECTA"))
]
'''
#_____________________________________________________________________________
    #Select the variables of interest for the clustering
usadas = ['TOT_VICTIMAS', 'TOT_MUERTOS', 'TOT_HERIDOS_GRAVES', 'TOT_HERIDOS_LEVES', 'TOT_VEHICULOS_IMPLICADOS']
usadasOtra = ['TOT_HERIDOS_GRAVES','TOT_HERIDOS_LEVES', 'TOT_MUERTOS','TOT_VEHICULOS_IMPLICADOS', 'TOT_VICTIMAS']
X = subset[usadas]
print("Tamaño del conjunto de datos extraído: ",len(X), end='\n')
tamanio = len(X)
    #Set the random seed when sampling the current dataset.
X = X.sample(tamanio, random_state=123456789)
    #Initialization of the different algorithms from the SKLEARN library.
X_normal = preprocessing.normalize(X, norm='l2')
k_means = cluster.KMeans(init='k-means++', n_clusters=4, n_init=5)
k_means2 = cluster.KMeans(init='k-means++', n_clusters=5, n_init=5)
k_means3 = cluster.KMeans(init='k-means++', n_clusters=6, n_init=5)
k_means4 = cluster.KMeans(init='k-means++', n_clusters=7, n_init=5)
k_means5 = cluster.KMeans(init='k-means++', n_clusters=8, n_init=5)
k_means6 = cluster.KMeans(init='k-means++', n_clusters=9, n_init=5)
#_____________________________________________________________________________
mbkm = cluster.MiniBatchKMeans(init='k-means++',n_clusters=4, n_init=5,
max_no_improvement=10, verbose=0)
#_____________________________________________________________________________
ward = cluster.AgglomerativeClustering(n_clusters=20,linkage='ward')
#_____________________________________________________________________________
#dbscan = cluster.DBSCAN(eps=0.1,min_samples=10)
dbscan = cluster.DBSCAN(eps=0.5, min_samples=1000)
#_____________________________________________________________________________
#birch = cluster.Birch(threshold=0.1, n_clusters=4)
birch = cluster.Birch(threshold=0.1, n_clusters=3)
#_____________________________________________________________________________
#spectral = cluster.SpectralClustering(n_clusters=4)
spectral = cluster.SpectralClustering(n_clusters=9)
#_____________________________________________________________________________
#bandwidth = cluster.estimate_bandwidth(X_normal, quantile=0.2, n_samples=tamanio)
meanshift = cluster.MeanShift( bin_seeding=True)
#_____________________________________________________________________________
comparativaKMeans = False
'''
clustering_algorithms = (("K-means", k_means),
("K-means", k_means2),
("K-means", k_means3),
("K-means", k_means4),
("K-means", k_means5),
("K-means", k_means6)
)
'''
#_____________________________________________________________________________
clustering_algorithms = (("K-means", k_means6),
#("MiniBatchKMeans", mbkm),
#("Birch", birch),
#("Ward", ward),
("DBSCAN", dbscan),
#("MeanShift",meanshift),
("Spectral", spectral)
)
#_____________________________________________________________________________
casoEstudio = "TrazadoCurvaFuerte.png"
script_dir = os.getcwd()
heatmap_dir = os.path.join(script_dir, 'heatmap/')
dendograma_dir = os.path.join(script_dir, 'dendograma/')
scatter_dir = os.path.join(script_dir, 'scattermatrix/')
    #Variables to store data of interest from the algorithms so they can be
    #displayed later.
errorClustersKmean = []
clusterKmean =[]
tiempoPorAlgoritmo = {}
DFTodosDatos = pd.DataFrame(columns=['Algoritmo', 'N.Clusters','Tiempo', 'CH', 'SH'])
#_____________________________________________________________________________
    #Run the different algorithms initialized above.
print('_______________________________________________________')
for name, algorithm in clustering_algorithms:
#print('{:19s}'.format(name), end='')
t = time.time()
clusterPredict = algorithm.fit_predict(X_normal)
tiempo = time.time() - t
numeroClusterInicial = len(set(clusterPredict))
        #This is used for the Elbow method
        if (name == 'K-means') and (comparativaKMeans):
#print("Inertia: {:.5f}".format(algorithm.inertia_))
clusterKmean.append(numeroClusterInicial)
errorClustersKmean.append(algorithm.inertia_)
        #Add a new column called 'cluster' holding the clusterPredict output:
        # the cluster assignment is converted to a DataFrame
columnaClusters = pd.DataFrame(clusterPredict,index=X.index,columns=['cluster'])
        #which is then added as a column to X
datasetConCluster =
|
pd.concat([X, columnaClusters], axis=1)
|
pandas.concat
|
from .util import abbreviations, extractText
from requests import get
from bs4 import BeautifulSoup
from scipy import stats
import numpy as np
import pandas as pd
import re
class PWR(object):
def __init__(self, weight=1, regress_to=None, values=None):
self.weight = weight
self.regress_to = regress_to
if values is None:
self.values = None
else:
self.values = values.copy()
def calculate(self, **kwargs):
self.pwrcol = [x for x in list(self.values) if x not in ['Team','Games Played']][0]
return self
def regress(self, df):
self.values[self.pwrcol] = self.regress_to.regress(df, self.pwrcol)
def addGamesPlayed(self, gamelog):
if 'Games Played' not in self.values:
grouped = gamelog.groupby('Team')['Games Played'].first().reset_index()
self.values = pd.merge(self.values, grouped, on='Team')
class SRS(PWR):
def __init__(self, weight=1, regress_to=None):
PWR.__init__(self, weight, regress_to)
def calculate(self, **kwargs):
grouped = kwargs['gamelog'].groupby('Team').agg({'Difference':'sum','Opponent':lambda x: list(x)})
grouped['Games Played'] = grouped['Opponent'].str.len()
grouped['Margin'] = grouped['Difference'].values / grouped['Games Played'].values
grouped['SRS'] = grouped['Margin']
grouped['OldSRS'] = grouped['Margin']
teams = grouped.to_dict('index')
for i in range(10000):
delta = 0.0
for name, team in teams.items():
sos = 0.0
for opponent in team['Opponent']:
sos += teams[opponent]['SRS']
teams[name]['OldSRS'] = team['SRS']
teams[name]['SRS'] = team['Margin'] + (sos / team['Games Played'])
delta = max(delta, abs(teams[name]['SRS'] - teams[name]['OldSRS']))
if delta < 0.001:
break
srs_sum = 0.0
for name, team in teams.items():
srs_sum += teams[name]['SRS']
srs_avg = srs_sum / len(teams)
for name, team in teams.items():
teams[name]['SRS'] = team['SRS'] - srs_avg
df = pd.DataFrame.from_dict(teams, orient='index').reset_index()
self.values = df.rename({'index':'Team'}, axis=1)[['Team','SRS']]
self.pwrcol = 'SRS'
return self
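# Illustrative sketch (not part of the original module): SRS.calculate expects a gamelog
# with 'Team', 'Opponent' and 'Difference' columns, one row per team per game. The three
# teams and margins below are made up.
def _srs_toy_example():
    gamelog = pd.DataFrame({
        'Team':       ['A', 'B', 'A', 'C', 'B', 'C'],
        'Opponent':   ['B', 'A', 'C', 'A', 'C', 'B'],
        'Difference': [3, -3, 7, -7, -2, 2],  # point margin from each team's point of view
    })
    srs = SRS().calculate(gamelog=gamelog)
    # srs.values is a DataFrame with one zero-centered 'SRS' rating per team.
    return srs.values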
class FPI(PWR):
def __init__(self, weight=1, regress_to=None):
PWR.__init__(self, weight, regress_to)
def calculate(self, **kwargs):
url = 'https://www.espn.com/nfl/fpi'
html = BeautifulSoup(get(url).text, features='lxml')
teams = [x.text for x in html.select('div[class*=FPI__Table] > div > table > tbody')[0].find_all('tr')]
table = html.select('div[class*=FPI__Table] > div > div > div > table > tbody')[0].find_all('tr')
vals = [{'Team':'Washington Football Team' if teams[i] == 'Washington' else teams[i],
'FPI':float(row.find_all('td')[1].text)} for i, row in enumerate(table)]
self.values = pd.DataFrame(vals)
self.pwrcol = 'FPI'
return self
class DVOA(PWR):
def __init__(self, weight=1, regress_to=None):
PWR.__init__(self, weight, regress_to)
def calculate(self, **kwargs):
url = 'https://www.footballoutsiders.com/stats/nfl/team-efficiency/' + str(kwargs['season'])
html = BeautifulSoup(get(url).text, features='lxml')
tbl = html.select('table[class*=stats]')[0]
data = pd.read_html(str(tbl), header=0)[0].values.tolist()
data = [[abbreviations[x[1]], float(x[4].replace('%',''))] for x in data if '%' in x[4]]
self.values = pd.DataFrame(data, columns=['Team','DVOA'])
self.pwrcol = 'DVOA'
return self
class Sagarin(PWR):
def __init__(self, weight=1, regress_to=None):
PWR.__init__(self, weight, regress_to)
def calculate(self, **kwargs):
url = 'https://www.usatoday.com/sports/nfl/sagarin/' + str(kwargs['season']) + '/rating/'
html = BeautifulSoup(get(url).text, features='lxml')
tbltext = html.select('section[id=section_sports]')[0].text
tbltext = tbltext.replace('nbsp','').replace('\xa0','')
tbltext = tbltext.replace('49ers','XXers')
tbltext = extractText(tbltext, delim_left='HOME ADVANTAGE=', delim_right='__')
tbltext = extractText(tbltext, delim_left=']&', delim_right='')
tbltext = ''.join([x for x in tbltext if x not in ['&','(',')','|']])
pattern = re.compile('([a-zA-Z ]+[ ])[=]([^a-zA-Z]*)')
teamlist = []
for (team, stats) in re.findall(pattern, tbltext):
teamname = team.strip().replace('XXers','49ers').replace('Football','Football Team')
teamlist.append({'Team':teamname,'Sagarin':float(stats.split()[0])})
self.values = pd.DataFrame(teamlist)
self.pwrcol = 'Sagarin'
return self
class PWRsystems(object):
def __init__(self, regress_to=None, srs=None, fpi=None, dvoa=None, sagarin=None, others=None):
self.regress_to = regress_to
self.systems = []
if (srs is None) and (fpi is None) and (dvoa is None) and (sagarin is None) and (others is None):
self.systems.append(SRS())
self.systems.append(FPI())
self.systems.append(DVOA())
self.systems.append(Sagarin())
else:
pairs = [(srs, SRS),(fpi, FPI),(dvoa, DVOA),(sagarin, Sagarin)]
for system in [{'Arg':x,'Class':y} for x, y in pairs]:
if type(system['Arg']) is bool:
if system['Arg']:
self.systems.append(system['Class']())
elif system['Arg'] is not None:
self.systems.append(system['Arg'])
if others is not None:
if isinstance(others, PWR):
self.systems.append(others)
else:
for system in others:
self.systems.append(system)
def combine(self):
self.combined = self.systems[0].values[['Team']]
for system in self.systems:
self.combined =
|
pd.merge(self.combined, system.values, on='Team', suffixes=('','_'))
|
pandas.merge
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Helper functions for Copy Number Variations (CNV).
"""
import sys
import logging
import os.path as op
import numpy as np
import numpy.ma as ma
import pandas as pd
import pysam
from collections import Counter, defaultdict
from itertools import groupby
from multiprocessing import Pool
from random import choice
from pybedtools import BedTool, cleanup, set_tempdir
from jcvi.algorithms.formula import get_kmeans
from jcvi.apps.grid import MakeManager
from jcvi.utils.aws import glob_s3, push_to_s3, sync_from_s3
from jcvi.utils.cbook import percentage
from jcvi.apps.base import OptionParser, ActionDispatcher, getfilesize, mkdir, popen, sh
autosomes = ["chr{}".format(x) for x in range(1, 23)]
sexsomes = ["chrX", "chrY"]
allsomes = autosomes + sexsomes
# See: http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/
PAR = [("chrX", 10001, 2781479), ("chrX", 155701383, 156030895)]
class CopyNumberSegment(object):
def __init__(self, chr, rr, tag, mean_cn, realbins, is_PAR=False):
self.chr = chr
self.rr = rr
self.start = rr[0] * 1000
self.end = rr[1] * 1000
self.span = self.end - self.start
self.tag = tag
self.mean_cn = mean_cn
self.realbins = realbins
self.is_PAR = is_PAR
def __str__(self):
mb = self.rr / 1000.0
coords = "{}:{}-{}Mb".format(self.chr, format_float(mb[0]), format_float(mb[1]))
if self.is_PAR:
coords += ":PAR"
msg = "[{}] {} CN={} bins={}".format(
self.tag, coords, self.mean_cn, self.realbins
)
if self.realbins >= 10000: # Mark segments longer than 10K bins ~ 10Mb
msg += "*"
return msg
@property
def bedline(self):
return "\t".join(
str(x)
for x in (self.chr, self.start, self.end, self.tag, self.span, self.mean_cn)
)
class CopyNumberHMM(object):
def __init__(
self, workdir, betadir="beta", mu=0.003, sigma=10, step=0.1, threshold=0.2
):
self.model = self.initialize(mu=mu, sigma=sigma, step=step)
self.workdir = workdir
self.betadir = betadir
if not op.exists(betadir):
sync_from_s3("s3://hli-mv-data-science/htang/ccn/beta", target_dir=betadir)
self.mu = mu
self.sigma = sigma
self.step = step
self.threshold = threshold
def run(self, samplekey, chrs=allsomes):
if isinstance(chrs, str):
chrs = [chrs]
allevents = []
for chr in chrs:
X, Z, clen, events = self.run_one(samplekey, chr)
allevents.extend(events)
return allevents
def run_one(self, samplekey, chr):
cov = np.fromfile(
"{}/{}-cn/{}.{}.cn".format(self.workdir, samplekey, samplekey, chr)
)
beta = np.fromfile("beta/{}.beta".format(chr))
std = np.fromfile("beta/{}.std".format(chr))
# Check if the two arrays have different dimensions
clen, blen = cov.shape[0], beta.shape[0]
tlen = max(clen, blen)
if tlen > clen:
cov = np.array(list(cov) + [np.nan] * (tlen - clen))
elif tlen > blen:
beta = np.array(list(beta) + [np.nan] * (tlen - blen))
clen, blen = cov.shape[0], beta.shape[0]
assert clen == blen, "cov ({}) and correction ({}) not same dimension".format(
clen, blen
)
normalized = cov / beta
fixed = normalized.copy()
fixed[np.where(std > self.threshold)] = np.nan
X = fixed
Z = self.predict(X)
med_cn = np.median(fixed[np.isfinite(fixed)])
print(chr, med_cn)
# Annotate segments
segments = self.annotate_segments(Z)
events = []
for mean_cn, rr in segments:
ss = fixed[rr[0] : rr[1]]
realbins = np.sum(np.isfinite(ss))
# Determine whether this is an outlier
segment = self.tag(chr, mean_cn, rr, med_cn, realbins)
if segment:
events.append((mean_cn, rr, segment))
events.sort(key=lambda x: x[-1].start)
# Send some debug info to screen
for mean_cn, rr, segment in events:
print(segment)
return X, Z, clen, events
def tag(self, chr, mean_cn, rr, med_cn, realbins, base=2):
around_0 = around_value(mean_cn, 0)
around_1 = around_value(mean_cn, 1)
around_2 = around_value(mean_cn, 2)
if realbins <= 1: # Remove singleton bins
return
if chr == "chrX":
start, end = rr
is_PAR = end < 5000 or start > 155000
if med_cn < 1.25: # Male
# PAR ~ 2, rest ~ 1
if is_PAR:
base = 2
if around_2:
return
else:
base = 1
if around_1:
return
else:
# All ~ 2
if around_2:
return
elif chr == "chrY":
if med_cn < 0.25: # Female
base = 0
if around_0:
return
else:
base = 1
if around_1:
return
else:
if around_2:
return
tag = "DUP" if mean_cn > base else "DEL"
segment = CopyNumberSegment(chr, rr, tag, mean_cn, realbins, is_PAR=False)
return segment
def initialize(self, mu, sigma, step):
from hmmlearn import hmm
# Initial population probability
n = int(10 / step)
startprob = 1.0 / n * np.ones(n)
transmat = mu * np.ones((n, n))
np.fill_diagonal(transmat, 1 - (n - 1) * mu)
# The means of each component
means = np.arange(0, step * n, step)
means.resize((n, 1, 1))
# The covariance of each component
covars = sigma * np.ones((n, 1, 1))
# Build an HMM instance and set parameters
model = hmm.GaussianHMM(n_components=n, covariance_type="full")
# Instead of fitting it from the data, we directly set the estimated
# parameters, the means and covariance of the components
model.startprob_ = startprob
model.transmat_ = transmat
model.means_ = means
model.covars_ = covars
return model
def predict(self, X):
# Handle missing values
X = ma.masked_invalid(X)
mask = X.mask
dX = ma.compressed(X).reshape(-1, 1)
dZ = self.model.predict(dX)
Z = np.array([np.nan for _ in range(X.shape[0])])
Z[~mask] = dZ
Z = ma.masked_invalid(Z)
return Z * self.step
def annotate_segments(self, Z):
"""Report the copy number and start-end segment"""
# We need a way to go from compressed idices to original indices
P = Z.copy()
P[~np.isfinite(P)] = -1
_, mapping = np.unique(np.cumsum(P >= 0), return_index=True)
dZ = Z.compressed()
uniq, idx = np.unique(dZ, return_inverse=True)
segments = []
for i, mean_cn in enumerate(uniq):
if not np.isfinite(mean_cn):
continue
for rr in contiguous_regions(idx == i):
segments.append((mean_cn, mapping[rr]))
return segments
def plot(
self, samplekey, chrs=allsomes, color=None, dx=None, ymax=8, ms=2, alpha=0.7
):
from brewer2mpl import get_map
import matplotlib.pyplot as plt
props = dict(boxstyle="round", facecolor="wheat", alpha=0.2)
if isinstance(chrs, str):
chrs = [chrs]
f, axs = plt.subplots(1, len(chrs), sharey=True)
if not isinstance(axs, np.ndarray):
axs = np.array([axs])
plt.tight_layout()
if color is None:
color = choice(get_map("Set2", "qualitative", 8).mpl_colors)
for region, ax in zip(chrs, axs):
chr, start, end = parse_region(region)
X, Z, clen, events = self.run_one(samplekey, chr)
ax.plot(X, ".", label="observations", ms=ms, mfc=color, alpha=alpha)
ax.plot(Z, "k.", label="hidden", ms=6)
if start is None and end is None:
ax.set_xlim(0, clen)
else:
ax.set_xlim(start / 1000, end / 1000)
ax.set_ylim(0, ymax)
ax.set_xlabel("1Kb bins")
title = "{} {}".format(samplekey.split("_")[1], chr)
if dx:
title += " ({})".format(dx)
ax.set_title(title)
# The final calls
yy = 0.9
abnormal = [x for x in events if x[-1]]
if len(abnormal) > 5:
yinterval = 0.02
size = 10
else:
yinterval = 0.05
size = 12
for mean_cn, rr, event in events:
if mean_cn > ymax:
continue
ax.text(np.mean(rr), mean_cn + 0.2, mean_cn, ha="center", bbox=props)
if event is None:
continue
ax.text(
0.5,
yy,
str(event).rsplit(" ", 1)[0],
color="r",
ha="center",
transform=ax.transAxes,
size=size,
)
yy -= yinterval
axs[0].set_ylabel("Copy number")
def parse_region(region):
if ":" not in region:
return region, None, None
chr, start_end = region.split(":")
start, end = start_end.split("-")
return chr, int(start), int(end)
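# Illustrative example (not part of the original module): parse_region accepts either a
# bare chromosome name or a "chr:start-end" region string.
def _parse_region_example():
    assert parse_region("chrX") == ("chrX", None, None)
    assert parse_region("chr1:10000-20000") == ("chr1", 10000, 20000)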
def contiguous_regions(condition):
"""Finds contiguous True regions of the boolean array "condition". Returns
a 2D array where the first column is the start index of the region and the
second column is the end index."""
# Find the indicies of changes in "condition"
d = np.diff(condition)
(idx,) = d.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size] # Edit
# Reshape the result into two columns
idx.shape = (-1, 2)
return idx
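# Illustrative example (not part of the original module): contiguous_regions returns one
# [start, end) index pair per run of True values, with the end index exclusive.
def _contiguous_regions_example():
    condition = np.array([True, True, False, True, False, False, True])
    regions = contiguous_regions(condition)
    assert regions.tolist() == [[0, 2], [3, 4], [6, 7]]
    return regions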
def format_float(f):
s = "{:.3f}".format(f)
return s.rstrip("0").rstrip(".")
def around_value(s, mu, max_dev=0.25):
return mu - max_dev < s < mu + max_dev
def main():
actions = (
("cib", "convert bam to cib"),
("coverage", "plot coverage along chromosome"),
("cn", "correct cib according to GC content"),
("mergecn", "compile matrix of GC-corrected copy numbers"),
("hmm", "run cnv segmentation"),
# Gene copy number
("exonunion", "collapse overlapping exons within the same gene"),
("gcn", "gene copy number based on Canvas results"),
("summarycanvas", "count different tags in Canvas vcf"),
# Interact with CCN script
("batchccn", "run CCN script in batch"),
("batchcn", "run HMM in batch"),
("plot", "plot some chromosomes for visual proof"),
# Benchmark, training, etc.
("sweep", "write a number of commands to sweep parameter space"),
("compare", "compare cnv output to ground truths"),
# Plots
("gcdepth", "plot GC content vs depth for genomic bins"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def gcdepth(args):
"""
%prog gcdepth sample_name tag
    Plot GC content vs depth for genomic bins. Inputs are mosdepth output:
- NA12878_S1.mosdepth.global.dist.txt
- NA12878_S1.mosdepth.region.dist.txt
- NA12878_S1.regions.bed.gz
- NA12878_S1.regions.bed.gz.csi
- NA12878_S1.regions.gc.bed.gz
A sample mosdepth.sh script might look like:
```
#!/bin/bash
LD_LIBRARY_PATH=mosdepth/htslib/ mosdepth/mosdepth $1 \\
bams/$1.bam -t 4 -c chr1 -n --by 1000
bedtools nuc -fi GRCh38/WholeGenomeFasta/genome.fa \\
-bed $1.regions.bed.gz \\
| pigz -c > $1.regions.gc.bed.gz
```
"""
import hashlib
from jcvi.algorithms.formula import MAD_interval
from jcvi.graphics.base import latex, plt, savefig, set2
p = OptionParser(gcdepth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
sample_name, tag = args
# The tag is used to add to title, also provide a random (hashed) color
    coloridx = int(hashlib.sha256(tag.encode()).hexdigest(), 16) % len(set2)
color = set2[coloridx]
# mosdepth outputs a table that we can use to plot relationship
gcbedgz = sample_name + ".regions.gc.bed.gz"
df = pd.read_csv(gcbedgz, delimiter="\t")
mf = df.loc[:, ("4_usercol", "6_pct_gc")]
mf.columns = ["depth", "gc"]
# We discard any bins that are gaps
mf = mf[(mf["depth"] > 0.001) | (mf["gc"] > 0.001)]
# Create GC bins
gcbins = defaultdict(list)
for i, row in mf.iterrows():
gcp = int(round(row["gc"] * 100))
gcbins[gcp].append(row["depth"])
gcd = sorted((k * 0.01, MAD_interval(v)) for (k, v) in gcbins.items())
gcd_x, gcd_y = zip(*gcd)
m, lo, hi = zip(*gcd_y)
# Plot
plt.plot(
mf["gc"],
mf["depth"],
".",
color="lightslategray",
ms=2,
mec="lightslategray",
alpha=0.1,
)
patch = plt.fill_between(
gcd_x,
lo,
hi,
facecolor=color,
alpha=0.25,
zorder=10,
linewidth=0.0,
label="Median +/- MAD band",
)
plt.plot(gcd_x, m, "-", color=color, lw=2, zorder=20)
ax = plt.gca()
ax.legend(handles=[patch], loc="best")
ax.set_xlim(0, 1)
ax.set_ylim(0, 100)
ax.set_title("{} ({})".format(latex(sample_name), tag))
ax.set_xlabel("GC content")
ax.set_ylabel("Depth")
savefig(sample_name + ".gcdepth.png")
def exonunion(args):
"""
%prog exonunion gencode.v26.annotation.exon.bed
Collapse overlapping exons within the same gene. File
`gencode.v26.annotation.exon.bed` can be generated by:
$ zcat gencode.v26.annotation.gtf.gz | awk 'OFS="\t" {if ($3=="exon")
{print $1,$4-1,$5,$10,$12,$14,$16,$7}}' | tr -d '";'
"""
p = OptionParser(exonunion.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(gencodebed,) = args
beds = BedTool(gencodebed)
# fields[3] is gene_id; fields[6] is gene_name
for g, gb in groupby(beds, key=lambda x: x.fields[3]):
gb = BedTool(gb)
sys.stdout.write(str(gb.sort().merge(c="4,5,6,7", o=",".join(["first"] * 4))))
def get_gain_loss_summary(vcffile):
"""Extract Canvas:GAIN/LOSS/REF/LOH tags"""
from cyvcf2 import VCF
counter = Counter()
for v in VCF(vcffile):
tag = v.ID.split(":")[1]
counter[tag] += 1
return counter
def summarycanvas(args):
"""
%prog summarycanvas output.vcf.gz
Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output.
"""
p = OptionParser(summarycanvas.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
for vcffile in args:
counter = get_gain_loss_summary(vcffile)
pf = op.basename(vcffile).split(".")[0]
print(
pf
+ " "
+ " ".join("{}:{}".format(k, v) for k, v in sorted(counter.items()))
)
def parse_segments(vcffile):
"""Extract all copy number segments from a CANVAS file
VCF line looks like:
chr1 788879 Canvas:GAIN:chr1:788880-821005 N <CNV> 2 q10
SVTYPE=CNV;END=821005;CNVLEN=32126 RC:BC:CN:MCC 157:4:3:2
"""
    from io import StringIO
from cyvcf2 import VCF
output = StringIO()
for v in VCF(vcffile):
chrom = v.CHROM
start = v.start
end = v.INFO.get("END") - 1
(cn,) = v.format("CN")[0]
print("\t".join(str(x) for x in (chrom, start, end, cn)), file=output)
beds = BedTool(output.getvalue(), from_string=True)
return beds
def counter_mean_and_median(counter):
"""Calculate the mean and median value of a counter"""
if not counter:
return np.nan, np.nan
total = sum(v for k, v in counter.items())
mid = total / 2
weighted_sum = 0
items_seen = 0
median_found = False
for k, v in sorted(counter.items()):
weighted_sum += k * v
items_seen += v
if not median_found and items_seen >= mid:
median = k
median_found = True
mean = weighted_sum * 1.0 / total
return mean, median
def counter_format(counter):
"""Pretty print a counter so that it appears as: "2:200,3:100,4:20" """
if not counter:
return "na"
return ",".join("{}:{}".format(*z) for z in sorted(counter.items()))
def gcn(args):
"""
%prog gcn gencode.v26.exonunion.bed data/*.vcf.gz
    Compile gene copy number based on CANVAS results.
"""
p = OptionParser(gcn.__doc__)
p.set_cpus()
p.set_tmpdir(tmpdir="tmp")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
exonbed = args[0]
canvasvcfs = args[1:]
tsvfile = opts.outfile
tmpdir = opts.tmpdir
mkdir(tmpdir)
set_tempdir(tmpdir)
df = vcf_to_df(canvasvcfs, exonbed, opts.cpus)
for suffix in (".avgcn", ".medcn"):
df_to_tsv(df, tsvfile, suffix)
def vcf_to_df_worker(arg):
"""Convert CANVAS vcf to a dict, single thread"""
canvasvcf, exonbed, i = arg
logging.debug("Working on job {}: {}".format(i, canvasvcf))
samplekey = op.basename(canvasvcf).split(".")[0].rsplit("_", 1)[0]
d = {"SampleKey": samplekey}
exons = BedTool(exonbed)
cn = parse_segments(canvasvcf)
overlaps = exons.intersect(cn, wao=True)
gcn_store = {}
for ov in overlaps:
# Example of ov.fields:
# [u'chr1', u'11868', u'12227', u'ENSG00000223972.5',
# u'ENST00000456328.2', u'transcribed_unprocessed_pseudogene',
# u'DDX11L1', u'.', u'-1', u'-1', u'.', u'0']
gene_name = "|".join((ov.fields[6], ov.fields[3], ov.fields[5]))
if gene_name not in gcn_store:
gcn_store[gene_name] = defaultdict(int)
cn = ov.fields[-2]
if cn == ".":
continue
cn = int(cn)
if cn > 10:
cn = 10
amt = int(ov.fields[-1])
gcn_store[gene_name][cn] += amt
for k, v in sorted(gcn_store.items()):
v_mean, v_median = counter_mean_and_median(v)
d[k + ".avgcn"] = v_mean
d[k + ".medcn"] = v_median
cleanup()
return d
def vcf_to_df(canvasvcfs, exonbed, cpus):
"""Compile a number of vcf files into tsv file for easy manipulation"""
df = pd.DataFrame()
p = Pool(processes=cpus)
results = []
args = [(x, exonbed, i) for (i, x) in enumerate(canvasvcfs)]
r = p.map_async(vcf_to_df_worker, args, callback=results.append)
r.wait()
for res in results:
df = df.append(res, ignore_index=True)
return df
def df_to_tsv(df, tsvfile, suffix):
"""Serialize the dataframe as a tsv"""
tsvfile += suffix
columns = ["SampleKey"] + sorted(x for x in df.columns if x.endswith(suffix))
    tf = df.reindex(columns=columns)
    tf = tf.sort_values("SampleKey")
tf.to_csv(tsvfile, sep="\t", index=False, float_format="%.4g", na_rep="na")
print(
"TSV output written to `{}` (# samples={})".format(tsvfile, tf.shape[0]),
file=sys.stderr,
)
def coverage(args):
"""
%prog coverage *.coverage
Plot coverage along chromosome. The coverage file can be generated with:
$ samtools depth a.bam > a.coverage
The plot is a simple line plot using matplotlib.
"""
from jcvi.graphics.base import savefig
p = OptionParser(coverage.__doc__)
opts, args, iopts = p.set_image_options(args, format="png")
if len(args) != 1:
sys.exit(not p.print_help())
(covfile,) = args
df = pd.read_csv(covfile, sep="\t", names=["Ref", "Position", "Depth"])
xlabel, ylabel = "Position", "Depth"
df.plot(xlabel, ylabel, color="g")
image_name = covfile + "." + iopts.format
savefig(image_name)
def plot(args):
"""
%prog plot workdir sample chr1,chr2
Plot some chromosomes for visual proof. Separate multiple chromosomes with
comma. Must contain folder workdir/sample-cn/.
"""
from jcvi.graphics.base import savefig
p = OptionParser(plot.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x7", format="png")
if len(args) != 3:
sys.exit(not p.print_help())
workdir, sample_key, chrs = args
chrs = chrs.split(",")
hmm = CopyNumberHMM(workdir=workdir)
hmm.plot(sample_key, chrs=chrs)
image_name = sample_key + "_cn." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def sweep(args):
"""
%prog sweep workdir 102340_NA12878
Write a number of commands to sweep parameter space.
"""
p = OptionParser(sweep.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, sample_key = args
golden_ratio = (1 + 5 ** 0.5) / 2
cmd = "python -m jcvi.variation.cnv hmm {} {}".format(workdir, sample_key)
cmd += " --mu {:.5f} --sigma {:.3f} --threshold {:.3f}"
mus = [0.00012 * golden_ratio ** x for x in range(10)]
sigmas = [0.0012 * golden_ratio ** x for x in range(20)]
thresholds = [0.1 * golden_ratio ** x for x in range(10)]
print(mus, file=sys.stderr)
print(sigmas, file=sys.stderr)
print(thresholds, file=sys.stderr)
for mu in mus:
for sigma in sigmas:
for threshold in thresholds:
tcmd = cmd.format(mu, sigma, threshold)
print(tcmd)
def compare_worker(arg):
cnvoutput, truths = arg
cmd = "intersectBed -f .5 -F .5"
cmd += " -a {} -b {} | wc -l".format(cnvoutput, truths)
nlines = int(popen(cmd, debug=False).read())
target_lines = len([x for x in open(cnvoutput)])
truths_lines = len([x for x in open(truths)])
precision = nlines * 100.0 / target_lines
recall = nlines * 100.0 / truths_lines
d = "\t".join(
str(x)
for x in (
cnvoutput,
truths,
nlines,
target_lines,
truths_lines,
precision,
recall,
)
)
return d
def compare(args):
"""
%prog compare NA12878_array_hg38.bed *.seg
Compare cnv output to known ground truths.
"""
p = OptionParser(compare.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
truths = args[0]
cnvoutputs = args[1:]
cpus = min(len(cnvoutputs), opts.cpus)
p = Pool(processes=cpus)
results = []
files = [(x, truths) for x in cnvoutputs]
r = p.map_async(compare_worker, files, callback=results.append)
r.wait()
for res in results:
print("\n".join(res))
def bam_to_cib(arg):
bamfile, seq, samplekey = arg
bam = pysam.AlignmentFile(bamfile, "rb")
name, length = seq["SN"], seq["LN"]
logging.debug("Computing depth for {} (length={})".format(name, length))
pileup = bam.pileup(name)
a = np.ones(length, dtype=np.int8) * -128
for x in pileup:
a[x.reference_pos] = min(x.nsegments, 255) - 128
cibfile = op.join(samplekey, "{}.{}.cib".format(samplekey, name))
a.tofile(cibfile)
logging.debug("Depth written to `{}`".format(cibfile))
def cib(args):
"""
%prog cib bamfile samplekey
Convert BAM to CIB (a binary storage of int8 per base).
"""
p = OptionParser(cib.__doc__)
p.add_option("--prefix", help="Report seqids with this prefix only")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, samplekey = args
mkdir(samplekey)
bam = pysam.AlignmentFile(bamfile, "rb")
refs = [x for x in bam.header["SQ"]]
prefix = opts.prefix
if prefix:
refs = [x for x in refs if x["SN"].startswith(prefix)]
task_args = []
for r in refs:
task_args.append((bamfile, r, samplekey))
cpus = min(opts.cpus, len(task_args))
logging.debug("Use {} cpus".format(cpus))
p = Pool(processes=cpus)
for _ in p.imap(bam_to_cib, task_args):
continue
def batchcn(args):
"""
%prog batchcn workdir samples.csv
Run CNV segmentation caller in batch mode. Scans a workdir.
"""
p = OptionParser(batchcn.__doc__)
p.add_option(
"--upload",
default="s3://hli-mv-data-science/htang/ccn",
help="Upload cn and seg results to s3",
)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, samples = args
upload = opts.upload
store = upload + "/{}/*.seg".format(workdir)
computed = [op.basename(x).split(".")[0] for x in glob_s3(store)]
computed = set(computed)
# Generate a bunch of cn commands
fp = open(samples)
nskipped = ntotal = 0
cmd = "python -m jcvi.variation.cnv cn --hmm --cleanup {}".format(workdir)
for row in fp:
samplekey, path = row.strip().split(",")
ntotal += 1
if samplekey in computed:
nskipped += 1
continue
print(" ".join((cmd, samplekey, path)))
logging.debug("Skipped: {}".format(percentage(nskipped, ntotal)))
def hmm(args):
"""
%prog hmm workdir sample_key
Run CNV segmentation caller. The workdir must contain a subfolder called
`sample_key-cn` that contains CN for each chromosome. A `beta` directory
that contains scaler for each bin must also be present in the current
directory.
"""
p = OptionParser(hmm.__doc__)
p.add_option("--mu", default=0.003, type="float", help="Transition probability")
p.add_option(
"--sigma",
default=0.1,
type="float",
help="Standard deviation of Gaussian emission distribution",
)
p.add_option(
"--threshold",
default=1,
type="float",
help="Standard deviation must be < this in the baseline population",
)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, sample_key = args
model = CopyNumberHMM(
workdir=workdir, mu=opts.mu, sigma=opts.sigma, threshold=opts.threshold
)
events = model.run(sample_key)
params = ".mu-{}.sigma-{}.threshold-{}".format(opts.mu, opts.sigma, opts.threshold)
hmmfile = op.join(workdir, sample_key + params + ".seg")
fw = open(hmmfile, "w")
nevents = 0
for mean_cn, rr, event in events:
if event is None:
continue
print(" ".join((event.bedline, sample_key)), file=fw)
nevents += 1
fw.close()
logging.debug(
"A total of {} aberrant events written to `{}`".format(nevents, hmmfile)
)
return hmmfile
def batchccn(args):
"""
%prog batchccn test.csv
Run CCN script in batch. Write makefile.
"""
p = OptionParser(batchccn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(csvfile,) = args
mm = MakeManager()
pf = op.basename(csvfile).split(".")[0]
mkdir(pf)
header = next(open(csvfile))
header = None if header.strip().endswith(".bam") else "infer"
logging.debug("Header={}".format(header))
df =
|
pd.read_csv(csvfile, header=header)
|
pandas.read_csv
|
import sys
import src.Embeddings_baselines.sts_utils as utils
import src.Preprocess.Utils as preprocess_utils
import os
import argparse
import numpy as np
import pandas as pd
import math
# Evaluate non trainable baselines on a given test set
# Download fasttext embeddings from https://fasttext.cc/docs/en/crawl-vectors.html
# fasttext.vec: wiki-news-300d-1M.vec.zip
# fasttext2.vec: crawl-300d-2M.vec.zip
# Parse command line arguments
parser = argparse.ArgumentParser(description='Evaluate simple STS baselines with word embeddings')
# parser.add_argument('test', help='test file')
parser.add_argument('--embeddings', choices= ['src/Embeddings_baselines/fasttext.vec', 'src/Embeddings_baselines/fasttext2.vec'], default='src/Embeddings_baselines/fasttext.vec', help='the word embeddings txt file')
parser.add_argument('--mode', choices=['centroid', 'align'], default='align', help='the scoring model')
parser.add_argument('--normalize', action='store_true', help='length normalize word embeddings')
parser.add_argument('--keep_stopwords', action='store_true', help='do not remove stopwords')
parser.add_argument('--encoding', default='utf-8', help='the character encoding for input (defaults to utf-8)')
args = parser.parse_args()
all_names = [
'PhrasIS_train_h_n',
'PhrasIS_train_h_p',
'PhrasIS_train_i_n',
'PhrasIS_train_i_p',
'PhrasIS_test_h_n',
'PhrasIS_test_h_p',
'PhrasIS_test_i_n',
'PhrasIS_test_i_p',
]
models=[
'centroid',
'align',
]
embeddings=[
'src/Embeddings_baselines/fasttext.vec',
'src/Embeddings_baselines/fasttext2.vec',
]
datasetsFolder = "dataset/bin"
if not os.path.exists(datasetsFolder):
sys.exit("First preprocess the datasets... Exiting")
all_datasets = dict( {name : preprocess_utils.loadDatasetPickle( os.path.join(datasetsFolder, name+".pickle")) for name in all_names })
all_datasets["PhrasIS_train_h_p+PhrasIS_train_h_n"] = pd.concat( (all_datasets["PhrasIS_train_h_p"], all_datasets["PhrasIS_train_h_n"]))
all_datasets["PhrasIS_train_i_p+PhrasIS_train_i_n"] = pd.concat( (all_datasets["PhrasIS_train_i_p"], all_datasets["PhrasIS_train_i_n"]))
all_datasets["PhrasIS_train_h_p+PhrasIS_train_i_p"] = pd.concat( (all_datasets["PhrasIS_train_h_p"], all_datasets["PhrasIS_train_i_p"]))
all_datasets["PhrasIS_train_h_p+PhrasIS_train_i_p+PhrasIS_train_h_n+PhrasIS_train_i_n"] = pd.concat( (all_datasets["PhrasIS_train_h_p+PhrasIS_train_h_n"], all_datasets["PhrasIS_train_i_p+PhrasIS_train_i_n"]))
all_datasets["PhrasIS_test_h_p+PhrasIS_test_h_n"] = pd.concat( (all_datasets["PhrasIS_test_h_p"], all_datasets["PhrasIS_test_h_n"]))
all_datasets["PhrasIS_test_i_p+PhrasIS_test_i_n"] = pd.concat( (all_datasets["PhrasIS_test_i_p"], all_datasets["PhrasIS_test_i_n"]))
all_datasets["PhrasIS_test_h_p+PhrasIS_test_i_p"] = pd.concat( (all_datasets["PhrasIS_test_h_p"], all_datasets["PhrasIS_test_i_p"]))
all_datasets["PhrasIS_test_h_p+PhrasIS_test_i_p+PhrasIS_test_h_n+PhrasIS_test_i_n"] =
|
pd.concat( (all_datasets["PhrasIS_test_h_p+PhrasIS_test_h_n"], all_datasets["PhrasIS_test_i_p+PhrasIS_test_i_n"]))
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
import os
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
df = pd.read_pickle('all_trips.pkl')
df.shape
# Trip duration
sns.set()
x = df['trip_duration']
ax = sns.distplot(x.to_list())
sns.set()
x_detail = df[
'trip_duration'][df['trip_duration'].between(1, 60, inclusive=True)]
ax = sns.distplot(x_detail.to_list())
# trips per day & hour
timeseries = df.set_index('trip_start_time')
x = timeseries.resample('H').size()
sns.lineplot(x=x.index, y=x.values)
sns.set_style("darkgrid")
timeseries = df.set_index('trip_start_time')
x = timeseries.resample('d').size()
ax = sns.barplot(x=x.index.strftime('%m-%d'), y=x.values)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
sns.set_style("darkgrid")
# Weather
# hourly
temp_per_hour = pd.read_pickle('weather_hourly.pkl')
temp_per_hour[['datetime', 'temp']].head(2)
trips_per_hour = df.groupby([pd.Grouper(key='trip_start_time', freq='H')]
).size().reset_index(name='trips')
trips_per_hour.head(2)
trips_per_hour = trips_per_hour.join(
temp_per_hour.set_index('datetime'),
on='trip_start_time')
trips_per_hour.head(2)
timeseries = trips_per_hour.set_index('trip_start_time')
x1 = timeseries['trips']
x2 = timeseries['temp']
sns.lineplot(x=x1.index, y=x1.values, color="g", label='trips per hour')
ax2 = plt.twinx()
sns.lineplot(x=x1.index, y=x2.values, color="b", ax=ax2, label='temp')
ax2.legend()
plt.legend(loc='upper left', labels=['temperature'])
sns.set_style("darkgrid")
timeseries['temp'].corr(timeseries['trips'])
# daily
temp_per_day =
|
pd.read_pickle('weather_daily.pkl')
|
pandas.read_pickle
|
"""Tests a variety of python and pandas dtypes, and tests some specific
coercion examples."""
import pandas as pd
import pytest
from packaging import version
import numpy as np
import pandera as pa
from pandera import (
Column, DataFrameSchema, SeriesSchema, Check, DateTime, Float, Int,
String, Bool, Category, Object, Timedelta, PandasDtype
)
from pandera.dtypes import (
_DEFAULT_PANDAS_INT_TYPE, _DEFAULT_PANDAS_FLOAT_TYPE,
_DEFAULT_NUMPY_INT_TYPE, _DEFAULT_NUMPY_FLOAT_TYPE,
)
from pandera.errors import SchemaError
PANDAS_VERSION = version.parse(pd.__version__)
TESTABLE_DTYPES = [
(Bool, "bool"),
(DateTime, "datetime64[ns]"),
(Category, "category"),
(Float, Float.str_alias),
(Int, Int.str_alias),
(Object, "object"),
(String, String.str_alias),
(Timedelta, "timedelta64[ns]"),
("bool", "bool"),
("datetime64[ns]", "datetime64[ns]"),
("category", "category"),
("float64", "float64"),
]
def test_default_numeric_dtypes():
"""Test that default numeric dtypes int and float are consistent."""
assert str(
|
pd.Series([1])
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
#Handling Missing Values in Pandas
* Tutorial: https://news.towardsai.net/hmv
* Github
"""
#Import Required Libraries:
import numpy as np
import pandas as pd
#Scalar arguments:
#Numerical value
pd.notna(28)
#Scalar arguments:
#String value
pd.notna("Pratik")
#Scalar arguments:
#Empty strings are not considered as NA values
pd.notna("")
#Scalar arguments:
#Infinite values are not considered as NA values
|
pd.notna(np.inf)
|
pandas.notna
|
# -*- coding: utf-8 -*-
"""
FUNCTIONS FOR AUTOMATIC TEXT MINING
TO-DOS:
- Add Word Type Filter for Wordclouds (e.g. Adjectives, Nouns only)
- Add Stopword Filters for different languages (+ option to add custom stopwords)
"""
# 1. LIBRARY LOADING ----------------------------------------------------------
# 1.1 DATA MANIPULATION ---
import pandas as pd
import numpy as np
from dfply import *
from collections import Counter
from datetime import datetime
import os
# 1.2 TEXT MINING ---
#nltk.download('wordnet')
#nltk.download('stopwords')
#nltk.download('words')
import nltk
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.collocations import *
from nltk.stem import WordNetLemmatizer
from nltk.metrics.association import QuadgramAssocMeasures
from textblob import TextBlob
import spacy
import regex
import re
from nltk.corpus import stopwords
#import gensim
from gensim.models import Word2Vec
# 1.3 CLUSTERING & DIMENSION REDUCTION ---
from sklearn.manifold import TSNE
from sklearn.cluster import DBSCAN, KMeans
from sklearn.metrics.pairwise import cosine_similarity
#from sklearn.metrics.pairwise import cosine_similarity
#from sklearn.decomposition import PCA
#from sklearn.metrics import silhouette_score as sc
#from sklearn.model_selection import GridSearchCV
#from sklearn.model_selection import ShuffleSplit
# 1.4 VISUALIZATION ---
import plotly
import plotly.express as px
from plotly.offline import plot
import plotly.figure_factory as ff
from wordcloud import WordCloud
# 2. SETTINGS ----------------------------------------------------------
stop_words = list(set(stopwords.words('english')))
stop_words.extend(['nan','NaN',"/", "people","family","test","no","the"])
DEFAULT_PLOTLY_COLORS = [
"#636efa",
"#EF553B",
"#00cc96",
"#ab63fa",
"#FFA15A",
"#19d3f3",
"#FF6692",
"#B6E880",
"#FF97FF",
"#FECB52",
"#636efa",
"#EF553B",
"#00cc96",
"#ab63fa",
"#FFA15A",
"#19d3f3",
"#FF6692",
"#B6E880",
"#FF97FF",
"#FECB52",
]
abbr_dict={
"what's":"what is",
"what're":"what are",
"who's":"who is",
"who're":"who are",
"where's":"where is",
"where're":"where are",
"when's":"when is",
"when're":"when are",
"how's":"how is",
"how're":"how are",
"i'm":"i am",
"we're":"we are",
"you're":"you are",
"they're":"they are",
"it's":"it is",
"he's":"he is",
"she's":"she is",
"that's":"that is",
"there's":"there is",
"there're":"there are",
"i've":"i have",
"we've":"we have",
"you've":"you have",
"they've":"they have",
"who've":"who have",
"would've":"would have",
"not've":"not have",
"i'll":"i will",
"we'll":"we will",
"you'll":"you will",
"he'll":"he will",
"she'll":"she will",
"it'll":"it will",
"they'll":"they will",
"isn't":"is not",
"wasn't":"was not",
"aren't":"are not",
"weren't":"were not",
"can't":"can not",
"couldn't":"could not",
"don't":"do not",
"didn't":"did not",
"shouldn't":"should not",
"wouldn't":"would not",
"doesn't":"does not",
"haven't":"have not",
"hasn't":"has not",
"hadn't":"had not",
"won't":"will not",
'\s+':' ', # replace multi space with one single space
}
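# Minimal sketch (an assumption, not from the original file): one way to apply abbr_dict,
# treating every key as a regex pattern so that the trailing '\s+' entry also collapses
# repeated whitespace. The commented-out line inside text_cleaner below instead does a
# per-token dict lookup, which skips that regex entry.
def expand_abbreviations(text):
    for pattern, replacement in abbr_dict.items():
        text = re.sub(pattern, replacement, text)
    return text
# expand_abbreviations("what's   up") -> "what is up"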
# 3. FUNCTIONS ----------------------------------------------------------------
def pos_words (sentence, tokens, ptag):
word_dfs = []
for token in tokens:
sentences = [sent for sent in sentence.sents if token in sent.string]
pwrds = []
for sent in sentences:
for word in sent:
if token in word.string:
pwrds.extend([child.string.strip() for child in word.children if child.pos_ == ptag])
counts = Counter(pwrds).most_common(1000)
counts_df=pd.DataFrame(counts)
counts_df["token"]=len(counts)*[token]
word_dfs.append(counts_df)
return pd.concat(word_dfs, ignore_index=True)
def ngram_extract(corpus, methods = ["Quadgram","Trigram","Bigram"], min_freq=3):
output = []
for method in methods:
if method == "Bigram":
ngram_measures = nltk.collocations.BigramAssocMeasures()
ngramFinder = nltk.collocations.BigramCollocationFinder.from_words(corpus)
elif method == "Trigram":
ngram_measures = nltk.collocations.TrigramAssocMeasures()
ngramFinder = nltk.collocations.TrigramCollocationFinder.from_words(corpus)
elif method == "Quadgram":
ngram_measures = QuadgramAssocMeasures()
ngramFinder = nltk.collocations.QuadgramCollocationFinder.from_words(corpus)
ngramFinder.apply_freq_filter(min_freq)
#ngramFinder.apply_word_filter(lambda w: w.lower() in stop_words)
ngram_metrics=pd.DataFrame()
for metric in ["pmi","raw_freq","likelihood_ratio","chi_sq","student_t","jaccard","poisson_stirling"]:
metric_table = pd.DataFrame(list(ngramFinder.score_ngrams(getattr(ngram_measures,metric))), columns=['ngram',metric]).sort_values(by="ngram", ascending=False)
if ngram_metrics.empty:
ngram_metrics = metric_table
else:
ngram_metrics.insert(1,metric,metric_table[metric],True)
if method == "Bigram":
ngram_metrics = ngram_metrics[ngram_metrics.ngram.map(lambda x: rightTypes(x))]
elif method == "Trigram":
ngram_metrics = ngram_metrics[ngram_metrics.ngram.map(lambda x: rightTypesTri(x))]
elif method == "Quadgram":
ngram_metrics = ngram_metrics[ngram_metrics.ngram.map(lambda x: rightTypesQuad(x))]
print('!!!!!',ngram_metrics)
if len(ngram_metrics.index) != 0:
ngram_ranks=pd.DataFrame(ngram_metrics["ngram"])
for column in ngram_metrics.columns[1:]:
ngram_ranks[column]=ngram_metrics[column].rank(ascending=0)
ngram_ranks['mean'] = ngram_ranks.mean(axis=1)
final_ngrams = ngram_ranks[["ngram",'mean']].sort_values('mean')
lookup = final_ngrams["ngram"].tolist()
print("\nThese are the extracted "+method+"s: ", lookup)
if method == "Bigram":
idx = 0
while idx < (len(corpus)-1):
output.append(corpus[idx])
if (corpus[idx], corpus[idx+1]) in lookup:
output[-1] += str("_"+corpus[idx+1])
idx += 2
else:
idx += 1
elif method == "Trigram":
idx = 0
while idx < (len(corpus)-2):
output.append(corpus[idx])
if (corpus[idx], corpus[idx+1], corpus[idx+2]) in lookup:
output[-1] += str("_"+corpus[idx+1]+"_"+corpus[idx+2])
idx += 3
else:
idx += 1
elif method == "Quadgram":
idx = 0
while idx < (len(corpus)-3):
output.append(corpus[idx])
if (corpus[idx], corpus[idx+1], corpus[idx+2], corpus[idx+3]) in lookup:
output[-1] += str("_"+corpus[idx+1]+"_"+corpus[idx+2]+corpus[idx+3])
idx += 4
else:
idx += 1
return output
else:
return corpus
#print("Extracted Keywords:")
#print([string for string in output if '_' in string].unique()) # PRINT ALL N-GRAMS IN CORPUS
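# Illustrative sketch (not part of the original file): ngram_extract takes a flat token
# list and returns it with frequent, POS-filtered n-grams merged into "_"-joined tokens.
# Assumes the NLTK 'averaged_perceptron_tagger' data is available for nltk.pos_tag.
def _ngram_extract_example():
    tokens = ["machine", "learning", "is", "fun", "and",
              "machine", "learning", "is", "useful",
              "machine", "learning", "rocks"]
    merged = ngram_extract(tokens, methods=["Bigram"], min_freq=3)
    # "machine learning" occurs 3 times, so it may come back as "machine_learning",
    # subject to the frequency and POS filters applied above.
    return merged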
def text_cleaner(text, social = False, correct = False, lemma = False):
#text = [string for string in text if string not in ["NaN","nan",np.NaN]]
text=[string for string in text if isinstance(string, float) != True]
#text = [string for string in text if str(string).lower() not in ['-66','-99','nan']]
#p = re.compile(r"(\b[-#'\.]\b)|[\W]")
#text = [p.sub(lambda m: (m.group(1) if m.group(1) else " "), string) for string in text]
#text = [regex.sub(r' +', ' ', str(string)).lower() for string in text] # LOWER AND REMOVE DOUBLE SPACINGS
#text = [regex.sub(r'[0-9]+', '', string) for string in text] # REMOVE ALL NUMBERS
#text = [' '.join([abbr_dict.get(i, i) for i in string.split()]) for string in text] #REPLACE ABBREVATIONS
text = " ".join([string.lower() for string in text])
# if lemma == True:
#
# nlp = spacy.load("en_core_web_sm")
# text = nlp(text)
# text = [token.lemma_ for token in text if not token.is_stop]
# text=list(map(lambda i: i.lower(),text))
#
# if correct == True:
#
# text = [TextBlob(string).correct() for string in text] # GRAMMAR CORRECTION
#
# if social == True:
#
# # To remove web links(http or https) from the tweet text
# text = re.sub(r"http\S+", "", text)
# # To remove hashtags (trend) from the tweet text
# text = re.sub(r"#\S+", "", text)
# # To remove user tags from tweet text
# text = re.sub(r"@\S+", "", text)
# # To remove re-tweet "RT"
# text = re.sub(r"RT", "", text)
# # To remove digits in the tweets
# text = re.sub(r"\d+", "", text)
# # To remove new line character if any
# text = text.replace('\\n','')
#
#corpus = ' '.join([word for line in text for word in line.split()])
corpus = text.split()
corpus = [string for string in corpus]
return corpus
def rightTypes(ngram): # Filter Bigrams
if '-pron-' in ngram or 't' in ngram:
return False
for word in ngram:
if word in stop_words or word.isspace():
return False
acceptable_types = ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS')
second_type = ('NN', 'NNS', 'NNP', 'NNPS')
tags = nltk.pos_tag(ngram)
if tags[0][1] in acceptable_types and tags[1][1] in second_type:
return True
else:
return False
def rightTypesTri(ngram): # Filter Trigrams
if '-pron-' in ngram or 't' in ngram:
return False
for word in ngram:
if word in stop_words or word.isspace():
return False
first_type = ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS')
third_type = ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS')
tags = nltk.pos_tag(ngram)
if tags[0][1] in first_type and tags[2][1] in third_type:
return True
else:
return False
def rightTypesQuad(ngram): # Filter Quadgrams
if '-pron-' in ngram or 't' in ngram:
return False
for word in ngram:
if word in stop_words or word.isspace():
return False
first_type = ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS')
fourth_type = ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS')
tags = nltk.pos_tag(ngram)
if tags[0][1] in first_type and tags[3][1] in fourth_type:
return True
else:
return False
# -----------------------------------------------------------------------------
def sort_coo(coo_matrix):
tuples = zip(coo_matrix.col, coo_matrix.data)
return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
"""get the feature names and tf-idf score of top n items"""
#use only topn items from vector
sorted_items = sorted_items[:topn]
score_vals = []
feature_vals = []
# word index and corresponding tf-idf score
for idx, score in sorted_items:
#keep track of feature name and its corresponding score
score_vals.append(round(score, 3))
feature_vals.append(feature_names[idx])
#create a tuples of feature,score
#results = zip(feature_vals,score_vals)
results= {}
for idx in range(len(feature_vals)):
results[feature_vals[idx]]=score_vals[idx]
return results
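# Illustrative sketch (an assumption, not from the original file): sort_coo and
# extract_topn_from_vector are typically fed one sparse row of a fitted TF-IDF matrix,
# e.g. from scikit-learn's TfidfVectorizer (get_feature_names_out needs scikit-learn >= 1.0).
def _tfidf_keywords_example(docs, query_doc, topn=5):
    from sklearn.feature_extraction.text import TfidfVectorizer
    vectorizer = TfidfVectorizer(stop_words='english')
    vectorizer.fit(docs)
    feature_names = vectorizer.get_feature_names_out()
    sorted_items = sort_coo(vectorizer.transform([query_doc]).tocoo())
    # returns a dict mapping the top-n terms of query_doc to their tf-idf scores
    return extract_topn_from_vector(feature_names, sorted_items, topn)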
def key_term_plot(corpus, clust_alg="kmeans",title="XXX", client_name = "Text", min_count=3, scaling=2, dims=[2], state=42, as_picture = True, as_interactive=False, custom_tsne_param_grid=dict(),custom_cluster_param_grid=dict() ):
"""
Key Term Visualization in 2D/3D Space
Description
----------
TBD
Parameters
----------
corpus : str
Text Source
clust_alg : ["kmeans","dbscan"]
Type of Algorithm to cluster Key Terms
dims : int [2,3]
Number of Plotting Dimensions.
Can be a list to iterate over.
client_name : str
Type of Object which the text is about.
min_count : int
Minimum Frequency of a term to be considered for Visualization.
state : int
Random State Number for Reproducability
scaling : int
Size Scaling of rendered pictures.
as_picture : bool
If TRUE, plot will be exported as picture.
as_interactive: bool
If TRUE, plit will be exported as interactive HTML file.
custom_tsne_param_grid : dict
Custom Parameters for TSNE to iterate over.
If empty, default parameters will be considered for iteration.
custom_cluster_param_grid : dict
Custom Parameters for Clustering to iterate over.
If empty, default parameters will be considered for iteration.
Returns
-------
int
Description of return value
Further Features to Consider:
-----------------------------
- Select different term category filters (e.g. Adjectives and Nouns only)
- Fuzzy Word Matching for merging Similar Adjectives Adverbs
- PCA opposed to TSNE?
"""
adjectives = []
for category in ["a","s","r"]:
for x in wn.all_synsets(category):
adjectives.append(x.name().split('.', 1)[0])
model = Word2Vec(corpus,min_count=min_count)
words = []
embeddings = []
freqs = []
for word in model.wv.vocab:
if word in adjectives:
try:
embeddings.append(model[word])
except KeyError:
embeddings.append(model["none"])
words.append(word)
freqs.append(model.wv.vocab[word].count)
for n_dimensions in dims:
def make_generator(parameters):
if not parameters:
yield dict()
else:
key_to_iterate = list(parameters.keys())[0]
next_round_parameters = {p : parameters[p]
for p in parameters if p != key_to_iterate}
for val in parameters[key_to_iterate]:
for pars in make_generator(next_round_parameters):
temp_res = pars
temp_res[key_to_iterate] = val
yield temp_res
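# Example of what make_generator yields: for {"perplexity": [5, 15], "learning_rate": [100]}
# it produces {"perplexity": 5, "learning_rate": 100} and then {"perplexity": 15, "learning_rate": 100},
# i.e. every combination in the parameter grid.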
# TSNE ----------------------------------------------------------------
fixed_tsne_params = {"n_components": n_dimensions, "random_state": state , "n_iter": 1000}
# fall back to the default grid when no custom grid is given
tsne_param_grid = custom_tsne_param_grid or {"perplexity": range(5,50,10), "learning_rate": range(100,1000,100)}
for tsne_params in make_generator(tsne_param_grid):
final_tsne_params = {**tsne_params, **fixed_tsne_params}
tsne = TSNE(**final_tsne_params)
tsne_results = tsne.fit_transform(embeddings)
# CLUSTERING ------------------------------------------------------
fixed_cluster_params = {"n_jobs": -1 , "random_state": state , "max_iter": 1000}
# fall back to the default grid when no custom grid is given
cluster_param_grid = custom_cluster_param_grid or {"n_clusters": range(3,8)}
for cluster_params in make_generator(cluster_param_grid):
final_cluster_params = {**cluster_params, **fixed_cluster_params}
df=pd.DataFrame()
df['Dimension 1'] = tsne_results[:,0]
df['Dimension 2'] = tsne_results[:,1]
if n_dimensions == 3:
df['Dimension 3'] = tsne_results[:,2]
db = KMeans(**final_cluster_params).fit(df)
df['Frequency'] = freqs
df['Word'] = words
df['Value Cluster'] = db.labels_
df = df.sort_values('Value Cluster')
df['Value Cluster'] = df['Value Cluster'].astype('category')
# PLOTTING ----------------------------------------------------
if n_dimensions == 2: # 2-DIMESIONAL
fig = px.scatter(data_frame=df,
x='Dimension 1',
y='Dimension 2',
size='Frequency',
text="Word",
color = 'Value Cluster',
size_max=75,
opacity=0.6,
width=900*scaling,
height=600*scaling,
hover_name='Word')
fig.update_traces(textposition='middle center', textfont=dict(size=8))
for m, s in enumerate(np.unique(df['Value Cluster']).tolist()):
fig.data[m].name = ("Value Cluster "+str(m+1))
fig.layout.shapes = fig.layout.shapes + ({
"type":"circle",
"layer":"below",
"xref":"x",
"yref":"y",
"x0":min(df[df['Value Cluster'] == s]['Dimension 1']),
"y0":min(df[df['Value Cluster'] == s]['Dimension 2']),
"x1":max(df[df['Value Cluster'] == s]['Dimension 1']),
"y1":max(df[df['Value Cluster'] == s]['Dimension 2']),
"opacity":0.1,
"fillcolor": DEFAULT_PLOTLY_COLORS[int(s)],
"line_color":DEFAULT_PLOTLY_COLORS[int(s)],
},)
elif n_dimensions == 3: # 3-DIMENSIONAL
fig = px.scatter_3d(data_frame=df,
x='Dimension 1',
y='Dimension 2',
z='Dimension 3',
size='Frequency',
text="Word",
color = 'Value Cluster',
size_max=75,
opacity=0.6,
width=900*scaling,
height=600*scaling,
hover_name='Word')
fig.update_traces(textposition='middle center', textfont=dict(size=8))
for m, s in enumerate(np.unique(df['Value Cluster']).tolist()):
fig.data[m].name = ("Value Cluster "+str(m+1))
for s in np.unique(df['Value Cluster']).tolist():
fig.add_mesh3d(
alphahull= 7,
x=df[df['Value Cluster'] == s]['Dimension 1'],
y=df[df['Value Cluster'] == s]['Dimension 2'],
z=df[df['Value Cluster'] == s]['Dimension 3'],
opacity=0.1,
color=DEFAULT_PLOTLY_COLORS[int(s)]
)
fig.layout.template = "plotly_white"
fig.layout.font = dict(
family='Arial',
)
fig.update_layout(
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
title_text=('<b>Personality Trait Landscape </b> | An overview of attributes associated with ' + str(client_name))
)
fig.layout.hoverlabel.font = dict(
family='Arial',
size=8
)
fig.layout.legend=dict(
orientation="v",
font = dict(size=10),
#itemclick="toggleothers",
traceorder="normal",
xanchor="center",
yanchor="bottom",
itemsizing="constant"
)
primary = "#e6e6e6"
secondary = "#f0f0f0"
fig.update_xaxes(showgrid=True, gridwidth=0.01, gridcolor=secondary,
showline=True, linewidth=2, linecolor=primary,mirror=True,
zeroline=True, zerolinewidth=2, zerolinecolor=primary)
fig.update_yaxes(showgrid=True, gridwidth=0.01, gridcolor=secondary,
showline=True, linewidth=2, linecolor=primary,mirror=True,
zeroline=True, zerolinewidth=2, zerolinecolor=primary)
# EXPORTING ---------------------------------------------------
name = (title+" - "+str(n_dimensions)+"D - TSNE "+ str(tsne_params)+" - CLUSTER "+ str(cluster_params)+" - STATE "+str(state))
name = name.replace("}","")
name = name.replace("{","")
name = name.replace("'","")
name = name.replace(","," ")
name = name.replace(":","")
if as_interactive == True:
plotly.offline.plot(fig, filename=(name+".html"), auto_open=False)
if as_picture == True:
fig.write_image((name+".png"),
width=900*scaling,
height=600*scaling,
scale=5
)
# ---
def theme_correlation(df, project_name, cols, n_cols):
writer = pd.ExcelWriter(project_name+'_'+str(datetime.today().strftime('%Y%m%d'))+'_Co-Occurence Matrix.xlsx',engine='xlsxwriter')
workbook=writer.book
indexes=[df.columns.get_loc(c) for c in cols]
for i,x in enumerate(indexes):
# THEME-MATRIX CALCULATION
names = df.iloc[:,(indexes[i]+1):(indexes[i]+n_cols[i]+1)].columns.tolist()
theme_matrix = cosine_similarity(df[names].to_numpy().T)
theme_names = cosine_similarity(df[names].to_numpy().T)
np.fill_diagonal(theme_matrix, 0)
np.fill_diagonal(theme_names, 1)
# PLOTTING
co_occurence_plot = ff.create_annotated_heatmap(z=theme_matrix,
x=names,
xgap = 10,
ygap = 10,
y=names,
annotation_text=np.around(theme_names,2),
colorscale = 'greens'
)
co_occurence_plot.update_layout(height=800,width=2000)
co_occurence_plot.layout.template = 'plotly_white'
co_occurence_plot.update_xaxes(showgrid=False, zeroline=False)
co_occurence_plot.update_yaxes(showgrid=False, zeroline=False)
# EXCEL WRITING
cooccurence_output = pd.DataFrame(theme_matrix)
worksheet=workbook.add_worksheet(str(x))
writer.sheets[str(x)] = worksheet
cooccurence_output.to_excel(writer,sheet_name=str(x),startrow=0, startcol=0)
plot(co_occurence_plot)
writer.save()
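# A hedged usage sketch for theme_correlation(): `survey_df` is assumed to contain, for each
# entry in `cols`, a marker column immediately followed by n_cols[i] binary theme columns;
# the names below are illustrative only.
# theme_correlation(survey_df, project_name='DemoProject', cols=['Themes_Q1'], n_cols=[3])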
def theme_crosstabs(df, cols, n_cols, project_name, groupings):
writer = pd.ExcelWriter(project_name+"_"+str(datetime.today().strftime('%Y%m%d'))+'_Theme-CrossTabs.xlsx',engine='xlsxwriter')
workbook=writer.book
indexes=[df.columns.get_loc(c) for c in cols]
for i,x in enumerate(indexes):
names= df.iloc[:,(indexes[i]+1):(indexes[i]+n_cols[i]+1)].columns.tolist()
crosstab = (df >> gather('variable', 'value', [names]) >> filter_by(X['value'] != 0))
#pd.crosstab(index=crosstab['Age'], columns=crosstab['variable'])
worksheet=workbook.add_worksheet(cols[i])
writer.sheets[cols[i]] = worksheet
for w, q in enumerate(groupings):
# PERCENTAGE CROSSTABS
percentage_crosstab=pd.crosstab(crosstab[groupings[w]], crosstab["variable"],normalize='index').T
percentage_crosstab.to_excel(writer,sheet_name=cols[i],startrow=(w*25) , startcol=0)
# ABSOLUTE CROSSTABS
absolute_crosstab=pd.crosstab(crosstab[groupings[w]], crosstab["variable"],margins=True)
|
# -*- coding: utf-8 -*-
"""
Created on 2017-5-20
@author: cheng.li
"""
import os
import arrow
import datetime as dt
import uqer
import sqlalchemy
import numpy as np
import pandas as pd
import pendulum
from airflow.operators.python_operator import PythonOperator
from airflow.models import DAG
from uqer import DataAPI as api
from alphamind.utilities import alpha_logger
from sqlalchemy import select, and_, or_, MetaData, delete
from PyFin.api import advanceDateByCalendar
from PyFin.api import isBizDay
from alphamind.api import SqlEngine
from alphamind.data.dbmodel.models import *
from alphamind.api import Universe as UniversProxy
from alphamind.api import industry_styles
uqer.DataAPI.api_base.timeout = 300
local_tz = pendulum.timezone("Asia/Shanghai")
start_date = dt.datetime(2018, 7, 27, tzinfo=local_tz)
dag_name = 'update_uqer_data_postgres'
default_args = {
'owner': 'wegamekinglc',
'depends_on_past': True,
'start_date': start_date
}
dag = DAG(
dag_id=dag_name,
default_args=default_args,
schedule_interval='0 9 * * 1,2,3,4,5'
)
_ = uqer.Client(token=os.environ['DATAYES_TOKEN'])
engine = sqlalchemy.create_engine(os.environ['DB_URI'])
alpha_engine = SqlEngine(os.environ['DB_URI'])
def process_date(ds):
alpha_logger.info("Loading data at {0}".format(ds))
this_date = dt.datetime.strptime(ds, '%Y-%m-%d')
ref_date = this_date.strftime('%Y%m%d')
return ref_date, this_date
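# Example: process_date('2018-07-27') returns ('20180727', datetime.datetime(2018, 7, 27, 0, 0)).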
def format_data(df, format='%Y%m%d'):
df['trade_date'] = pd.to_datetime(df['trade_date'], format=format)
def check_holiday(this_date):
flag = isBizDay('china.sse', this_date)
if not flag:
alpha_logger.info('Job will be omitted as {0} is a holiday'.format(this_date))
return flag
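# Example (assuming PyFin's 'china.sse' calendar): check_holiday(dt.datetime(2018, 10, 1))
# logs the omission message and returns False, since October 1st is an exchange holiday.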
def data_info_log(df, table):
data_len = len(df)
if data_len > 0:
alpha_logger.info("{0} records will be inserted in {1}".format(data_len, table))
else:
msg = "No records will be inserted in {0}".format(table)
alpha_logger.warning(msg)
raise ValueError(msg)
def update_uqer_factors(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.MktStockFactorsOneDayProGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
query = delete(Uqer).where(Uqer.trade_date == this_date)
engine.execute(query)
data_info_log(df, Uqer)
format_data(df, format='%Y-%m-%d')
df.to_sql(Uqer.__table__.name, engine, index=False, if_exists='append')
def update_uqer_market(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.MktEqudGet(tradeDate=ref_date)
df.rename(columns={'tradeDate': 'trade_date', 'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
query = delete(Market).where(Market.trade_date == this_date)
engine.execute(query)
data_info_log(df, Market)
format_data(df, format='%Y-%m-%d')
df.to_sql(Market.__table__.name, engine, index=False, if_exists='append')
def update_uqer_index_market(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.MktIdxdGet(tradeDate=ref_date)
df = df[df.exchangeCD.isin(['XSHE', 'XSHG', 'ZICN'])]
df = df[df.ticker <= '999999']
df.rename(columns={'tradeDate': 'trade_date',
'ticker': 'indexCode',
'CHGPct': 'chgPct',
'secShortName': 'indexShortName'}, inplace=True)
df = df[['trade_date',
'indexCode',
'preCloseIndex',
'openIndex',
'highestIndex',
'lowestIndex',
'closeIndex',
'turnoverVol',
'turnoverValue',
'chgPct']]
df['indexCode'] = df.indexCode.astype(int)
query = delete(IndexMarket).where(IndexMarket.trade_date == this_date)
engine.execute(query)
data_info_log(df, Market)
format_data(df, format='%Y-%m-%d')
df.to_sql(IndexMarket.__table__.name, engine, index=False, if_exists='append')
def update_uqer_halt_list(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
df = api.SecHaltGet(beginDate=ref_date, endDate=ref_date)
df = df[df.assetClass == 'E']
df['trade_date'] = ref_date
df.rename(columns={'ticker': 'code'}, inplace=True)
df.code = df.code.astype(int)
del df['secID']
query = delete(HaltList).where(HaltList.trade_date == this_date)
engine.execute(query)
data_info_log(df, HaltList)
format_data(df)
df.to_sql(HaltList.__table__.name, engine, index=False, if_exists='append')
def update_universe(ds, **kwargs):
ref_date, this_date = process_date(ds)
flag = check_holiday(this_date)
if not flag:
return
query = delete(Universe).where(
Universe.trade_date == this_date,
)
engine.execute(query)
# indexed universe
universe_map = {'hs300': 300,
'sh50': 16,
'zz500': 905,
'zz800': 906,
'zz1000': 852,
'zxb': 399005,
'cyb': 399006}
total_df = None
for u in universe_map:
query = select([IndexComponent.code]).where(
and_(
IndexComponent.trade_date == this_date,
IndexComponent.indexCode == universe_map[u]
)
)
df = pd.read_sql(query, engine)
df[u] = 1
if total_df is None:
total_df = df
else:
total_df = pd.merge(total_df, df, on=['code'], how='outer')
|
import pandas as pd
import numpy as np
import talib
class Indicators(object):
"""
Input: symbol list, price (or high/low/close) DataFrames, and the relevant lookback/multiplier parameters.
Each helper returns one DataFrame per indicator component, indexed like the input prices with one column per symbol;
bb(), for example, returns the upper, middle (moving average) and lower Bollinger Band DataFrames.
"""
def bb(self, l_sym, df_price, time_period, st_dev_u, st_dev_l):
df_bb_u = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_m = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_l = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_bb_u[sym], df_bb_m[sym], df_bb_l[sym] = talib.BBANDS(np.asarray(df_price[sym]), timeperiod=time_period, nbdevup=st_dev_u, nbdevdn=st_dev_l)
except:
pass
return df_bb_u, df_bb_m, df_bb_l
def ema(self, l_sym, df_price, time_period):
df_ema = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ema[sym] = talib.EMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ema
def ma(self, l_sym, df_price, time_period):
df_ma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ma[sym] = talib.MA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ma
def sma(self, l_sym, df_price, time_period):
df_sma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_sma[sym] = talib.SMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_sma
def adx(self, l_sym, df_high, df_low, df_close, time_period):
df_adx = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_adx[sym] = talib.ADX(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod = time_period)
except:
pass
return df_adx
def mom(self, l_sym, df_price, time_period):
df_mom = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_mom[sym] = talib.MOM(np.asarray(df_price[sym]), timeperiod = time_period)
except:
pass
return df_mom
def atr(self, l_sym, df_high, df_low, df_close, time_period):
df_atr = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_atr[sym] = talib.ATR(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod=time_period)
except:
pass
return df_atr
def macd(self, l_sym, df_price, fast_period, slow_period, signal_period):
df_macd = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdsignal = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdhist = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_macd[sym], df_macdsignal[sym], df_macdhist[sym] = talib.MACD(np.asarray(df_price[sym]), fastperiod=fast_period, slowperiod=slow_period, signalperiod=signal_period)
except:
pass
return df_macd, df_macdsignal, df_macdhist
def wavec(self, l_sym, df_three, df_four, df_five):
df_ca = pd.DataFrame(columns=l_sym, index=df_three.index)
df_cb = pd.DataFrame(columns=l_sym, index=df_three.index)
for sym in l_sym:
df_ca[sym] = df_four[sym] - df_five[sym]
df_cb[sym] = df_three[sym] - df_four[sym]
return df_ca, df_cb
def waveb(self, l_sym, df_two, df_three, df_four):
df_ba = pd.DataFrame(columns=l_sym, index=df_two.index)
df_bb = pd.DataFrame(columns=l_sym, index=df_two.index)
for sym in l_sym:
df_ba[sym] = df_three[sym] - df_four[sym]
df_bb[sym] = df_two[sym] - df_three[sym]
return df_ba, df_bb
def wavea(self, l_sym, df_one, df_two, df_three):
df_aa = pd.DataFrame(columns=l_sym, index=df_one.index)
df_ab = pd.DataFrame(columns=l_sym, index=df_one.index)
for sym in l_sym:
df_aa[sym] = df_two[sym] - df_three[sym]
df_ab[sym] = df_one[sym] - df_two[sym]
return df_aa, df_ab
def keltner(self, l_sym, df_high, df_low, df_close, ema_period, atr_period, multiplier):
df_kch_u = pd.DataFrame(columns=l_sym, index=df_high.index)
df_kch_l = pd.DataFrame(columns=l_sym, index=df_high.index)
df_kch_m = self.ema(l_sym, df_close, time_period=ema_period)
df_atr = self.atr(l_sym, df_high, df_low, df_close, time_period=atr_period)
for sym in l_sym:
df_kch_u[sym] = df_kch_m[sym] + (multiplier * df_atr[sym])
df_kch_l[sym] = df_kch_m[sym] - (multiplier * df_atr[sym])
return df_kch_u, df_kch_m, df_kch_l
def ichimoku(self, l_sym, df_high, df_low):
df_ichimoku_tenkan_u = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_tenkan_l = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_kijun_u = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_kijun_l = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_kijun = pd.DataFrame(columns=l_sym, index=df_high.index)
|
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
from pandas.tests.window.common import (
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = Series(np.zeros(20))
other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
],
)
@td.skip_if_no_scipy
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_functions_window_non_shrinkage_binary():
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=pd.MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
functions = [
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
|
import re
import string
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#import seaborn as sns
import os, json
from collections import Counter
from config import FileConfig
import pprint
import datetime
import sqlite3
from pymongo import MongoClient
import datetime
from bson.objectid import ObjectId
currdate = datetime.datetime.now()
from bs4 import BeautifulSoup
class TanqeebEDA(BasePreprocessor):
"""Code for analyzing tanqeeb cvs."""
def __init__(self):
"""Initialize"""
self.extdir = os.path.join(FileConfig.EXTDIR,'tanqeeb')
self.outdir = os.path.join(FileConfig.EXTDIR,'tanqeeb')
self.figdir = os.path.join(FileConfig.FIGDIR,'tanqeeb')
self.conn = sqlite3.connect(os.path.join(self.outdir,"tanqeebcv.db"), timeout=10)
self.cursor = self.conn.cursor()
client = MongoClient('localhost', 27017)
self.db = client['tanqeeb']
#self.set_mappings()
def compute_stats(self, df):
"""Compute stats on information filled out and available in the CVs"""
df['age'] = [np.floor((datetime.datetime.now()-row['Birth Date']).days/365) for i, row in df.iterrows()]
countries = list(set(df['Nationality']))
# document non-missing
avail = df.notnull().sum()/len(df)*100
avail.sort_values(inplace=True)
params = {
"xtitle":"Percent Available",
"ytitle":"Resume Field",
"title":"Availability of Resume Fields\nData: %s (%d Observations)" % ('Tanqeeb', len(df)),
"filename": "percent_resumes_available_all.png"
}
self._graph_bar(avail.index, avail.values, params)
for cty in countries:
tempdf = df[df['Nationality']==cty]
if len(tempdf) > 100:
print(tempdf['Gender'].value_counts()/len(df))
# document non-missing
avail = tempdf.notnull().sum()/len(tempdf)*100
avail.sort_values(inplace=True)
params = {
"xtitle":"Percent Available",
"ytitle":"Resume Field",
"title":"Availability of Resume Fields\nCountry: %s\nData: %s (%d Observations)" % (cty, 'Tanqeeb', len(tempdf)),
"filename": "percent_resumes_available_%s.png" % (cty)
}
self._graph_bar(avail.index, avail.values, params)
# Months last active
df['months_since_last_active'] = [month_diff(row['last_active'], datetime.datetime.now()) if not pd.isnull(row['last_active']) else -1 for i, row in df.iterrows()]
# generate percent active over different time periods
temp = df['months_since_last_active'].value_counts()
print(temp.values)
last_active = []
for i in [1, 3, 6, 12]:
last_active.append([i, np.sum(temp[(temp.index <= i) & (temp.index >= 0)])/np.sum(temp)])
temp = pd.DataFrame(last_active, columns=['Active in Last Months', 'Number of Users'])
params = {
"xtitle":"Percent Active",
"ytitle":"In Last Months",
"title":"Percent of Active Users\nData: %s" % ('Tanqeeb'),
"filename": "percent_active_users_%s.png"
}
self._graph_bar(temp['Active in Last Months'], temp['Number of Users'], params)
return(df)
def document_users(self, df):
"""Get general statistics on users"""
query = """SELECT DISTINCT country, id FROM resumelinks;"""
tempdf = pd.read_sql(query, self.conn)
print(tempdf.head())
df['id'] = df['_id']
df = tempdf.merge(df, how='left', on=['id'])
print(len(df))
df['female'] = [1 if row['Gender'] == 'Female' else 0 for i, row in df.iterrows()]
df['employed'] = [1 if not pd.isnull(row['jobstatus']) and str(row['jobstatus']).strip() == 'Working but looking for new opportunities' else 0 for i, row in df.iterrows()]
aggregation = {
'age':{
'age_mean':'mean',
'age_std':'std'
},
'female':'mean',
'employed':'mean'
}
stats = df.groupby(['country']).agg(aggregation)
stats.to_csv(os.path.join(FileConfig.INTDIR,'tanqeeb','resume_stats.csv'))
print(stats)
def extract_from_mongodb(self):
"""Extract data from mongodb and format it for analysis."""
resumes = self.db['resumes']
results = self.db.resumes.find({"error":{"$exists": False}})
df = pd.DataFrame(list(results))
print(df.head())
re1 = re.compile(r'(\d+)-(\d+)-(\d+)')
df['Birth Date'] = [row['Birth Date'] if re1.match(str(row['Birth Date'])) is not None else np.nan for i, row in df.iterrows()]
df['Birth Date'] = pd.to_datetime(df['Birth Date'], errors='ignore')
df['Marital Status'] = [np.nan if row['Marital Status'] == '-' else row['Marital Status'] for i, row in df.iterrows()]
print(len(df))
print(df.head())
print(df.dtypes)
df.to_csv(os.path.join(self.extdir,'csv','resumes.csv'), index=False)
return(df)
def get_valid_resumes(self, dfmd):
"""Get valid resumes and cross-join with the other data."""
# check the data in the db
query = """SELECT country, srchtitle, COUNT(id) as cnt
FROM resumelinks
WHERE srchtitle IN ('Engineering','Marketing','Sales')
GROUP BY country, srchtitle
ORDER BY country, cnt DESC
;"""
df = pd.read_sql(query, self.conn)
query = """SELECT DISTINCT id, country, srchtitle FROM resumelinks WHERE srchtitle IN ('Engineering','Marketing','Sales');"""
df = pd.read_sql(query, self.conn)
print(len(df))
dfmd['id'] = dfmd['_id']
df = df.merge(dfmd, how='left', on=['id'])
print(len(df))
df = df[(~pd.isnull(df['education'])) & (~pd.isnull(df['experiences'])) & (~pd.isnull(df['skills']))]
|
import pytest
import shutil
import pandas
from pipen import Proc
from pipen.exceptions import (
ProcInputKeyError,
ProcInputTypeError,
ProcScriptFileNotFound,
ProcWorkdirConflictException,
)
from datar.dplyr import mutate
from .helpers import (
In2Out1Proc,
NoInputProc,
NormalProc,
RelPathScriptProc,
ScriptNotExistsProc,
SimpleProc,
InputTypeUnsupportedProc,
pipen,
)
def test_proc_repr():
assert repr(SimpleProc) == "<Proc:SimpleProc>"
def test_from_proc_no_name():
procs = [None]
with pytest.raises(ValueError, match="Process name cannot"):
procs[0] = Proc.from_proc(SimpleProc)
def test_from_proc():
proc = Proc.from_proc(
SimpleProc,
name="new_proc",
desc="new desc",
envs={"a": 1},
cache=True,
forks=2,
plugin_opts={"p": 1},
scheduler="sge",
scheduler_opts={"s": 1},
error_strategy="retry",
num_retries=10,
submission_batch=3,
)
assert proc.name == "new_proc"
assert proc.desc == "new desc"
assert proc.envs == {"a": 1}
assert proc.cache
assert proc.forks == 2
assert proc.plugin_opts == {"p": 1}
assert proc.scheduler == "sge"
assert proc.scheduler_opts == {"s": 1}
assert proc.error_strategy == "retry"
assert proc.num_retries == 10
assert proc.submission_batch == 3
# def test_from_proc_name_from_assignment():
# proc = Proc.from_proc(SimpleProc)
# assert proc.name == "proc"
def test_proc_workdir_conflicts(pipen):
proc1 = Proc.from_proc(NormalProc, name="proc.1")
Proc.from_proc(NormalProc, name="proc-1", requires=proc1)
with pytest.raises(ProcWorkdirConflictException):
pipen.set_starts(proc1).run()
def test_cached_run(caplog, pipen):
NormalProc.nexts = []
# force uncache NormalProc
shutil.rmtree(pipen.config.workdir)
ret = pipen.set_start(NormalProc).run()
assert ret
# trigger caching
ret = pipen.set_start(NormalProc).run()
assert ret
assert caplog.text.count("Cached jobs:") == 1
def test_more_nexts(pipen):
proc1 = Proc.from_proc(NormalProc)
Proc.from_proc(NormalProc, "proc2", requires=proc1)
Proc.from_proc(NormalProc, "proc3", requires=proc1)
ret = pipen.set_starts(proc1).run()
assert ret
def test_proc_no_input(pipen):
with pytest.raises(ProcInputKeyError):
pipen.set_starts(NoInputProc).run()
def test_unsupported_input_type(pipen):
with pytest.raises(ProcInputTypeError):
pipen.set_starts(InputTypeUnsupportedProc).run()
def test_proc_with_input_data(pipen):
proc = Proc.from_proc(NormalProc, input_data=[1])
pipen.set_starts(proc).run()
assert proc.output_data.equals(pandas.DataFrame({"output": ["1"]}))
def test_proc_with_input_callable(pipen):
proc = Proc.from_proc(NormalProc, input_data=[1])
proc2 = Proc.from_proc(
NormalProc,
requires=proc,
input_data=lambda ch: ch >> mutate(output=2)
)
pipen.set_starts(proc).run()
assert proc2.output_data.equals(pandas.DataFrame({"output": ["2"]}))
|
import pandas as pd
tmp_df = pd.read_pickle("./ICD2Vec.pkl")
tmp0 = pd.DataFrame()
for key, values in tmp_df.items():
print(key)
#print(values)
tmp1 = pd.DataFrame({'DIS_CODE': [key]})
tmp2 = pd.DataFrame(values)
|
# standard libraries
import enum
import io
import unittest
# third-party libraries
import pandas
# library under test
import ccbb_pyutils.alignment_stats as ns_test
class TestFunctions(unittest.TestCase):
def _get_fastqc_test_data_dir(self):
return "test_data/fastqc_data/"
def _get_fastqc_and_star_htseq_data(self):
return "test_data/fastqc_and_star_htseq_data/"
# region _find_total_seqs_from_fastqc
def test__find_total_seqs_from_fastqc_ignore(self):
line = "##FastQC 0.11.3"
input_record = {"not much": "you"}
expected_record = input_record.copy()
real_output = ns_test._find_total_seqs_from_fastqc(line, input_record)
self.assertEqual(expected_record, real_output)
def test__find_total_seqs_from_fastqc_filename(self):
line = "Filename ARH1_S1.fastq.gz"
input_record = {"not much": "you"}
expected_record = {"Sample": "ARH1_S1", "not much": "you"}
real_output = ns_test._find_total_seqs_from_fastqc(line, input_record)
self.assertEqual(expected_record, real_output)
def test__find_total_seqs_from_fastqc_total(self):
line = "Total Sequences 32416013"
input_record = {"not much": "you"}
expected_record = {"Total Reads": 32416013.0, "not much": "you"}
real_output = ns_test._find_total_seqs_from_fastqc(line, input_record)
self.assertEqual(expected_record, real_output)
# end region
# region _find_fastqc_statuses_from_fastqc
def test__find_fastqc_statuses_from_fastqc_ignore_passed_of_interest(self):
line = "PASS Basic Statistics ARH1_S1.fastq.gz"
input_record = {"not much": "you"}
expected_record = {'FASTQC Messages': [], 'Sample': 'ARH1_S1', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Basic Statistics"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_not_of_interest(self):
# still should put in file name
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {"not much": "you"}
expected_record = {'FASTQC Messages': [], 'Sample': 'ARH1_S1', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Basic Statistics"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_no_notes_has_name(self):
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {'Sample': 'Tester', "not much": "you"}
expected_record = {'FASTQC Messages': ['FAIL: Per tile sequence quality'], 'Sample': 'Tester',
'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_has_notes_has_name(self):
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {'FASTQC Messages': ['WARN: Per base sequence content'], 'Sample': 'Tester', 'not much': 'you'}
expected_record = {'FASTQC Messages': ['WARN: Per base sequence content', 'FAIL: Per tile sequence quality'],
'Sample': 'Tester', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_no_notes_no_name(self):
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {"not much": "you"}
expected_record = {'FASTQC Messages': ['FAIL: Per tile sequence quality'], 'Sample': 'ARH1_S1',
'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_has_notes_no_name(self):
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {'FASTQC Messages': ['WARN: Per base sequence content'], 'not much': 'you'}
expected_record = {'FASTQC Messages': ['WARN: Per base sequence content', 'FAIL: Per tile sequence quality'],
'Sample': 'ARH1_S1', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
# Note: didn't retest all the functionality with WARN, just one representative case based on known
# structure of the code (whitebox, remember? :)
def test__find_fastqc_statuses_from_fastqc_ignore_warned_has_notes_has_name(self):
line = "WARN Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {'FASTQC Messages': ['WARN: Per base sequence content'], 'Sample': 'Tester', 'not much': 'you'}
expected_record = {'FASTQC Messages': ['WARN: Per base sequence content', 'WARN: Per tile sequence quality'],
'Sample': 'Tester', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
# end region
# region _loop_over_fastqc_files
def test__loop_over_fastqc_files_w_extra_args(self):
expected_data = [
{'FASTQC Messages': ['FAIL: Per tile sequence quality', 'WARN: Overrepresented sequences'],
'Sample': 'ARH1_S1'},
{'FASTQC Messages': ['FAIL: Per tile sequence quality', 'FAIL: Per sequence quality scores',
'WARN: Overrepresented sequences'], 'Sample': 'ARH3_S3'}]
expected_output = pandas.DataFrame(expected_data)
real_output = ns_test._loop_over_fastqc_files(self._get_fastqc_test_data_dir(), "summary.txt",
ns_test._find_fastqc_statuses_from_fastqc,
["Per base sequence quality", "Per tile sequence quality",
"Per sequence quality scores", "Overrepresented sequences"])
self.assertTrue(expected_output.equals(real_output))
def test__loop_over_fastqc_files_wo_extra_args(self):
expected_data = [
{'Total Reads': 32416013.0, 'Sample': 'ARH1_S1'},
{'Total Reads': 37658828.0, 'Sample': 'ARH3_S3'}]
expected_output = pandas.DataFrame(expected_data)
real_output = ns_test._loop_over_fastqc_files(self._get_fastqc_test_data_dir(), "fastqc_data.txt",
ns_test._find_total_seqs_from_fastqc)
self.assertTrue(expected_output.equals(real_output))
# end region
def test__get_fastqc_statuses(self):
expected_data = [
{'FASTQC Messages': 'FAIL: Per tile sequence quality, WARN: Overrepresented sequences',
'Sample': 'ARH1_S1'},
{'FASTQC Messages':
'FAIL: Per tile sequence quality, FAIL: Per sequence quality scores, WARN: Overrepresented sequences',
'Sample': 'ARH3_S3'}]
expected_output = pandas.DataFrame(expected_data)
real_output = ns_test._get_fastqc_statuses(self._get_fastqc_test_data_dir(),
["Per base sequence quality", "Per tile sequence quality",
"Per sequence quality scores", "Overrepresented sequences"])
self.assertTrue(expected_output.equals(real_output))
def test__get_fastqc_total_seqs(self):
expected_data = [
{'Total Reads': 32416013.00000, 'Sample': 'ARH1_S1'},
{'Total Reads': 37658828.00000, 'Sample': 'ARH3_S3'}]
expected_output = pandas.DataFrame(expected_data)
real_output = ns_test._get_fastqc_total_seqs(self._get_fastqc_test_data_dir())
real_output_rounded = real_output.round(5)
self.assertTrue(expected_output.equals(real_output_rounded))
def test__get_fastqc_results_without_msgs(self):
expected_data = [
{'FASTQC Messages': '', 'Total Reads': 32416013.0, 'Sample': 'ARH1_S1'},
{'FASTQC Messages': '', 'Total Reads': 37658828.0, 'Sample': 'ARH3_S3'}]
expected_output = pandas.DataFrame(expected_data)
expected_output = expected_output[['Sample', 'FASTQC Messages', 'Total Reads']]
real_output = ns_test._get_fastqc_results_without_msgs(self._get_fastqc_test_data_dir(),
["Per base sequence quality"])
self.assertTrue(expected_output.equals(real_output))
def test_get_fastqc_results(self):
expected_data = [
{'Total Reads': 32416013.0, "Notes": 'Below Total Reads threshold', 'Status': 'CHECK', 'Sample': 'ARH1_S1'},
{'Total Reads': 37658828.0, 'Notes': 'FAIL: Per sequence quality scores', 'Status': 'CHECK',
'Sample': 'ARH3_S3'}]
expected_output = pandas.DataFrame(expected_data)
expected_output = expected_output[['Sample', 'Total Reads', 'Notes', 'Status']]
real_output = ns_test.get_fastqc_results(self._get_fastqc_test_data_dir(), ["Per sequence quality scores"],
32500000)
self.assertTrue(expected_output.equals(real_output))
def test__parse_star_log_final_out(self):
input_txt = """ Started job on | Apr 16 03:25:24
Started mapping on | Apr 16 03:33:31
Finished on | Apr 16 03:58:18
Mapping speed, Million of reads per hour | 78.41
Number of input reads | 32389200
Average input read length | 49
UNIQUE READS:
Uniquely mapped reads number | 28693280
Uniquely mapped reads % | 88.59%
Average mapped length | 49.71
Number of splices: Total | 4838469
Number of splices: Annotated (sjdb) | 4781275
Number of splices: GT/AG | 4778522
Number of splices: GC/AG | 40848
Number of splices: AT/AC | 4101
Number of splices: Non-canonical | 14998
Mismatch rate per base, % | 0.54%
Deletion rate per base | 0.02%
Deletion average length | 1.73
Insertion rate per base | 0.01%
Insertion average length | 1.61
MULTI-MAPPING READS:
Number of reads mapped to multiple loci | 2233606
% of reads mapped to multiple loci | 6.90%
Number of reads mapped to too many loci | 500365
% of reads mapped to too many loci | 1.54%
UNMAPPED READS:
% of reads unmapped: too many mismatches | 0.00%
% of reads unmapped: too short | 2.47%
% of reads unmapped: other | 0.50%
CHIMERIC READS:
Number of chimeric reads | 0
% of chimeric reads | 0.00%
"""
input = io.StringIO(input_txt)
expected_output_underlying = [{"Sample": "testSample",
"Total Reads": 32389200.0000,
"Uniquely Aligned Reads": 28693280.0000}]
expected_output = pandas.DataFrame(expected_output_underlying)
real_output = ns_test._parse_star_log_final_out("testSample", input)
self.assertTrue(expected_output.equals(real_output))
def test__annotate_stats_no_fails(self):
input_underlying = [{"Sample": "testSample2",
"Total Reads": 37627298.0000,
"Uniquely Aligned Reads": 28792709.0000},
{"Sample": "testSample",
"Total Reads": 32389200.0000,
"Uniquely Aligned Reads": 28693280.0000}]
input = pandas.DataFrame(input_underlying)
expected_output_underlying = [{"Sample": "testSample2",
"Total Reads": 37627298.00000,
"Aligned Reads": "Unavailable",
"Uniquely Aligned Reads": 28792709.00000,
"Percent Aligned": "Unavailable",
"Percent Uniquely Aligned": 76.52080,
"Notes": "",
"Status": ""},
{"Sample": "testSample",
"Total Reads": 32389200.00000,
"Aligned Reads": "Unavailable",
"Uniquely Aligned Reads": 28693280.00000,
"Percent Aligned": "Unavailable",
"Percent Uniquely Aligned": 88.58904,
"Notes": "",
"Status": ""}]
expected_output_unordered = pandas.DataFrame(expected_output_underlying)
expected_output = expected_output_unordered[["Sample", "Total Reads", "Aligned Reads", "Uniquely Aligned Reads",
"Percent Aligned", "Percent Uniquely Aligned", "Notes", "Status"]]
real_output = ns_test._annotate_stats(input, 'check', num_aligned_threshold=1000000,
percent_aligned_threshold=60)
rounded_real_output = real_output.round(5)
self.assertTrue(expected_output.equals(rounded_real_output))
def test__annotate_stats_fails(self):
input_underlying = [{"Sample": "testSample2",
"Total Reads": 37627298.0000,
"Uniquely Aligned Reads": 28792709.0000},
{"Sample": "testSample",
"Total Reads": 32389200.0000,
"Uniquely Aligned Reads": 28693280.0000}]
input = pandas.DataFrame(input_underlying)
expected_output_underlying = [{"Sample": "testSample2",
"Total Reads": 37627298.00000,
"Aligned Reads": "Unavailable",
"Uniquely Aligned Reads": 28792709.00000,
"Percent Aligned": "Unavailable",
"Percent Uniquely Aligned": 76.52080,
"Notes": "Below Percent Uniquely Aligned threshold",
"Status": "check"},
{"Sample": "testSample",
"Total Reads": 32389200.00000,
"Aligned Reads": "Unavailable",
"Uniquely Aligned Reads": 28693280.00000,
"Percent Aligned": "Unavailable",
"Percent Uniquely Aligned": 88.58904,
"Notes": 'Below Total Reads threshold, Below Percent Uniquely Aligned threshold',
"Status": "check"}]
expected_output_unordered = pandas.DataFrame(expected_output_underlying)
expected_output = expected_output_unordered[["Sample", "Total Reads", "Aligned Reads", "Uniquely Aligned Reads",
"Percent Aligned", "Percent Uniquely Aligned", "Notes", "Status"]]
real_output = ns_test._annotate_stats(input, 'check', num_total_threshold=32500000,
percent_unique_aligned_threshold=90)
rounded_real_output = real_output.round(5)
self.assertTrue(expected_output.equals(rounded_real_output))
# region _calc_percentage
def test__calc_percentage_known(self):
input_numerator = pandas.Series([28792709.0000, 28693280.0000])
input_denominator = pandas.Series([37627298.0000, 32389200.0000])
expected_output_list = [76.520799, 88.589036]
real_output = ns_test._calc_percentage(input_numerator, input_denominator)
real_output_list = real_output.tolist()
self.assertEqual(len(expected_output_list), len(real_output_list))
for curr_index in range(0, len(expected_output_list)):
self.assertAlmostEqual(expected_output_list[curr_index], real_output_list[curr_index], 4)
def test__calc_percentage_unknown_numerator(self):
input_numerator = pandas.Series(["Unavailable", "Unavailable"])
input_denominator = pandas.Series([28792709.0000, 28693280.0000])
expected_output = pandas.Series(["Unavailable", "Unavailable"])
real_output = ns_test._calc_percentage(input_numerator, input_denominator)
self.assertTrue(expected_output.equals(real_output))
def test__calc_percentage_unknown_denominator(self):
input_numerator = pandas.Series([28792709.0000, 28693280.0000])
input_denominator = pandas.Series(["Unavailable", "Unavailable"])
|
import pytest
import json
import pandas as pd
from piper.xl import WorkBook
from piper.factory import bad_quality_orders
from piper.factory import xl_test_data
from pathlib import Path
relative_folder = Path(__file__).parents[1] / 'temp/'
@pytest.fixture
def sample_orders_01():
return bad_quality_orders()
@pytest.fixture
def sample_orders_02():
return xl_test_data()
def test_workbook_add_sheet_auto(sample_orders_01):
file_name = relative_folder / 'WorkBook - auto sheet.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto')
wb.close()
expected = 1
actual = wb.last_sheet_idx
assert expected == actual
def test_workbook_add_sheet_test_zoom(sample_orders_01):
file_name = relative_folder / 'WorkBook - zoom.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto', zoom=130)
wb.close()
expected = 130
actual = wb.sheet_dict.get('sheet1')[6]
assert expected == actual
def test_workbook_add_sheet_test_tab_color(sample_orders_01):
file_name = relative_folder / 'WorkBook - tab color.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto', tab_color='green')
wb.close()
expected = 'green'
actual = wb.sheet_dict.get('sheet1')[5]
assert expected == actual
def test_workbook_add_sheet_test_index(sample_orders_01):
file_name = relative_folder / 'WorkBook - with index.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto', index=True)
wb.close()
expected = True
actual = wb.sheet_dict.get('sheet1')[3]
assert expected == actual
def test_workbook_add_sheet_test_invalid_theme(sample_orders_01):
file_name = relative_folder / 'WorkBook - sheet dictionary meta.xlsx'
df = pd.DataFrame(sample_orders_01)
|
from functools import partial
import json
import logging
import os
from pkg_resources import resource_filename, Requirement
import pandas as pd
from requests.exceptions import HTTPError
from solarforecastarbiter.io.fetch import arm
from solarforecastarbiter.io.reference_observations import (
common, default_forecasts)
DEFAULT_SITEFILE = resource_filename(
Requirement.parse('solarforecastarbiter'),
'solarforecastarbiter/io/reference_observations/'
'arm_reference_sites.json')
DOE_ARM_SITE_VARIABLES = {
'qcrad': arm.IRRAD_VARIABLES,
'met': arm.MET_VARIABLES,
}
DOE_ARM_VARIABLE_MAP = {
'down_short_hemisp': 'ghi',
'short_direct_normal': 'dni',
'down_short_diffuse_hemisp': 'dhi',
'temp_mean': 'air_temperature',
'rh_mean': 'relative_humidity',
'wspd_arith_mean': 'wind_speed',
}
logger = logging.getLogger('reference_data')
def _determine_stream_vars(datastream):
"""Returns a list of variables available based on datastream name.
Parameters
----------
datastream: str
Datastream name, or the product name. This string is searched for
`met` or `qcrad` and returns a list of expected variables.
Returns
-------
list of str
The variable names that can be found in the file.
"""
available = []
for stream_type, arm_vars in DOE_ARM_SITE_VARIABLES.items():
if stream_type in datastream:
available = available + arm_vars
return available
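# Example (names are illustrative): a datastream id containing 'qcrad', e.g. 'sgpqcrad1longE13.c1',
# maps to arm.IRRAD_VARIABLES, a 'met' stream maps to arm.MET_VARIABLES, and a stream whose name
# contains both gets the concatenation of the two lists.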
def initialize_site_observations(api, site):
"""Creates an observation at the site for each variable in
the matched DOE_ARM_VARIABLE_MAP.
Parameters
----------
api : solarforecastarbiter.io.api.APISession
An active Reference user session.
site : datamodel.Site
The site object for which to create Observations.
"""
try:
site_extra_params = common.decode_extra_parameters(site)
except ValueError:
logger.error(f'Failed to initialize observations for {site.name} '
'extra parameters could not be loaded.')
return
site_vars = site_variables_from_extra_params(site_extra_params)
for sfa_var in site_vars:
logger.info(f'Creating {sfa_var} at {site.name}')
try:
common.create_observation(
api, site, sfa_var)
except HTTPError as e:
logger.error(f'Could not create Observation for "{sfa_var}" '
f'at DOE ARM site {site.name}')
logger.debug(f'Error: {e.response.text}')
def initialize_site_forecasts(api, site):
"""
Create a forecast for each variable at the site.
Parameters
----------
api : solarforecastarbiter.io.api.APISession
An active Reference user session.
site : datamodel.Site
The site object for which to create Forecasts.
"""
try:
site_extra_params = common.decode_extra_parameters(site)
except ValueError:
logger.error('Failed to initialize reference forecasts for '
f'{site.name} extra parameters could not be loaded.')
return
site_vars = site_variables_from_extra_params(site_extra_params)
common.create_forecasts(api, site, site_vars,
default_forecasts.TEMPLATE_FORECASTS)
def fetch(api, site, start, end, *, doe_arm_user_id, doe_arm_api_key):
"""Retrieve observation data for a DOE ARM site between start and end.
Parameters
----------
api : io.APISession
Unused but conforms to common.update_site_observations call
site : datamodel.Site
Site object with the appropriate metadata.
start : datetime
The beginning of the period to request data for.
end : datetime
The end of the period to request data for.
doe_arm_user_id : str
User ID to access the DOE ARM api.
doe_arm_api_key : str
API key to access the DOE ARM api.
Returns
-------
data : pandas.DataFrame
All of the requested data concatenated into a single DataFrame.
"""
try:
site_extra_params = common.decode_extra_parameters(site)
except ValueError:
return pd.DataFrame()
available_datastreams = site_extra_params['datastreams']
datastreams = {}
# Build a dict with top-level keys to 'met' and 'qcrad' if meteorological
# or irradiance data exists at the site. This is to later group dataframes
# created from each datastream by the type of data found in the stream.
for ds_type in ['met', 'qcrad']:
if ds_type in available_datastreams:
ds_type_dict = {}
streams = available_datastreams[ds_type]
# When a dict is present each key is a datastream and value is
# a date range for which the datastream contains data. We need to
# determine which streams to use to get all of the requested data.
if isinstance(streams, dict):
ds_type_dict.update(
find_stream_data_availability(streams, start, end))
else:
# If a single string datastream name exists, we assume that all
# available data is contained in the stream. Deferring to the
# data fetch process, which will fail to retrieve data and
# continue gracefully.
ds_type_dict[streams] = (start, end)
datastreams[ds_type] = ds_type_dict
site_dfs = []
for stream_type in datastreams:
# Stitch together all the datastreams with similar data.
stream_type_dfs = []
for datastream, date_range in datastreams[stream_type].items():
stream_df = arm.fetch_arm(
doe_arm_user_id,
doe_arm_api_key,
datastream,
_determine_stream_vars(datastream),
date_range[0].tz_convert(site.timezone),
date_range[1].tz_convert(site.timezone)
)
if stream_df.empty:
logger.warning(f'Datastream {datastream} for site {site.name} '
f'contained no entries from {start} to {end}.')
else:
stream_type_dfs.append(stream_df)
if stream_type_dfs:
# Concatenate all dataframes of similar data
stream_type_df = pd.concat(stream_type_dfs)
site_dfs.append(stream_type_df)
if site_dfs:
# Join dataframes with different variables along the index, this has
# the side effect of introducing missing data if any requests have
# failed.
obs_df = pd.concat(site_dfs, axis=1)
obs_df = obs_df.rename(columns=DOE_ARM_VARIABLE_MAP)
return obs_df
else:
logger.warning(f'Data for site {site.name} contained no entries from '
f'{start} to {end}.')
return pd.DataFrame()
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
    # Test for copying the dataframe using the class copy() method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
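# Note on the test above: nan_as_null=True treats NaN entries as missing data
# (they are reported by null_count), while nan_as_null=False keeps them as
# ordinary float NaN values with a null count of zero.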
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
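# A small, hedged sketch (assuming the same hash_columns/hash_values API
# exercised above): hashing a single column through hash_columns matches
# Series.hash_values for that column.
def test_dataframe_hash_columns_single_matches_series_sketch():
    gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    # Both calls return per-row hashes as device arrays; compare on host.
    np.testing.assert_array_equal(
        cupy.asnumpy(gdf.hash_columns(["a"])),
        cupy.asnumpy(gdf.a.hash_values()),
    )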
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
            # Take rows of the key columns and build a set of the key values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
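# A minimal illustrative sketch (not part of the original suite) of the
# partition_by_hash contract checked above: rows that share a key value always
# land in the same partition, so the key sets of different partitions are
# disjoint.
def test_dataframe_hash_partition_disjoint_keys_sketch():
    gdf = cudf.DataFrame({"key": [1, 2, 1, 3], "val": [10, 20, 30, 40]})
    parts = gdf.partition_by_hash(["key"], nparts=2)
    # Collect the set of key values present in each non-empty partition
    keysets = [set(p["key"].to_pandas()) for p in parts if len(p)]
    for i in range(len(keysets)):
        for j in range(i + 1, len(keysets)):
            # No key value may appear in more than one partition
            assert not (keysets[i] & keysets[j])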
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() upcasts numerical columns to float and casts
    # NaN to 0 in non-float numerical columns, so normalize the numeric
    # columns of `got` to float64 with NaN before comparing.
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # PyArrow's to_pandas() converts to a numpy array, which gives better
    # type compatibility for this comparison.
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
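# In both categorical round-trip tests above, "c" is not among the declared
# categories, so it is represented as a null on both the pandas/Arrow side and
# the cudf side.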
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
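# Note: with min_count, sum/product return NA for any column that has fewer
# than min_count valid (non-null) values; the xfail above tracks a pandas
# issue hit when min_count exceeds the total element count.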
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
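# preserve_index controls whether the "z" index column is written into the
# Arrow table; for either setting, pandas and cudf must produce identical
# tables and round-trip back to equal frames, as asserted above.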
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name can differ between runs, which sometimes
    # makes enc_with_name_arr and enc_arr come out the same. There is no
    # better way to force hash() to return a stable value, so an integer name
    # is used to get a constant value back from hash().
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Explicitly request float64 from pandas, since a list of None would
    # otherwise produce an `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
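# Note the negative-count semantics exercised above: head(-n) drops the last
# n rows and tail(-n) drops the first n rows, matching pandas.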
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
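# The TODO in test_dataframe_reindex_0 refers to a pandas quirk worth spelling out:
# reindexing an integer column onto labels that do not exist fills the new rows with
# NaN, which silently upcasts the column to float64. A minimal, hypothetical sketch
# of that behavior (illustration only, not part of the upstream test suite):
def _example_pandas_reindex_int_upcast():
    s = pd.Series([1, 2, 3], dtype="int64", index=[0, 1, 2])
    # label 3 is missing, so pandas fills NaN and the dtype becomes float64
    reindexed = s.reindex([0, 1, 2, 3])
    assert reindexed.dtype == np.dtype("float64")
    return reindexed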
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=0
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
# pandas only supports ignore_index in sort_index from v1.0, so emulate it below by resetting the index
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
assert np.array_equal(gdf[c].nullmask.to_array(), result[c].nullmask.to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
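# The comment at the top of test_all notes that pandas effectively treats None in an
# object column as True. A small hedged sketch of why (pandas skips NA entries when
# skipna defaults to True, so the None never participates in the reduction); the
# helper below is illustrative only and is not collected as a test:
def _example_pandas_none_in_object_all():
    s = pd.Series([True, None], dtype="object")
    # all() skips the None entry, so it returns True; replacing None with False,
    # as test_all does, makes the cudf/pandas comparison meaningful.
    return bool(s.all()), bool(s.replace([None], False).all())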
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
|
pd.Series([1.0, 2])
|
pandas.Series
|
import json
from decimal import Decimal
from oslo_config import cfg
import numpy as np
import pandas as pd
import pandas_schema
import datetime
from pandas_schema import Column
from pandas_schema.validation import CustomElementValidation
from st2common.runners.base_action import Action
__all__ = ["CleanCsvDataAction"]
class CleanCsvDataAction(Action):
def __init__(self, config):
super(CleanCsvDataAction, self).__init__(config)
self._config = self.config
self._data_file_path = self._config.get('data_file_path', None)
self._json_schema_path = self._config.get('json_schema_path', None)
if not self._config:
raise ValueError('Missing config yaml')
if not self._data_file_path:
raise ValueError('Missing CSV data file path in config file')
if not self._json_schema_path:
raise ValueError('Missing JSON Schema data file path in config file')
def check_decimal(self, dec):
try:
Decimal(dec)
except (ValueError, ArithmeticError):  # Decimal raises InvalidOperation (an ArithmeticError) for non-numeric input
return False
return True
def check_time_stamp(self, dt):
try:
pd.to_datetime(dt)
except ValueError:
return False
return True
def check_int(self, num):
try:
int(num)
except ValueError:
return False
return True
def run(self):
# define validation elements
self.logger.info('1. Starting data Clean Action ..')
system_packs_base_path = cfg.CONF.content.system_packs_base_path
path_of_pack = system_packs_base_path + '/monitor_ingest'
success = False
VALIDATORS = {
'decimal': CustomElementValidation(
lambda d: self.check_decimal(d), 'is not decimal'),
'int': CustomElementValidation(lambda i: self.check_int(i),
'is not integer'),
'null': CustomElementValidation(lambda d: d is not np.nan,
'this field cannot be null'),
'time_stamp': CustomElementValidation(
lambda d: self.check_time_stamp(d),
'time_stamp format is not valid')
}
self.logger.info('2. Loading Schema ..')
with open(self._json_schema_path, 'r') as my_json:
json_schema = json.load(my_json)
column_list = [Column(k, [VALIDATORS[v] for v in vals]) for k, vals in
json_schema.items()]
schema = pandas_schema.Schema(column_list)
self.logger.info('3. Loading CSV Data ..')
data =
|
pd.read_csv(self._data_file_path)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Important Variable Selection with SNPs
Created on Fri Jan 31 16:31:01 2020
@author: <NAME>
"""
# Import the libraries
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVR
from sklearn.linear_model import MultiTaskLassoCV, MultiTaskElasticNetCV, LassoCV, ElasticNetCV, MultiTaskElasticNet, MultiTaskLasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
# Using chunk size to read rice data
def read_x_cont():
chunksize = 100
X_ct = pd.DataFrame()
for chunk in pd.read_csv("X_cont_ls_el.csv",low_memory=False, chunksize=chunksize, memory_map=True):
X_ct = pd.concat([X_ct, chunk])
return(X_ct)
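# A minimal alternative sketch of the same chunked read (same assumed file name and
# chunk size as read_x_cont above): passing the chunk iterator straight to pd.concat
# avoids growing the frame one chunk at a time inside the loop.
def read_x_cont_concat(chunksize=100):
    chunks = pd.read_csv("X_cont_ls_el.csv", low_memory=False,
                         chunksize=chunksize, memory_map=True)
    return pd.concat(chunks)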
# Function of data preprocessing
def process_variable(X, y):
# Drop 'IID' columns
X = X.drop('IID', axis = 1)
# Split data to training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)
# Convert predictors to float32 (astype(float, 32) would pass 32 as the positional `copy` argument and yield float64)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# Apply the same scaling to both datasets
scaler = StandardScaler()
X_train_scl = scaler.fit_transform(X_train)
X_test_scl = scaler.transform(X_test) # we transform rather than fit_transform
return(X_train_scl, X_test_scl, y_train, y_test)
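# A small sketch (synthetic data, not the rice genotypes) of why process_variable()
# fits the scaler on the training split only and merely transforms the test split:
# fitting on the test data as well would leak its mean/variance into preprocessing.
def _scaling_leakage_sketch():
    import numpy as np  # numpy is needed only for this illustration
    rng = np.random.RandomState(0)
    X_tr = rng.normal(10, 2, size=(80, 3))
    X_te = rng.normal(10, 2, size=(20, 3))
    scaler = StandardScaler().fit(X_tr)  # statistics come from the training data only
    return scaler.transform(X_tr), scaler.transform(X_te)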
"""Random Forest Regressor"""
#Function to run random forest with grid search and k-fold cross-validation.
def get_rf_model(X_train, y_train, X_test, y_test):
# Hyperparameters search grid
rf_param_grid = {'bootstrap': [False, True],
'n_estimators': [60, 70, 80, 90, 100],
'max_features': [0.6, 0.65, 0.7, 0.75, 0.8],
'min_samples_leaf': [1],
'min_samples_split': [2]
}
# Instantiate random forest regressor
rf_estimator = RandomForestRegressor(random_state=None)
# Create the GridSearchCV object
rf_model = GridSearchCV(estimator=rf_estimator, param_grid=rf_param_grid, cv=10, scoring='neg_mean_squared_error', n_jobs=-1, iid = True)
# Train the regressor
rf_model.fit(X_train, y_train)
# Get the best model
rf_model_best = rf_model.best_estimator_
# Make predictions using the optimised parameters
rf_pred = rf_model_best.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, rf_pred)
# Find r-squared
r2 = r2_score(y_test, rf_pred)
best_prs = rf_model.best_params_
print("Best Parameters:\n", rf_model.best_params_)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
return(mse, r2, best_prs)
"""Support Vector Regressor"""
#Function to run support vector machine with grid search and k-fold cross-validation.
def get_svm_model(X_train, y_train, X_test, y_test):
# Parameter grid
svm_param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001, 10], "kernel": ["rbf"]}
# Create SVM grid search regressor
svm_grid = GridSearchCV(estimator = SVR(), param_grid= svm_param_grid, cv=10, scoring='neg_mean_squared_error', n_jobs=-1, iid = True)
# Train the regressor
svm_grid.fit(X_train, y_train)
# Get the best model
svm_model_best = svm_grid.best_estimator_
# Make predictions using the optimised parameters
svm_pred = svm_model_best.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, svm_pred)
# Find r-squared
r2 = r2_score(y_test, svm_pred)
best_prs = svm_grid.best_params_
print("Best Parameters:\n", svm_grid.best_params_)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
return(mse, r2, best_prs)
"""Lasso and Multi Task Lasso"""
#Lasso
def get_lasso_cv(X_train, y_train, X_test, y_test, cols):
# Create Lasso CV
ls_grid = LassoCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
ls_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
ls_pred = ls_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, ls_pred)
# Find r-squared
r2 = r2_score(y_test, ls_pred)
best_prs = ls_grid.alpha_
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
# Get coefficients of the model
coef = pd.DataFrame(ls_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Lasso picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Multi-task Lasso
def get_multitask_lasso_cv(X_train, y_train, X_test, y_test, cols):
# Create Multi-task Lasso CV
ls_grid = MultiTaskLassoCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
ls_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
ls_pred = ls_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, ls_pred)
# Find r-squared
r2 = r2_score(y_test, ls_pred)
best_prs = ls_grid.alpha_
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
# Get coefficients of the model
coef = pd.DataFrame(ls_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Multit-task Lasso picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
"""Elastic Net and Multi Task Elastic Net"""
# Elastic Net
def get_elasticnet_cv(X_train, y_train, X_test, y_test, cols):
# Create Elastic Net CV
el_grid = ElasticNetCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
el_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
el_pred = el_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, el_pred)
# Find r-squared
r2 = r2_score(y_test, el_pred)
best_prs = [el_grid.alpha_]
best_prs.append(el_grid.l1_ratio_)
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r-squared:', r2)
# Get coefficients of the model
coef = pd.DataFrame(el_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("ElasticNet picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Multi-task Elastic Net
def get_multitask_elasticnet_cv(X_train, y_train, X_test, y_test, cols):
# Create Multi Task Elastic Net CV
el_grid = MultiTaskElasticNetCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
el_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
el_pred = el_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, el_pred)
# Find r-squared
r2 = r2_score(y_test, el_pred)
best_prs = [el_grid.alpha_]
best_prs.append(el_grid.l1_ratio_)
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r-squared:', r2)
# Get coefficients of the model
coef = pd.DataFrame(el_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Multi-task ElasticNet picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Evaluate each trait separately with multi-task Lasso
def eval_mtls_split_trait(alpha, X_train, Y_train, X_test, Y_test):
# Create Multi-Task Lasso
ls_tfl_grw = MultiTaskLasso(alpha, random_state = 0)
# Train the regressor
ls_tfl_grw.fit(X_train, Y_train)
# Make predictions using the optimised parameters
ls_pred = ls_tfl_grw.predict(X_test)
# Find mean squared error
mse_tfl = mean_squared_error(Y_test[:, 0], ls_pred[:, 0])
mse_grw= mean_squared_error(Y_test[:, 1], ls_pred[:, 1])
# Find r-squared
r2_tfl = r2_score(Y_test[:, 0], ls_pred[:, 0])
r2_grw = r2_score(Y_test[:, 1], ls_pred[:, 1])
return(mse_tfl, mse_grw, r2_tfl, r2_grw)
# Evaluate each trait separately with multi-task Elastic Net
def eval_mtel_split_trait(alpha, l1_ratio, X_train, Y_train, X_test, Y_test):
# Create Multi-Task Elastic Net
el_tfl_grw = MultiTaskElasticNet(alpha, l1_ratio, random_state = 0)
# Train the regressor
el_tfl_grw.fit(X_train, Y_train)
# Make predictions using the optimised parameters
el_pred = el_tfl_grw.predict(X_test)
# Find mean squared error
mse_tfl = mean_squared_error(Y_test[:, 0], el_pred[:, 0])
mse_grw= mean_squared_error(Y_test[:, 1], el_pred[:, 1])
# Find r-squared
r2_tfl = r2_score(Y_test[:, 0], el_pred[:, 0])
r2_grw = r2_score(Y_test[:, 1], el_pred[:, 1])
return(mse_tfl, mse_grw, r2_tfl, r2_grw)
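# A hedged usage sketch of the two per-trait evaluators above. The arguments (a tuned
# Lasso alpha, an Elastic Net (alpha, l1_ratio) pair, and the train/test splits) are
# placeholders for values produced elsewhere in this script, e.g. by
# get_multitask_lasso_cv and get_multitask_elasticnet_cv.
def eval_both_traits(ls_alpha, el_params, X_train, Y_train, X_test, Y_test):
    ls_scores = eval_mtls_split_trait(ls_alpha, X_train, Y_train, X_test, Y_test)
    el_scores = eval_mtel_split_trait(el_params[0], el_params[1],
                                      X_train, Y_train, X_test, Y_test)
    # Each tuple is (mse_tfl, mse_grw, r2_tfl, r2_grw)
    return {"multitask_lasso": ls_scores, "multitask_elastic_net": el_scores}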
if __name__ == '__main__':
print("")
print("")
print("|============================================================================|")
print("| |")
print("| ----- IMPORTANT VARIABLE SELECTION WITH SNPS ----- |")
print("| |")
print("|============================================================================|")
print("")
print("")
print("********************************* INPUT DATA *********************************")
print("")
print("Import data may take several minutes, please wait...")
print("")
# Import data
X_cont = read_x_cont()
cols = X_cont.columns[1::]
# Load data after pre-processinng
y_tfl = pd.read_csv("y_tfl.csv", header=None)
y_grw = pd.read_csv("y_grw.csv", header=None)
y_tfl_grw = pd.read_csv("y_tfl_grw.csv", header=None)
X_grw_2 = pd.read_csv("X_grw_2.csv", header='infer')
X_grw_3 = pd.read_csv("X_grw_3.csv", header='infer')
X_grw_4 = pd.read_csv("X_grw_4.csv", header='infer')
X_grw_5 = pd.read_csv("X_grw_5.csv", header='infer')
X_tfl_2 = pd.read_csv("X_tfl_2.csv", header='infer')
X_tfl_3 = pd.read_csv("X_tfl_3.csv", header='infer')
X_tfl_4 = pd.read_csv("X_tfl_4.csv", header='infer')
X_tfl_5 = pd.read_csv("X_tfl_5.csv", header='infer')
X_tfl_6 = pd.read_csv("X_tfl_6.csv", header='infer')
X_tfl_grw_2 = pd.read_csv("X_tfl_grw_2.csv", header='infer')
X_tfl_grw_25 = pd.read_csv("X_tfl_grw_25.csv", header='infer')
X_tfl_grw_1 = pd.read_csv("X_tfl_grw_1.csv", header='infer')
X_tfl_grw_75 = pd.read_csv("X_tfl_grw_75.csv", header='infer')
X_tfl_grw_3 = pd.read_csv("X_tfl_grw_3.csv", header='infer')
print("")
# Transform response variables to matrix type.
y_tfl = y_tfl.values.ravel()
y_grw = y_grw.values.ravel()
y_tfl_grw = y_tfl_grw.values
# Normalize rice data
X_grw_2_train, X_grw_2_test, y_grw_2_train, y_grw_2_test = process_variable(X_grw_2, y_grw)
X_grw_3_train, X_grw_3_test, y_grw_3_train, y_grw_3_test = process_variable(X_grw_3, y_grw)
X_grw_4_train, X_grw_4_test, y_grw_4_train, y_grw_4_test = process_variable(X_grw_4, y_grw)
X_grw_5_train, X_grw_5_test, y_grw_5_train, y_grw_5_test = process_variable(X_grw_5, y_grw)
X_tfl_2_train, X_tfl_2_test, y_tfl_2_train, y_tfl_2_test = process_variable(X_tfl_2, y_tfl)
X_tfl_3_train, X_tfl_3_test, y_tfl_3_train, y_tfl_3_test = process_variable(X_tfl_3, y_tfl)
X_tfl_4_train, X_tfl_4_test, y_tfl_4_train, y_tfl_4_test = process_variable(X_tfl_4, y_tfl)
X_tfl_5_train, X_tfl_5_test, y_tfl_5_train, y_tfl_5_test = process_variable(X_tfl_5, y_tfl)
X_tfl_6_train, X_tfl_6_test, y_tfl_6_train, y_tfl_6_test = process_variable(X_tfl_6, y_tfl)
X_tfl_grw_2_train, X_tfl_grw_2_test, y_tfl_grw_2_train, y_tfl_grw_2_test = process_variable(X_tfl_grw_2, y_tfl_grw)
X_tfl_grw_25_train, X_tfl_grw_25_test, y_tfl_grw_25_train, y_tfl_grw_25_test = process_variable(X_tfl_grw_25, y_tfl_grw)
X_tfl_grw_1_train, X_tfl_grw_1_test, y_tfl_grw_1_train, y_tfl_grw_1_test = process_variable(X_tfl_grw_1, y_tfl_grw)
X_tfl_grw_75_train, X_tfl_grw_75_test, y_tfl_grw_75_train, y_tfl_grw_75_test = process_variable(X_tfl_grw_75, y_tfl_grw)
X_tfl_grw_3_train, X_tfl_grw_3_test, y_tfl_grw_3_train, y_tfl_grw_3_test = process_variable(X_tfl_grw_3, y_tfl_grw)
X_grw_train, X_grw_test, y_grw_train, y_grw_test = process_variable(X_cont, y_grw)
X_tfl_train, X_tfl_test, y_tfl_train, y_tfl_test = process_variable(X_cont, y_tfl)
X_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_train, y_tfl_grw_test = process_variable(X_cont, y_tfl_grw)
print("")
print("******************************* TRAINING MODELS *****************************")
print("")
rf_grw_mse = []
rf_grw_r2 = []
rf_tfl_mse = []
rf_tfl_r2 = []
rf_grw_prs = []
rf_tfl_prs = []
rf_tfl_grw_mse_0 = []
rf_tfl_grw_r2_0 = []
rf_tfl_grw_prs_0 = []
rf_tfl_grw_mse_1 = []
rf_tfl_grw_r2_1 = []
rf_tfl_grw_prs_1 = []
svr_grw_mse = []
svr_grw_r2 = []
svr_tfl_mse = []
svr_tfl_r2 = []
svr_grw_prs = []
svr_tfl_prs = []
svr_tfl_grw_mse_0 = []
svr_tfl_grw_r2_0 = []
svr_tfl_grw_prs_0 = []
svr_tfl_grw_mse_1 = []
svr_tfl_grw_r2_1 = []
svr_tfl_grw_prs_1 = []
# Filtering variables by p_value.
p_value = ['<=5e-6', '<=5e-5', '<=5e-4', '<=5e-3', '<=5e-2']
p_value_2 = ['<=5e-3','<=7.5e-3', '<=1e-2', '<=2.5e-2', '<=5e-2']
print("Find mse and r-squared for random forest model of grain weight...")
rf_grw_mse_2, rf_grw_r2_2, rf_grw_prs_2 = get_rf_model(X_grw_2_train, y_grw_2_train, X_grw_2_test, y_grw_2_test)
rf_grw_mse.append(rf_grw_mse_2)
rf_grw_r2.append(rf_grw_r2_2)
rf_grw_prs.append(rf_grw_prs_2)
rf_grw_mse_3, rf_grw_r2_3, rf_grw_prs_3 = get_rf_model(X_grw_3_train, y_grw_3_train, X_grw_3_test, y_grw_3_test)
rf_grw_mse.append(rf_grw_mse_3)
rf_grw_r2.append(rf_grw_r2_3)
rf_grw_prs.append(rf_grw_prs_3)
rf_grw_mse_4, rf_grw_r2_4, rf_grw_prs_4 = get_rf_model(X_grw_4_train, y_grw_4_train, X_grw_4_test, y_grw_4_test)
rf_grw_mse.append(rf_grw_mse_4)
rf_grw_r2.append(rf_grw_r2_4)
rf_grw_prs.append(rf_grw_prs_4)
rf_grw_mse_5, rf_grw_r2_5, rf_grw_prs_5 = get_rf_model(X_grw_5_train, y_grw_5_train, X_grw_5_test, y_grw_5_test)
rf_grw_mse.append(rf_grw_mse_5)
rf_grw_r2.append(rf_grw_r2_5)
rf_grw_prs.append(rf_grw_prs_5)
rf_grw = pd.DataFrame({'rf_grw_mse':rf_grw_mse[::-1], 'rf_grw_r2':rf_grw_r2[::-1], 'rf_grw_prs':rf_grw_prs[::-1]})
rf_grw.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
rf_grw.to_csv('rf_grw.csv')
print('RF of grain weight is saved')
print("Find mse and r-squared for random forest model of time to flowering...")
rf_tfl_mse_2, rf_tfl_r2_2, rf_tfl_prs_2 = get_rf_model(X_tfl_2_train, y_tfl_2_train, X_tfl_2_test, y_tfl_2_test)
rf_tfl_mse.append(rf_tfl_mse_2)
rf_tfl_r2.append(rf_tfl_r2_2)
rf_tfl_prs.append(rf_tfl_prs_2)
rf_tfl_mse_3, rf_tfl_r2_3, rf_tfl_prs_3 = get_rf_model(X_tfl_3_train, y_tfl_3_train, X_tfl_3_test, y_tfl_3_test)
rf_tfl_mse.append(rf_tfl_mse_3)
rf_tfl_r2.append(rf_tfl_r2_3)
rf_tfl_prs.append(rf_tfl_prs_3)
rf_tfl_mse_4, rf_tfl_r2_4, rf_tfl_prs_4 = get_rf_model(X_tfl_4_train, y_tfl_4_train, X_tfl_4_test, y_tfl_4_test)
rf_tfl_mse.append(rf_tfl_mse_4)
rf_tfl_r2.append(rf_tfl_r2_4)
rf_tfl_prs.append(rf_tfl_prs_4)
rf_tfl_mse_5, rf_tfl_r2_5, rf_tfl_prs_5 = get_rf_model(X_tfl_5_train, y_tfl_5_train, X_tfl_5_test, y_tfl_5_test)
rf_tfl_mse.append(rf_tfl_mse_5)
rf_tfl_r2.append(rf_tfl_r2_5)
rf_tfl_prs.append(rf_tfl_prs_5)
rf_tfl_mse_6, rf_tfl_r2_6, rf_tfl_prs_6 = get_rf_model(X_tfl_6_train, y_tfl_6_train, X_tfl_6_test, y_tfl_6_test)
rf_tfl_mse.append(rf_tfl_mse_6)
rf_tfl_r2.append(rf_tfl_r2_6)
rf_tfl_prs.append(rf_tfl_prs_6)
rf_tfl = pd.DataFrame({'rf_tfl_mse':rf_tfl_mse[::-1], 'rf_tfl_r2':rf_tfl_r2[::-1], 'rf_tfl_prs':rf_tfl_prs[::-1]})
rf_tfl.set_index(pd.Index(p_value), 'p_value', inplace = True)
rf_tfl.to_csv('rf_tfl.csv')
print('RF of time to flowering is saved')
print("Find mse and r-squared for random forest model of time to flowering and grain weight...")
# Output is time to flowering
rf_tfl_grw_mse_2_0, rf_tfl_grw_r2_2_0, rf_tfl_grw_prs_2_0 = get_rf_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 0], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_2_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_2_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_2_0)
rf_tfl_grw_mse_25_0, rf_tfl_grw_r2_25_0, rf_tfl_grw_prs_25_0 = get_rf_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 0], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_25_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_25_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_25_0)
rf_tfl_grw_mse_1_0, rf_tfl_grw_r2_1_0, rf_tfl_grw_prs_1_0 = get_rf_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 0], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_1_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_1_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_1_0)
rf_tfl_grw_mse_75_0, rf_tfl_grw_r2_75_0, rf_tfl_grw_prs_75_0 = get_rf_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 0], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_75_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_75_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_75_0)
rf_tfl_grw_mse_3_0, rf_tfl_grw_r2_3_0, rf_tfl_grw_prs_3_0 = get_rf_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 0], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_3_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_3_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_3_0)
rf_tfl_grw_0 = pd.DataFrame({'rf_tfl_grw_mse_0':rf_tfl_grw_mse_0[::-1], 'rf_tfl_grw_r2_0':rf_tfl_grw_r2_0[::-1], 'rf_tfl_grw_prs_0':rf_tfl_grw_prs_0[::-1]})
rf_tfl_grw_0.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
rf_tfl_grw_0.to_csv('rf_tfl_grw_0.csv')
# Output is grain weight
rf_tfl_grw_mse_2_1, rf_tfl_grw_r2_2_1, rf_tfl_grw_prs_2_1 = get_rf_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 1], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_2_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_2_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_2_1)
rf_tfl_grw_mse_25_1, rf_tfl_grw_r2_25_1, rf_tfl_grw_prs_25_1 = get_rf_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 1], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_25_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_25_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_25_1)
rf_tfl_grw_mse_1_1, rf_tfl_grw_r2_1_1, rf_tfl_grw_prs_1_1 = get_rf_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 1], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_1_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_1_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_1_1)
rf_tfl_grw_mse_75_1, rf_tfl_grw_r2_75_1, rf_tfl_grw_prs_75_1 = get_rf_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 1], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_75_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_75_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_75_1)
rf_tfl_grw_mse_3_1, rf_tfl_grw_r2_3_1, rf_tfl_grw_prs_3_1 = get_rf_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 1], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_3_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_3_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_3_1)
rf_tfl_grw_1 = pd.DataFrame({'rf_tfl_grw_mse_1':rf_tfl_grw_mse_1[::-1], 'rf_tfl_grw_r2_1':rf_tfl_grw_r2_1[::-1], 'rf_tfl_grw_prs_1':rf_tfl_grw_prs_1[::-1]})
rf_tfl_grw_1.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
rf_tfl_grw_1.to_csv('rf_tfl_grw_1.csv')
print('RF of time to flowering and grain weight is saved')
print("Find mse and r-squared for svm model of grain weight...")
svr_grw_mse_2, svr_grw_r2_2, svr_grw_prs_2 = get_svm_model(X_grw_2_train, y_grw_2_train, X_grw_2_test, y_grw_2_test)
svr_grw_mse.append(svr_grw_mse_2)
svr_grw_r2.append(svr_grw_r2_2)
svr_grw_prs.append(svr_grw_prs_2)
svr_grw_mse_3, svr_grw_r2_3, svr_grw_prs_3 = get_svm_model(X_grw_3_train, y_grw_3_train, X_grw_3_test, y_grw_3_test)
svr_grw_mse.append(svr_grw_mse_3)
svr_grw_r2.append(svr_grw_r2_3)
svr_grw_prs.append(svr_grw_prs_3)
svr_grw_mse_4, svr_grw_r2_4, svr_grw_prs_4 = get_svm_model(X_grw_4_train, y_grw_4_train, X_grw_4_test, y_grw_4_test)
svr_grw_mse.append(svr_grw_mse_4)
svr_grw_r2.append(svr_grw_r2_4)
svr_grw_prs.append(svr_grw_prs_4)
svr_grw_mse_5, svr_grw_r2_5, svr_grw_prs_5 = get_svm_model(X_grw_5_train, y_grw_5_train, X_grw_5_test, y_grw_5_test)
svr_grw_mse.append(svr_grw_mse_5)
svr_grw_r2.append(svr_grw_r2_5)
svr_grw_prs.append(svr_grw_prs_5)
svr_grw = pd.DataFrame({'svr_grw_mse':svr_grw_mse[::-1], 'svr_grw_r2':svr_grw_r2[::-1], 'svr_grw_prs':svr_grw_prs[::-1]})
svr_grw.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
svr_grw.to_csv('svr_grw.csv')
print('SVR of grain weight is saved')
print("Find mse and r-squared for svm model of time to flowering...")
svr_tfl_mse_2, svr_tfl_r2_2, svr_tfl_prs_2 = get_svm_model(X_tfl_2_train, y_tfl_2_train, X_tfl_2_test, y_tfl_2_test)
svr_tfl_mse.append(svr_tfl_mse_2)
svr_tfl_r2.append(svr_tfl_r2_2)
svr_tfl_prs.append(svr_tfl_prs_2)
svr_tfl_mse_3, svr_tfl_r2_3, svr_tfl_prs_3 = get_svm_model(X_tfl_3_train, y_tfl_3_train, X_tfl_3_test, y_tfl_3_test)
svr_tfl_mse.append(svr_tfl_mse_3)
svr_tfl_r2.append(svr_tfl_r2_3)
svr_tfl_prs.append(svr_tfl_prs_3)
svr_tfl_mse_4, svr_tfl_r2_4, svr_tfl_prs_4 = get_svm_model(X_tfl_4_train, y_tfl_4_train, X_tfl_4_test, y_tfl_4_test)
svr_tfl_mse.append(svr_tfl_mse_4)
svr_tfl_r2.append(svr_tfl_r2_4)
svr_tfl_prs.append(svr_tfl_prs_4)
svr_tfl_mse_5, svr_tfl_r2_5, svr_tfl_prs_5 = get_svm_model(X_tfl_5_train, y_tfl_5_train, X_tfl_5_test, y_tfl_5_test)
svr_tfl_mse.append(svr_tfl_mse_5)
svr_tfl_r2.append(svr_tfl_r2_5)
svr_tfl_prs.append(svr_tfl_prs_5)
svr_tfl_mse_6, svr_tfl_r2_6, svr_tfl_prs_6 = get_svm_model(X_tfl_6_train, y_tfl_6_train, X_tfl_6_test, y_tfl_6_test)
svr_tfl_mse.append(svr_tfl_mse_6)
svr_tfl_r2.append(svr_tfl_r2_6)
svr_tfl_prs.append(svr_tfl_prs_6)
svr_tfl = pd.DataFrame({'svr_tfl_mse':svr_tfl_mse[::-1], 'svr_tfl_r2':svr_tfl_r2[::-1], 'svr_tfl_prs':svr_tfl_prs[::-1]})
svr_tfl.set_index(pd.Index(p_value), 'p_value', inplace = True)
svr_tfl.to_csv('svr_tfl.csv')
print('SVR of time to flowering is saved')
print("Find mse and r-squared for svm model of time to flowering and grain weight... ")
# Output is time to flowering
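# Note (hedged reading of the code below): the joint time-to-flowering/grain-weight case is
# handled as two independent single-output fits rather than one multi-output regressor; the
# first block regresses on y[:, 0] (time to flowering) and the second on y[:, 1] (grain weight),
# each over the same feature subsets.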
svr_tfl_grw_mse_2_0, svr_tfl_grw_r2_2_0, svr_tfl_grw_prs_2_0 = get_svm_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 0], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_2_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_2_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_2_0)
svr_tfl_grw_mse_25_0, svr_tfl_grw_r2_25_0, svr_tfl_grw_prs_25_0 = get_svm_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 0], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_25_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_25_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_25_0)
svr_tfl_grw_mse_1_0, svr_tfl_grw_r2_1_0, svr_tfl_grw_prs_1_0 = get_svm_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 0], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_1_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_1_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_1_0)
svr_tfl_grw_mse_75_0, svr_tfl_grw_r2_75_0, svr_tfl_grw_prs_75_0 = get_svm_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 0], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_75_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_75_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_75_0)
svr_tfl_grw_mse_3_0, svr_tfl_grw_r2_3_0, svr_tfl_grw_prs_3_0 = get_svm_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 0], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_3_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_3_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_3_0)
svr_tfl_grw_0 = pd.DataFrame({'svr_tfl_grw_mse_0':svr_tfl_grw_mse_0[::-1], 'svr_tfl_grw_r2_0':svr_tfl_grw_r2_0[::-1], 'svr_tfl_grw_prs_0':svr_tfl_grw_prs_0[::-1]})
svr_tfl_grw_0.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
svr_tfl_grw_0.to_csv('svr_tfl_grw_0.csv')
# Output is grain weight
svr_tfl_grw_mse_2_1, svr_tfl_grw_r2_2_1, svr_tfl_grw_prs_2_1 = get_svm_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 1], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_2_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_2_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_2_1)
svr_tfl_grw_mse_25_1, svr_tfl_grw_r2_25_1, svr_tfl_grw_prs_25_1 = get_svm_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 1], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_25_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_25_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_25_1)
svr_tfl_grw_mse_1_1, svr_tfl_grw_r2_1_1, svr_tfl_grw_prs_1_1 = get_svm_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 1], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_1_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_1_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_1_1)
svr_tfl_grw_mse_75_1, svr_tfl_grw_r2_75_1, svr_tfl_grw_prs_75_1 = get_svm_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 1], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_75_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_75_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_75_1)
svr_tfl_grw_mse_3_1, svr_tfl_grw_r2_3_1, svr_tfl_grw_prs_3_1 = get_svm_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 1], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_3_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_3_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_3_1)
svr_tfl_grw_1 = pd.DataFrame({'svr_tfl_grw_mse_1':svr_tfl_grw_mse_1[::-1], 'svr_tfl_grw_r2_1':svr_tfl_grw_r2_1[::-1], 'svr_tfl_grw_prs_1':svr_tfl_grw_prs_1[::-1]})
svr_tfl_grw_1.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
svr_tfl_grw_1.to_csv('svr_tfl_grw_1.csv')
print("")
print("Create data frames...")
print("")
grw_mse = pd.DataFrame({'rf_grw_mse':rf_grw_mse[::-1], 'svr_grw_mse':svr_grw_mse[::-1]})
grw_mse.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
grw_r2 = pd.DataFrame({'rf_grw_r2':rf_grw_r2[::-1], 'svr_grw_r2':svr_grw_r2[::-1]})
grw_r2.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
tfl_mse =
|
pd.DataFrame({'rf_tfl_mse':rf_tfl_mse[::-1], 'svr_tfl_mse':svr_tfl_mse[::-1]})
|
pandas.DataFrame
|
import sys
def ipython_info():
ip = False
if 'ipykernel' in sys.modules:
ip = 'notebook'
elif 'IPython' in sys.modules:
ip = 'terminal'
return ip
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from scipy.integrate import odeint
if ipython_info() == "notebook":
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
# Custom library
from .state import State
from .states import CompartmentStates
from .network import CompartmentNetwork
from ..params.metrics import custom_loss
from ..params.optimizer import ParamsOptimizer
from ..policies.utils import multiple_sigmoid_response
class CompartmentalModel:
def __init__(self,compartments,params = None,dimensions = None,offset = None,I0 = 1,initial_state = None,start_state = None,start_date = None):
self._states = compartments
self._dimensions = dimensions
self._offset = offset
self._I0 = I0
self._initial_state = initial_state if initial_state is not None else compartments[0]
self._start_state = start_state if start_state is not None else compartments[1]
self._start_date = start_date
self.params = params
assert self._start_state in self._states
assert self._initial_state in self._states
if self._dimensions is not None:
assert isinstance(self._dimensions,dict)
self._compartments = ["_".join(x) for x in itertools.product(self.states,*self.dimensions.values())]
else:
self._compartments = self.states
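        # Illustrative example (values assumed, not from this file): states ["S", "I", "R"] with
        # dimensions {"age": ["young", "old"]} expand to compartments
        # ["S_young", "S_old", "I_young", "I_old", "R_young", "R_old"].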
self.network = CompartmentNetwork(self.compartments)
@property
def offset(self):
if self._offset is None:
return 0
else:
return int(self._offset)
@property
def I0(self):
return self._I0
@property
def start_state(self):
return self._start_state
@property
def initial_state(self):
return self._initial_state
@property
def start_date(self):
return self._start_date
@property
def states(self):
return self._states
@property
def granularity(self):
if self._dimensions is None:
return None
else:
return list(self.dimensions.keys())
@property
def dimensions_product(self):
return list(itertools.product(*self.dimensions.values()))
@property
def dimensions(self):
if self._dimensions is None:
return {}
else:
return self._dimensions
@property
def compartments(self):
return self._compartments
@property
def compartments_index(self):
        return pd.MultiIndex.from_product([self.states, *self.dimensions.values()])
@staticmethod
def make_callable(value):
if callable(value):
return value
elif isinstance(value, dict):
assert "dates" in value.keys() and "values" in value.keys(), "You provided a dictionnary as values, it shouldhave two keys: dates and values"
return lambda y,t : value["values"][next(i for i,x in enumerate(value["dates"] + [np.infty]) if x>=t)-1]
else:
return lambda y,t : value
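    # Hedged usage sketch of make_callable (illustrative values, not from this file):
    #   make_callable(0.5)(None, 10)                      -> 0.5 (constant parameter)
    #   make_callable(lambda y, t: 0.5 * np.exp(-t))      -> returned unchanged
    #   make_callable({"dates": [...], "values": [...]})  -> piecewise-constant schedule over t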
def solve(self,n_days = 100,init_state = None,start_date = None,d = 1):
"""Main ODE solver function to predict future population values in each compartments
The function will use the network created by transitions between compartments
- Derivatives are computed using transitions
- ODE system is integrated using scipy odeint solver
Args:
            init_state (dict, list, tuple, numpy array): initial population per compartment used to seed the solver
            n_days (int): number of days to run the solver for, i.e. the prediction horizon
            start_date (str or datetime): use real dates instead of day indices
        Returns:
            states (CompartmentStates) - a custom pd.DataFrame with population by compartment over time
"""
# If init state is not given we use I0
if init_state is None:
assert self.start_state is not None
init_state = int(self.I0)
# Transform init_state into state object
init_state = self.make_state(init_state)
# Safety checks
tol = 2
assert hasattr(self,"compartments")
assert len(init_state) == len(self.compartments)
# assert hasattr(self,"N")
# assert np.abs(init_state.sum() - self.N) < tol,f"Init state {init_state.values} does not sum to total population {self.N}"
assert n_days > self.offset
# Grid of time points (in days)
# Take offset into account
offset = self.offset
t = np.linspace(0, n_days - offset, (n_days - offset +1)*d)
# Integrate the model equations over the time grid, t.
states = odeint(self.derivative, init_state, t)
# Converts to DataFrame and then to custom object
states = pd.DataFrame(states,columns = self.compartments)
if d > 1:
states.index = states.index / d
        # Take the offset into account
if offset > 0:
states.index = range(offset,n_days + 1)
states = states.reindex(range(0,n_days + 1))
states = states.fillna(method = "bfill")
elif offset < 0:
states.index = [x + offset for x in states.index]
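        # Example of the offset handling above (hedged reading): with offset=3 and n_days=10 the
        # solver output is indexed 3..10, then reindexed to 0..10 with the first rows backfilled
        # from day 3; a negative offset simply shifts the index backwards.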
# Convert to custom object
states = CompartmentStates(states)
states.build_aggregates(self.states)
# If start date is given, convert to dates
if self.start_date is not None:
start_date = self.start_date
if start_date is not None:
index =
|
pd.to_datetime(start_date)
|
pandas.to_datetime
|
from tidyms import container
from tidyms._names import *
import numpy as np
import pandas as pd
import pytest
import os
def test_class_getter(data_container_with_order):
data = data_container_with_order
class_series = pd.Series(data=data.classes, index=data.classes.index)
assert data.classes.equals(class_series)
def test_class_setter(data_container_with_order):
data = data_container_with_order
class_series = pd.Series(data=data.classes, index=data.classes.index)
#set classes to an arbitrary value
data.classes = 4
data.classes = class_series
assert data.classes.equals(class_series)
def test_batch_getter(data_container_with_order):
data = data_container_with_order
batch_series = pd.Series(data=data.batch, index=data.batch.index)
assert data.batch.equals(batch_series)
def test_batch_getter_no_batch_information(data_container_without_order):
data = data_container_without_order
with pytest.raises(container.BatchInformationError):
data.batch
def test_batch_setter(data_container_with_order):
data = data_container_with_order
b = np.arange(data.data_matrix.shape[0])
batch_series = pd.Series(data=b, index=data.batch.index)
data.batch = b
assert data.batch.equals(batch_series)
def test_order_getter(data_container_with_order):
data = data_container_with_order
order_series =
|
pd.Series(data=data.order, index=data.order.index)
|
pandas.Series
|
import pathlib
import numpy as np
import pandas as pd
from src.lattice_utils import Topology
from src.errorAnalysis import compute_NMSE
## define lattice based on output of main_predict.py
# sample no. (leave at 1 if only single stiffness has been passed)
sample = 1
# n-th best prediction of selected sample
pred = 1
# set to true for plotting
plot_lattice = True
# set to true for exporting nodes and connectivities
export_lattice = True
print('Export predicted truss.\n-------------------------------------')
# create directory
pathlib.Path('prediction/conn_coord').mkdir(exist_ok=True)
# load predicted lattice and corresponding target & predicted stiffness
lattice_descriptors = pd.read_csv("prediction/full_pred.csv")
C_target =
|
pd.read_csv("prediction/C_target.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 21:39:47 2020
@author: fisherd
"""
import os
import time
from pathlib import Path
import censusgeocode as cg
import pandas as pd
from CREDA_tools.geocoding import validators
class CensusValidator(validators.AddressValidator):
'''This class runs data through the Census Validator/Geocoder'''
score_dict = {'Exact':0.9,
'Non_Exact':0.7
}
def __init__(self, address_df):
super().__init__(address_df)
self.temp_file = Path.cwd() / "Census_temp.csv"
self.process_addresses()
def process_addresses(self):
self.address_df = self.run_validator_matches(self.address_df)
def run_validator_matches(self, to_process):
'''Returns validated, Geocoded addresses for self.address_df, using the Census tool'''
print('\nBeginning Census Geocoding')
to_process = to_process[['single_address', 'city', 'state', 'postal']]
to_return = pd.DataFrame()
start = 0
end = increment = 900
while end < to_process.shape[0]:
print(f'\tSending from {start} to {end-1} to Census')
temp = to_process[start:end]
temp.to_csv('temp_census.txt', header=False)
result = cg.addressbatch("temp_census.txt")
ordered = pd.DataFrame.from_dict(result)
if to_return.shape[0] == 0:
to_return = ordered
else:
to_return = pd.concat([to_return, ordered])
start = end
end = end + increment
time.sleep(10)
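        # Batching sketch (hedged reading of the loop above): addresses are submitted in chunks of
        # 900 rows per request to the Census batch geocoder, results are concatenated as they
        # arrive, and a 10 s pause spaces out successive calls; the remainder is sent below.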
print(f'\tSending from {start} to {to_process.shape[0]} to Census')
temp = to_process[start:end]
temp.to_csv(self.temp_file, header=False)
result = cg.addressbatch(f'{self.temp_file.name}')
ordered =
|
pd.DataFrame.from_dict(result)
|
pandas.DataFrame.from_dict
|
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
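    # Hedged summary of the convention exercised above: Period.asfreq(freq, how) with
    # how in {'S', 's'} anchors the converted period at the start of the original span and
    # how in {'E', 'e'} at its end, e.g. Period(freq='A', year=2007).asfreq('M', 'S') is 2007-01
    # while .asfreq('M', 'E') is 2007-12.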
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI =
|
Period(freq='W-FRI', year=2007, month=1, day=5)
|
pandas.Period
|
import glob
import os
import sys
# these imports and usings need to be in the same order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
    # Load in vaccination data by state and date, which should have the same dates as the
    # NNDSS/linelist data; use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
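    # Resulting layout (hedged reading of the pivot above): prop_all is a date-indexed frame with
    # one column per jurisdiction holding the proportion of respondents answering "Always",
    # e.g. prop_all.loc["2021-08-01", "NSW"] would give the NSW micro-distancing proportion on
    # that day (illustrative lookup, not from this file).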
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
    # Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
        # cap min and max at historical values or (-50, 10)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
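            # Update sketch (hedged summary of the default, no-scenario branch below): at step i
            #   x_{i+1} = x_i + p * N(mu, cov) + (1 - p) * N(0.05 * (baseline - x_i), cov)
            # with p = (n_forecast - i) / n_forecast, so early steps mostly follow the recent
            # trend and later steps are pulled back towards the long-run baseline.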
for i in range(n_forecast):
# ## SCENARIO MODELLING
                # This code chunk will allow you to manually set the distancing params for a state to allow for scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
                        # set adjusted baselines by eye for now, need to get this automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
                        # the force with which we trend towards the baseline above
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
        # forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
            # This code chunk will allow you to manually set the distancing params for a state to allow for scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
## currently not forecasting masks — may return in the future but will need to assess.
# forecast mask wearing compliance
        # Get a baseline value of mask wearing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
            # This code chunk will allow you to manually set the distancing params for a state to allow for scenario modelling.
if scenarios[state] == "":
                # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
                        # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
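        # Moment-matching sketch for the Beta draws below (hedged reading): for a Beta(a, b) with
        # mean m and variance v, a = m * (m * (1 - m) / v - 1) and b = (1 - m) * (m * (1 - m) / v - 1);
        # e.g. m = 0.7 and v = 5e-5 give a ~ 2939 and b ~ 1260, i.e. draws tightly centred on 0.7.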
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
        elif i == len(predictors) + 1:  # this plots the mask wearing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the VE time series and create an adjusted timeseries from March 1st
# that assumes no vaccination effect prior to the first date in the VE data
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple of NA's early on in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
# this is just a single-row df of ones with all the missing dates as columns; it is reused for each of the 8 jurisdictions
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = {state}
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
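# --- illustrative sketch (not part of the pipeline) ---------------------------
# The block above prepends a period of ones (i.e. no vaccination effect) to the
# delta VE series before clipping and saving it. The helper below is a minimal,
# hypothetical sketch of that prepend-and-concat pattern; the dates and effect
# values are made up and the function is never called.
def _ve_prepend_example():
    import numpy as np
    import pandas as pd
    effect = pd.Series(
        [0.95, 0.90, 0.85],
        index=pd.date_range("2021-02-22", periods=3, freq="D"),
    )
    pre_dates = pd.date_range(
        "2021-02-19", effect.index[0] - pd.Timedelta(days=1), freq="D"
    )
    ones = pd.Series(np.ones(len(pre_dates)), index=pre_dates)
    # 1.0 before the VE data starts, then the effect series afterwards
    return pd.concat([ones, effect])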
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# there are a couple of NA's early on in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
# this is just a single-row df of ones with all the missing dates as columns; it is reused for each of the 8 jurisdictions
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = {state}
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
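# --- illustrative sketch (not part of the pipeline) ---------------------------
# Under the exponential-decay form plotted above, the micro-distancing
# multiplier is md = (1 + theta_md) ** (-prop): it equals 1 when the "always"
# proportion is 0 and shrinks towards (1 + theta_md) ** (-1) as the proportion
# approaches 1. The theta value and proportions below are made up and the
# function is never called.
def _md_factor_example(theta_md=0.2):
    import numpy as np
    prop = np.array([0.0, 0.25, 0.5, 1.0])  # proportion always micro-distancing
    return (1.0 + theta_md) ** (-prop)  # ~ [1.0, 0.955, 0.913, 0.833]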
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract 10 days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
# count the days in each state's third wave that fall in the Omicron era (the else 0 covers jurisdictions with no Omicron overlap, e.g. QLD)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
# subset df_R to this state; rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
# tile the theta draws so they are (n_dates, mob_samples)
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
masks_prop_sim = df_masks[state].values
masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# construct a range of dates for omicron which starts at the maximum of the start date for that state or the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
(v >= pd.to_datetime(omicron_start_date)).sum()
for v in third_date_range.values()
],
)
)
idx = {}
kk = 0
for k in third_date_range.keys():
idx[k] = range(days_into_omicron[kk], days_into_omicron[kk + 1])
kk += 1
# tile the reduction in vaccination effect for omicron (i.e. VE is (1+r)*VE)
voc_vacc_product = np.zeros_like(vacc_ts_delta)
# calculate the voc effects
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# sample the right R_L
sim_R = samples["R_Li[" + state_key[state] + "]"].values
for n in range(mob_samples):
# add gaussian noise to predictors before forecast
df_state.loc[df_state.date < mob_forecast_date, predictors] = (
state_Rmed[state][:, :, n] / 100
)
# add gaussian noise to predictors after forecast
df_state.loc[df_state.date >= mob_forecast_date, predictors] = (
state_sims[state][:, :, n] / 100
)
## ADVANCED SCENARIO MODELLING - USE ONLY FOR POINT ESTIMATES
# set non-grocery values to 0
if advanced_scenario_modelling:
df_state.loc[:, predictors[0]] = 0
df_state.loc[:, predictors[2]] = 0
df_state.loc[:, predictors[3]] = 0
df_state.loc[:, predictors[4]] = 0
df1 = df_state.loc[df_state.date <= ban]
X1 = df1[predictors] # N by K
md[: X1.shape[0], :] = 1
if n == 0:
# initialise arrays (log-odds)
# N by K times (Nsamples by K )^T = Ndate by Nsamples
logodds = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
logodds = np.append(logodds, X2 @ post_values[:, n], axis=0)
logodds = np.append(logodds, X3 @ post_values[:, n], axis=0)
else:
# concatenate to pre-existing logodds matrix
logodds1 = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
prop2 = df_md.loc[ban:new_pol, state].values
prop3 = df_md.loc[new_pol:, state].values
logodds2 = X2 @ post_values[:, n]
logodds3 = X3 @ post_values[:, n]
logodds_sample = np.append(logodds1, logodds2, axis=0)
logodds_sample = np.append(logodds_sample, logodds3, axis=0)
# concatenate to previous
logodds = np.vstack((logodds, logodds_sample))
# create a matrix of mob_samples realisations which is an indicator of the VoC (Delta right now)
# which will be 1 up until the voc_start_date and then it will be values from the posterior sample
voc_multiplier_alpha = samples["voc_effect_alpha"].values
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# number of days into omicron forecast
tt = 0
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
tmp_date = pd.to_datetime("2020-03-01")
# get the correct Omicron start date
# omicron_start_date_tmp = np.maximum(
# pd.to_datetime(omicron_start_date),
# pd.to_datetime(third_date_range[state][0]),
# )
omicron_start_date_tmp = pd.to_datetime(omicron_start_date)
omicron_start_day_tmp = (
pd.to_datetime(omicron_start_date_tmp) - pd.to_datetime(start_date)
).days
for ii in range(mob_samples):
# if before omicron introduced in a jurisdiction, we consider what period we're at:
# 1. Wildtype
# 2. Alpha
# 3. Delta
voc_vacc_product[:, ii] = vacc_ts_delta[:, ii]
idx_start = df_state.loc[df_state.date < alpha_start_date].shape[0]
idx_end = df_state.loc[df_state.date < delta_start_date].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_alpha[ii]
idx_start = idx_end
idx_end = df_state.loc[df_state.date < omicron_start_date_tmp].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
idx_start = idx_end
idx_end = np.shape(voc_vacc_product)[0]
if strain == "Delta":
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
elif strain == "Omicron":
# if omicron we need to account for the Omicron VE prior to the introduction of
# omicron in mid November
voc_vacc_product[idx_start:idx_end, ii] = (
vacc_ts_omicron[idx_start:idx_end, ii] * voc_multiplier_omicron[ii]
)
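# To summarise the loop above: voc_vacc_product holds the combined VoC x VE
# multiplier for each day and posterior sample. It equals the delta VE series
# in the wildtype era, is scaled by the Alpha and Delta VoC effects over their
# respective date ranges, and from the Omicron start date onwards either keeps
# the Delta scaling (Delta run) or switches to the Omicron VE series times the
# Omicron VoC effect (Omicron run).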
# save the components of the TP
pd.DataFrame(sim_R).to_csv(results_dir + "baseline_R_L_" + strain + ".csv")
pd.DataFrame(md).to_csv(results_dir + "md_" + strain + ".csv")
pd.DataFrame(masks).to_csv(results_dir + "masks_" + strain + ".csv")
macro = 2 * expit(logodds.T)
pd.DataFrame(macro).to_csv(results_dir + "macro_" + strain + ".csv")
pd.DataFrame(voc_vacc_product).to_csv(results_dir + "voc_vacc_product_" + strain + ".csv")
# calculate TP
R_L = (
2 * expit(logodds.T)
* md
* masks
* sim_R
* voc_vacc_product
)
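# The TP above is a product of independent multipliers:
#   TP = 2 * expit(X @ beta) * md * masks * R_Li * (VE x VoC)
# As a rough, purely illustrative example: a macro (mobility) factor of 0.9,
# micro-distancing and mask factors of 0.95 each, a baseline R_Li of 3.0 and a
# combined VE x VoC term of 0.6 give TP ~= 0.9 * 0.95 * 0.95 * 3.0 * 0.6 ~= 1.46.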
# now we increase TP by 15% based on school reopening (this adjustment could
# in principle be inferred rather than fixed, but that would be difficult
# due to lockdowns and various interruptions since March 2020)
if scenarios[state] == "school_opening_2022":
R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :] = (
1.15 * R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :]
)
# calculate summary stats
R_L_med = np.median(R_L, axis=1)
R_L_lower = np.percentile(R_L, 25, axis=1)
R_L_upper = np.percentile(R_L, 75, axis=1)
R_L_bottom = np.percentile(R_L, 5, axis=1)
R_L_top = np.percentile(R_L, 95, axis=1)
# R_L
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend([typ] * df_state.shape[0])
state_Rs["date"].extend(dd.values) # repeat mob_samples times?
state_Rs["lower"].extend(R_L_lower)
state_Rs["median"].extend(R_L_med)
state_Rs["upper"].extend(R_L_upper)
state_Rs["top"].extend(R_L_top)
state_Rs["bottom"].extend(R_L_bottom)
state_Rs["mean"].extend(np.mean(R_L, axis=1))
state_Rs["std"].extend(np.std(R_L, axis=1))
state_R[state] = R_L
# generate a summary for the R_I
for state in states:
# R_I
if strain == "Delta":
R_I = samples["R_I"].values[:df_state.shape[0]]
elif strain == "Omicron":
# if Omicron period, then we need to multiply in the VoC effect as there's a period
# in the fitting where Delta and Omicron overlap (i.e. R_I = R_I * P(t) where P(t) is
# a product term).
R_I = samples["R_I_omicron"].values[:df_state.shape[0]]
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend(["R_I"] * df_state.shape[0])
state_Rs["date"].extend(dd.values)
state_Rs["lower"].extend(np.repeat(np.percentile(R_I, 25), df_state.shape[0]))
state_Rs["median"].extend(np.repeat(np.median(R_I), df_state.shape[0]))
state_Rs["upper"].extend(np.repeat(np.percentile(R_I, 75), df_state.shape[0]))
state_Rs["top"].extend(np.repeat(np.percentile(R_I, 95), df_state.shape[0]))
state_Rs["bottom"].extend(np.repeat(np.percentile(R_I, 5), df_state.shape[0]))
state_Rs["mean"].extend(np.repeat(np.mean(R_I), df_state.shape[0]))
state_Rs["std"].extend(np.repeat(np.std(R_I), df_state.shape[0]))
df_Rhats = pd.DataFrame().from_dict(state_Rs)
df_Rhats = df_Rhats.set_index(["state", "date", "type"])
d = pd.DataFrame()
for state in states:
for i, typ in enumerate(forecast_type):
if i == 0:
t = pd.DataFrame.from_dict(state_R[state])
t["date"] = dd.values
t["state"] = state
t["type"] = typ
else:
temp = pd.DataFrame.from_dict(state_R[state])
temp["date"] = dd.values
temp["state"] = state
temp["type"] = typ
t = t.append(temp)
# R_I
if strain == "Delta":
# use the Delta import reproduction number before Omicron starts
i = pd.DataFrame(np.tile(samples["R_I"].values, (len(dd.values), 1)))
elif strain == "Omicron":
# use the Omicron import reproduction number after Omicron starts
i = pd.DataFrame(np.tile(samples["R_I_omicron"].values, (len(dd.values), 1)))
i["date"] = dd.values
i["type"] = "R_I"
i["state"] = state
t = t.append(i)
d = d.append(t)
d = d.set_index(["state", "date", "type"])
df_Rhats = df_Rhats.join(d)
df_Rhats = df_Rhats.reset_index()
df_Rhats.state = df_Rhats.state.astype(str)
df_Rhats.type = df_Rhats.type.astype(str)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=6 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last six months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_6_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=12 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last twelve months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
print("============")
print("Saving results")
print("============")
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_12_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
# save values for the functional omicron related proportions for each state
prop_omicron_vars = ("r", "tau", "m0", "m1")
for (kk, state) in enumerate(states):
# subset df_R to this state; rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state].copy()
for v in prop_omicron_vars:
# take the first mob_samples posterior draws for this state
y = samples[v + "[" + str(kk + 1) + "]"].values
pd.DataFrame(y[:mob_samples]).to_csv(
results_dir
+ v
+ "_"
+ state
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# now we save the sampled TP paths
# convert the appropriate sampled susceptible depletion factors to a csv and save them for simulation
# NOTE: this will not save an updated median, mean etc for the R_I's. We don't use it so it's not
# really important but it should be noted for later if we are comparing things. The step function
# R_I -> R_I_omicron, is noticeable and shouldn't be overlooked.
df_Rhats = df_Rhats[
["state", "date", "type", "median", "bottom", "lower", "upper", "top"]
+ [i for i in range(mob_samples)]
]
# # save the file as a csv (easier to handle in Julia for now)
df_Rhats.to_csv(
results_dir
+ "soc_mob_R_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
return None
def calculate_Reff_local(
Reff,
R_I,
R_I_omicron,
voc_effect,
prop_import,
omicron_start_day,
):
"""
Apply the same mixture model idea as per the TP model to get
R_eff^L = (R_eff - rho * RI)/(1 - rho)
and use this to weight the TP historically.
"""
# loop over days, switching the import reproduction number at the Omicron start.
# Note that we set the local Reff to 0 if prop_import = 1, as in that instance
# the relationship breaks down due to division by 0.
Reff_local = np.zeros(shape=Reff.shape[0])
for n in range(len(Reff_local)):
# adjust the Reff based on the time period of interest
if n < omicron_start_day:
R_I_tmp = R_I
else:
R_I_tmp = R_I_omicron * voc_effect
if prop_import[n] < 1:
Reff_local[n] = (Reff[n] - prop_import[n] * R_I_tmp) / (1 - prop_import[n])
else:
Reff_local[n] = 0
# Reff_local = [
# (Reff[t] - prop_import[t] * R_I) / (1 - prop_import[t])
# if prop_import[t] < 1 else -1 for t in range(Reff.shape[0])
# ]
return Reff_local
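# --- illustrative usage (hypothetical numbers) ---------------------------------
# A minimal sketch of calculate_Reff_local on made-up inputs; the Reff path,
# import proportions, R_I draws and omicron_start_day below are all invented
# for illustration and the function is never called.
def _reff_local_example():
    import numpy as np
    Reff = np.array([1.4, 1.2, 1.1, 0.9])
    prop_import = np.array([0.0, 0.5, 1.0, 0.2])
    # expected output, by hand:
    # day 0: (1.4 - 0) / 1 = 1.4
    # day 1: (1.2 - 0.5 * 0.3) / 0.5 = 2.1
    # day 2: prop_import == 1, so the local Reff is set to 0
    # day 3: R_I -> R_I_omicron * voc_effect = 0.5, so (0.9 - 0.2 * 0.5) / 0.8 = 1.0
    return calculate_Reff_local(
        Reff=Reff,
        R_I=0.3,
        R_I_omicron=0.25,
        voc_effect=2.0,
        prop_import=prop_import,
        omicron_start_day=2,
    )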
def adjust_TP(data_date):
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
n_days_nowcast_TP_adjustment,
mob_samples,
)
print("============")
print("Adjusting TP forecasts using data from", data_date)
print("============")
data_date = pd.to_datetime(data_date)
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
sim_start_date = pd.to_datetime(sim_start_date)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start=third_start_date, end=third_end_date).values,
"NT": pd.date_range(start="2021-12-01", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-11-25", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-08-01", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# round-trip the Google data through CSV to drop the datetime dtype on the
# date column (it is re-parsed below)
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
# Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
omicron_start_day = (pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)).days
for strain in ("Delta", "Omicron"):
"""
Run adjustment model for the local TP estimates. This will adjust the local component of the
TP
"""
print("=========================")
print("Running TP adjustment model for", strain, "TP")
print("=========================")
df_forecast2 = pd.read_csv(
results_dir
+ "soc_mob_R_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# read in Reff samples
df_Reff = pd.read_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["INFECTION_DATES"],
)
inferred_prop_imports = pd.read_csv(
results_dir
+ "rho_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# read in the case data and note that we want this to be infection dates to match up to Reff changes
case_data = read_in_NNDSS(
data_date, apply_delay_at_read=True, apply_inc_at_read=True
)
case_data = case_data[["date_inferred", "STATE", "imported", "local"]]
# this is the forecasted TP dataframe, without R_L type
df_forecast2_new = df_forecast2.loc[df_forecast2.type != "R_L"]
end_date = pd.to_datetime(today) + timedelta(days=num_forecast_days)
states_to_adjust = ["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"]
# read in the samples for weighting between TP and Reff.
samples2 = pd.read_csv(
results_dir
+ "posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# extract the import values
if strain == "Delta":
R_I = samples2.R_I.to_numpy()
R_I_omicron = samples2.R_I_omicron.to_numpy()
voc_effect = samples2.voc_effect_delta.to_numpy()
elif strain == "Omicron":
# extract the import values
R_I_omicron = samples2.R_I_omicron.to_numpy()
voc_effect = samples2.voc_effect_omicron.to_numpy()
last_date_for_reff = (
pd.to_datetime(data_date)
- pd.Timedelta(days=truncation_days + n_days_nowcast_TP_adjustment - 1)
)
print("==============")
print("The last date the Reff estimate is used is", last_date_for_reff)
print("==============")
for state in states:
# filter case data by state
case_data_state = case_data.loc[case_data.STATE == state]
# take a sum of cases each day (this does not fill out missing days)
df_cases = case_data_state.groupby(["date_inferred", "STATE"]).agg(sum)
df_cases = df_cases.reset_index()
df_cases = df_cases.set_index("date_inferred")
# now we want to fill out indices by adding 0's on days with 0 cases and ensuring we go right up to the current truncated date
idx = pd.date_range(
pd.to_datetime("2020-03-01"),
last_date_for_reff,
)
is_omicron = np.array(idx >= pd.to_datetime(omicron_start_date))
df_cases = df_cases.reindex(idx, fill_value=0)
# filter the TP and Reff by state
df_forecast2_state_R_L = df_forecast2.loc[
((df_forecast2.state == state) & (df_forecast2.type == "R_L"))
]
df_Reff_state = df_Reff.loc[df_Reff.STATE == state]
# restrict the TP paths to the interval of consideration
idx = (pd.to_datetime(df_forecast2_state_R_L.date) >= pd.to_datetime("2020-03-01")) & (
pd.to_datetime(df_forecast2_state_R_L.date) <= last_date_for_reff
)
df_forecast2_state_R_L_sims = df_forecast2_state_R_L.iloc[:, 9:].loc[idx]
Reff = df_Reff_state.loc[
(df_Reff_state.INFECTION_DATES >= pd.to_datetime("2020-03-01"))
& (df_Reff_state.INFECTION_DATES <= last_date_for_reff)
].iloc[:, :-2]
# take a 7-day moving average of the local cases (imported cases are kept as daily counts)
ma_period = 7
df_cases_local = df_cases["local"]
df_cases_imported = df_cases["imported"]
df_cases_local_ma = df_cases_local.rolling(7, min_periods=1).mean()
# only want to use indices over the fitting horizon, after this point we rely on the TP model
idx = (df_cases.index >= pd.to_datetime("2020-03-01")) & (
df_cases.index <= last_date_for_reff
)
df_cases_local = df_cases_local[idx]
df_cases_imported = df_cases_imported[idx]
df_cases_local_ma = df_cases_local_ma[idx]
# dictionary to store sampled Rt paths
Rt = {}
ratio_import_to_local = df_cases_imported / (df_cases_local + df_cases_imported)
# set nan or infs to 0
ratio_import_to_local.replace([np.nan, np.inf], 0, inplace=True)
ratio_import_to_local = ratio_import_to_local.rolling(7, min_periods=1).mean()
# now replace the fitted period with the correct proportions
inferred_prop_imports_state = (
inferred_prop_imports
.loc[inferred_prop_imports.state == state]
.iloc[:,1:-1]
.set_index("date")
)
n_Reff_samples = Reff.shape[1]
# loop over the TP paths for a state
for (n, col_str) in enumerate(df_forecast2_state_R_L_sims):
ratio_import_to_local_combined = pd.Series(
inferred_prop_imports_state[str(int(col_str) % mob_samples)][i]
if i in inferred_prop_imports_state.index else ratio_import_to_local[i]
for i in ratio_import_to_local.index
)
ratio_import_to_local_combined.index = ratio_import_to_local.index
ratio_import_to_local_combined = ratio_import_to_local_combined.to_numpy()
if state in states_to_adjust:
# sample a Reff path from EpyReff (there are only 2000 of these)
Reff_sample = Reff.iloc[:, n % n_Reff_samples].to_numpy()
TP_local = np.array(df_forecast2_state_R_L_sims[col_str])
# Index by col_str % n_samples as we would be cycling the values in the R_I
Reff_local = calculate_Reff_local(
Reff_sample,
R_I[int(col_str) % mob_samples],
R_I_omicron[int(col_str) % mob_samples],
voc_effect[int(col_str) % mob_samples],
ratio_import_to_local_combined,
omicron_start_day=omicron_start_day,
)
omega = pd.Series(
(
np.random.beta(35, L_ma) if L_ma >= 5 else 1
for L_ma in df_cases_local_ma.to_numpy()
),
index=df_cases_local_ma.index,
)
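# omega controls how much weight the TP model gets relative to the empirical
# Reff: omega ~ Beta(35, L_ma), so with a 7-day average of, say, 500 local
# cases the mean weight on the TP is 35 / (35 + 500) ~= 0.065, whereas with
# fewer than 5 cases omega is fixed at 1 and the path reverts entirely to the TP.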
# apply the mixture model; the max(0, .) guards against negative values
Rt[col_str] = np.maximum(0, (1 - omega) * Reff_local + omega * TP_local)
# store Rt in a dataframe
Rt =
|
pd.DataFrame.from_dict(Rt, orient="index", columns=df_cases_local_ma.index)
|
pandas.DataFrame.from_dict
|
import unittest
import pandas as pd
import os
from common import ProteinCollection, Analyses
p = ProteinCollection(multifasta_fn=r"C:\Users\localadmin\PycharmProjects\glycanProfiler\temp\Schulz_RP_WT_FLAG_HuNCCAM_20_718_8His_HuPST.FASTA")
filenames = ["ruby-1.xlsx", "ruby-2.xlsx", "ruby-3.xlsx"]
tempdf = {}
for i in filenames:
df = pd.read_excel(os.path.join("temp", i))
df = df[(df['Master Protein Accessions'].notnull()) &
df['Master Protein Accessions'].str.contains("RP2908_WT_FLAG_HuNCAM_20-718_8His") & (
~df['Area'].isnull()) & (df['Search Engine Rank'] == 1)]
tempdf[i] = df
class TestAnalysesMethods(unittest.TestCase):
def test_process(self):
for df_name in tempdf:
d = Analyses(tempdf[df_name], df_name, "RP2908_WT_FLAG_HuNCAM_20-718_8His", p, '(?=(N[^PX][ST]))',
job_id="1")
d.process()
self.assertGreater(len(d.processed_df.columns), len(tempdf[df_name].columns))
def test_compile(self):
for df_name in tempdf:
d = Analyses(tempdf[df_name], df_name, "RP2908_WT_FLAG_HuNCAM_20-718_8His", p, '(?=(N[^PX][ST]))',
job_id="1")
d.process()
summary = d.compile(1)
self.assertFalse(summary.empty)
def test_analyze(self):
summaries = []
for df_name in tempdf:
d = Analyses(tempdf[df_name], df_name, "RP2908_WT_FLAG_HuNCAM_20-718_8His", p, '(?=(N[^PX][ST]))',
job_id="1")
d.process()
summary = d.compile(1, True)
m_summary = summary.melt(['position'], summary.columns[1:])
m_summary["reps"] = pd.Series([df_name] * len(m_summary.index), index=m_summary.index)
m_summary["condition"] = pd.Series(["a"] * len(m_summary.index), index=m_summary.index)
summaries.append(m_summary)
results = pd.concat(summaries, ignore_index=True)
results['glycan_composition'] = results['variable']
results['experiments'] = results['value']
print(results)
for i, d in results.groupby(["condition"]):
fin = pd.pivot_table(d,
values=['experiments'],
index=['position'],
columns=['glycan_composition', 'reps'],
dropna=False)
writer =
|
pd.ExcelWriter('ruby.xlsx')
|
pandas.ExcelWriter
|
from pathlib import Path
import numpy as np
import pandas as pd
import pickle
import lightgbm as lgb
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
##############################################################################
dir = Path(__file__).parents[2]
input_path = dir / "input/"
estimation_path = dir / "src/estimation"
model_path = dir / "src/estimation/models"
from estimation.standard import data_birth, get_n_partners
from estimation.extended import data_general
##############################################################################
mortality = pd.read_csv(input_path / "mortality.csv")
# fertility = pd.read_csv(input_path / "fertility.csv")
##############################################################################
# Read in and transform fertility data
def scale_fertility():
df_fertility =
|
pd.read_csv(input_path / "fertility.csv")
|
pandas.read_csv
|
import os
import zipfile
import csv
import pandas as pd
import requests
import json
from itertools import islice
import sklearn.preprocessing
from lightfm.data import Dataset
import numpy as np
from lightfm import LightFM, lightfm
from lightfm.evaluation import auc_score
from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
# *********************************************************************
def create_item_dict(df, id_col, name_col,author_col):
'''
Function to create an item dictionary keyed by item_id, with "name : author" as the value
Required Input -
- df = Pandas dataframe with Item information
- id_col = Column name containing unique identifier for an item
- name_col = Column name containing name of the item
- author_col = Column name containing author of the item
Expected Output -
item_dict = Dictionary type output containing item_id as key and "item_name : author" as value
'''
item_dict = {}
for i in range(df.shape[0]):
item_dict[(df.loc[i, id_col])] = df.loc[i, name_col] +' : '+df.loc[i, author_col]
return item_dict
# *****************************************************************************************
def create_interaction_matrix(df, user_col, item_col, rating_col, norm=False, threshold=None):
'''
Function to create an interaction matrix dataframe from transactional type interactions
Required Input -
- df = Pandas DataFrame containing user-item interactions
- user_col = column name containing user's identifier
- item_col = column name containing item's identifier
- rating col = column name containing user feedback on interaction with a given item
- norm (optional) = True if a normalization of ratings is needed
- threshold (required if norm = True) = value above which the rating is favorable
Expected output -
- Pandas dataframe with user-item interactions ready to be fed in a recommendation algorithm
'''
interactions = df.groupby([user_col, item_col])[rating_col] \
.sum().unstack().reset_index(). \
fillna(0).set_index(user_col)
if norm:
interactions = interactions.applymap(lambda x: 1 if x > threshold else 0)
return interactions
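# --- illustrative usage (hypothetical data) -----------------------------------
# A minimal sketch of create_interaction_matrix on a toy transactions frame;
# the column names and values below are made up purely for illustration and
# the function is never called.
def _interaction_matrix_example():
    toy = pd.DataFrame(
        {
            "user_id": [1, 1, 2],
            "book_id": [10, 11, 10],
            "rating": [5, 3, 4],
        }
    )
    # rows = users, columns = books, values = summed ratings (0 where missing)
    return create_interaction_matrix(toy, "user_id", "book_id", "rating")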
# ************************************************************************************
def create_item_emdedding_distance_matrix(model, interactions):
'''
Function to create item-item distance embedding matrix
Required Input -
- model = Trained matrix factorization model
- interactions = dataset used for training the model
Expected Output -
- item_emdedding_distance_matrix = Pandas dataframe containing cosine distance matrix b/w items
'''
df_item_norm_sparse = sparse.csr_matrix(model.item_embeddings)
similarities = cosine_similarity(df_item_norm_sparse)
item_emdedding_distance_matrix = pd.DataFrame(similarities)
item_emdedding_distance_matrix.columns = interactions.columns
item_emdedding_distance_matrix.index = interactions.columns
return item_emdedding_distance_matrix
# *****************************************************************************
def item_item_recommendation(item_emdedding_distance_matrix, item_id,
item_dict, n_items=10, show=True):
'''
Function to create item-item recommendation
Required Input -
- item_emdedding_distance_matrix = Pandas dataframe containing cosine distance matrix b/w items
- item_id = item ID for which we need to generate recommended items
- item_dict = Dictionary type input containing item_id as key and item_name as value
- n_items = Number of items needed as an output
Expected Output -
- recommended_items = List of recommended items
'''
recommended_items = list(pd.Series(item_emdedding_distance_matrix.loc[item_id, :]. \
sort_values(ascending=False).head(n_items + 1). \
index[1:n_items + 1]))
if show == True:
print("Item of interest :{0}".format(item_dict[item_id]))
print("Item similar to the above item:")
counter = 1
for i in recommended_items:
print(str(counter) + '- ' + item_dict[i])
counter += 1
return recommended_items
def sample_recommendation_user(model, interactions, user_id, user_dict,
item_dict, threshold=0, nrec_items=10, show=True):
'''
Function to produce user recommendations
Required Input -
- model = Trained matrix factorization model
- interactions = dataset used for training the model
- user_id = user ID for which we need to generate recommendation
- user_dict = Dictionary type input containing interaction_index as key and user_id as value
- item_dict = Dictionary type input containing item_id as key and item_name as value
- threshold = value above which the rating is favorable in new interaction matrix
- nrec_items = Number of output recommendation needed
Expected Output -
- Prints list of items the given user has already bought
- Prints list of N recommended items which user hopefully will be interested in
'''
n_users, n_items = interactions.shape
user_x = user_dict[user_id]
scores = pd.Series(model.predict(user_x, np.arange(n_items)))
scores.index = interactions.columns
scores = list(pd.Series(scores.sort_values(ascending=False).index))
known_items = list(pd.Series(interactions.loc[user_id, :] \
[interactions.loc[user_id, :] == threshold].index) \
.sort_values(ascending=False))
scores = [x for x in scores if x not in known_items]
return_score_list = scores[0:nrec_items]
known_items = list(pd.Series(known_items).apply(lambda x: item_dict[x]))
scores = list(
|
pd.Series(return_score_list)
|
pandas.Series
|
from itertools import product
from typing import List, Optional, Union
from mpl_format.figures import FigureFormatter
from numpy.ma import arange
from pandas import Series, DataFrame
from probability.distributions import Poisson, NegativeBinomial
from probability.distributions.conjugate.priors import VaguePrior
from probability.distributions.continuous.gamma import Gamma
from probability.distributions.mixins.conjugate import ConjugateMixin, \
PredictiveMixin
from probability.distributions.mixins.attributes import AlphaFloatMixin, \
BetaFloatMixin, NIntMixin, KIntMixin
from probability.utils import num_format
class GammaPoissonConjugate(
ConjugateMixin,
PredictiveMixin,
AlphaFloatMixin, BetaFloatMixin, NIntMixin, KIntMixin,
object
):
"""
Class for calculating Bayesian probabilities using the Gamma-Poisson
distribution.
Prior Hyper-parameters
----------------------
* `α` and `β` are the hyper-parameters of the Gamma prior.
* `α > 0`
* `β > 0`
* Interpretation is α total occurrences in β intervals.
Posterior Hyper-parameters
--------------------------
* `n` is the number of intervals.
* `k` is the number of occurrences.
Model parameters
----------------
* `P(k)` is the probability of observing k events in an interval.
* `k ≥ 0`
Links
-----
* https://en.wikipedia.org/wiki/Gamma_distribution
* https://en.wikipedia.org/wiki/Poisson_distribution
* https://en.wikipedia.org/wiki/Conjugate_prior
"""
def __init__(self, n: int, k: int,
alpha: float = VaguePrior.Gamma.alpha,
beta: float = VaguePrior.Gamma.beta):
"""
:param n: Number of intervals.
:param k: Number of occurrences.
:param alpha: Value for the α hyper-parameter of the prior Gamma
distribution (number of occurrences).
:param beta: Value for the β hyper-parameter of the prior Gamma
distribution (number of intervals).
"""
self._n: int = n
self._k: int = k
self._alpha: float = alpha
self._beta: float = beta
# region posterior hyper-parameters
@property
def alpha_prime(self) -> float:
return self._alpha + self._k
@property
def beta_prime(self) -> float:
return self._beta + self._n
# endregion
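# Worked example (illustrative numbers only): with a prior of alpha = 1,
# beta = 1 and data of k = 12 occurrences over n = 4 intervals, the posterior
# is Gamma(alpha' = 1 + 12 = 13, beta' = 1 + 4 = 5), whose mean
# alpha' / beta' = 2.6 sits close to the observed rate of 12 / 4 = 3
# occurrences per interval.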
def prior(self) -> Gamma:
"""
Return a Gamma distribution reflecting the prior belief about the
distribution of the parameter λ, before seeing any data.
"""
return Gamma(
alpha=self._alpha, beta=self._beta
).with_y_label(
'$P(λ_{Poi}=x|'
'α_{Gam},'
'β_{Gam})$'
).prepend_to_label('Prior: ')
def likelihood(self) -> Poisson:
"""
Return a distribution reflecting the likelihood of observing
the data, under a Poisson model, independent of the prior belief
about the distribution of parameter λ.
"""
return Poisson(lambda_=self._k / self._n)
def posterior(self) -> Gamma:
"""
Return a Gamma distribution reflecting the posterior belief about the
distribution of the parameter λ, after observing the data.
"""
return Gamma(
alpha=self.alpha_prime, beta=self.beta_prime
).with_y_label(
r'$P(λ_{Poi}=x|'
r'α_{Gam}+k_{Obs},'
r'β_{Gam}+n_{Obs})$'
).prepend_to_label(
'Posterior: '
)
# region predictive
def prior_predictive(self) -> NegativeBinomial:
return NegativeBinomial(
r=self._alpha,
p=1 / (1 + self._beta)
).with_y_label(
r'$P(\tilde{X}=x|'
r'α_{Gam},'
r'β_{Gam})$'
)
def posterior_predictive(self) -> NegativeBinomial:
return NegativeBinomial(
r=self.alpha_prime,
p=1 / (1 + self.beta_prime)
).with_y_label(
r'$P(\tilde{X}=x|'
r'α_{Gam}+k_{Obs},'
r'β_{Gam}+n_{Obs})$'
)
# endregion
def plot(self, **kwargs):
"""
Plot a grid of the different components of the Compound Distribution.
:param kwargs: kwargs for plot methods
"""
ppf_gamma_prior = self.prior().ppf().at(0.99)
ppf_gamma_posterior = self.posterior().ppf().at(0.99)
x_gamma_max = int(max(ppf_gamma_prior, ppf_gamma_posterior)) + 1
x_gamma = arange(0, x_gamma_max + 0.001, 0.001)
ff = FigureFormatter(n_rows=2, n_cols=3)
(
ax_prior, ax_data, ax_posterior,
ax_prior_predictive, ax_likelihood, ax_posterior_predictive
) = ff.axes.flat
self.prior().plot(x=x_gamma, ax=ax_prior.axes, **kwargs)
self.posterior().plot(x=x_gamma, ax=ax_posterior.axes, **kwargs)
y_max_params = max(ax_prior.get_y_max(), ax_posterior.get_y_max())
ax_prior.set_y_lim(0, y_max_params)
ax_posterior.set_y_lim(0, y_max_params)
ppf_n_binom_prior = self.prior_predictive().ppf().at(0.99)
ppf_n_binom_posterior = self.posterior_predictive().ppf().at(0.99)
k_pred = range(int(max(ppf_n_binom_prior, ppf_n_binom_posterior)) + 1)
self.prior_predictive().plot(
k=k_pred,
ax=ax_prior_predictive.axes,
**kwargs
)
self.posterior_predictive().plot(
k=k_pred,
ax=ax_posterior_predictive.axes,
**kwargs
)
y_max_pred = max(ax_prior_predictive.get_y_max(),
ax_posterior_predictive.get_y_max())
ax_prior_predictive.set_y_lim(0, y_max_pred)
ax_posterior_predictive.set_y_lim(0, y_max_pred)
ax_prior.set_title_text('prior').add_legend()
ax_posterior.set_title_text('posterior').add_legend()
ax_prior_predictive.set_title_text('prior predictive').add_legend()
ax_posterior_predictive.set_title_text(
'posterior predictive'
).add_legend()
# plot data
observations = Series(self.likelihood().rvs(self._n))
observations.plot.bar(ax=ax_data.axes, **kwargs)
ax_data.set_text(title='data', x_label='i', y_label='$X_i$')
# plot likelihood
k_poisson = range(int(self.likelihood().ppf().at(0.99)) + 2)
self.likelihood().plot(k=k_poisson, ax=ax_likelihood.axes)
ax_likelihood.set_title_text('likelihood')
ax_likelihood.add_legend()
return ff.figure
@staticmethod
def infer_posterior(data: Series,
alpha: float = VaguePrior.Gamma.alpha,
beta: float = VaguePrior.Gamma.beta) -> Gamma:
"""
Return a new Gamma distribution of the posterior most likely to generate
the given data.
:param data: Series of integers representing the number of occurrences
per interval.
:param alpha: Value for the α hyper-parameter of the prior Gamma
distribution (number of occurrences).
:param beta: Value for the β hyper-parameter of the prior Gamma
distribution (number of intervals).
"""
k: int = data.sum()
n: int = len(data)
return GammaPoissonConjugate(
n=n, k=k, alpha=alpha, beta=beta
).posterior()
@staticmethod
def infer_posteriors(
data: DataFrame,
prob_vars: Union[str, List[str]],
cond_vars: Union[str, List[str]],
alpha: float = VaguePrior.Gamma.alpha,
beta: float = VaguePrior.Gamma.beta,
stats: Optional[Union[str, dict, List[Union[str, dict]]]] = None
) -> DataFrame:
"""
Return a DataFrame mapping probability and conditional variables to Gamma
distributions of posteriors most likely to generate the given data.
:param data: DataFrame containing discrete data.
:param prob_vars: Name(s) of Poisson variables whose posteriors to
find probability of.
:param cond_vars: Names of discrete variables to condition on.
Calculations will be done for the cartesian product
of variable values
e.g if cA={1,2} and cB={3,4} then
cAB = {(1,3), (1, 4), (2, 3), (2, 4)}.
:param alpha: Value for the α hyper-parameter of each prior Gamma
distribution (number of occurrences).
:param beta: Value for the β hyper-parameter of each prior Gamma
distribution (number of intervals).
:param stats: Optional stats to append to the output e.g. 'alpha',
'median'. To pass arguments use a dict mapping stat
name to iterable of args.
:return: DataFrame with columns for each conditioning variable, a
'prob_var' column indicating the probability variable, a
`prob_val` column indicating the value of the probability
variable, and a `Gamma` column containing the distribution.
"""
if isinstance(prob_vars, str):
prob_vars = [prob_vars]
if isinstance(cond_vars, str):
cond_vars = [cond_vars]
cond_products = product(
*[data[cond_var].unique() for cond_var in cond_vars]
)
if stats is not None:
if isinstance(stats, str) or isinstance(stats, dict):
stats = [stats]
else:
stats = []
gammas = []
# iterate over conditions
for cond_values in cond_products:
cond_data = data
cond_dict = {}
for cond_var, cond_value in zip(cond_vars, cond_values):
cond_data = cond_data.loc[cond_data[cond_var] == cond_value]
cond_dict[cond_var] = cond_value
for prob_var in prob_vars:
prob_dict = cond_dict.copy()
prob_dict['prob_var'] = prob_var
gamma = GammaPoissonConjugate.infer_posterior(
data=cond_data[prob_var],
alpha=alpha, beta=beta
)
prob_dict['Gamma'] = gamma
for stat in stats:
prob_dict = {**prob_dict, **gamma.stat(stat, True)}
gammas.append(prob_dict)
gammas_data =
|
DataFrame(gammas)
|
pandas.DataFrame
|
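The snippet above relies on a GammaPoissonConjugate class from an external probability library; as a minimal, library-independent sketch (assuming only scipy and pandas, with illustrative alpha/beta defaults rather than the library's), the same posterior update can be written directly: with prior Gamma(alpha, beta) and k total occurrences over n intervals, the posterior is Gamma(alpha + k, beta + n).
from scipy import stats
import pandas as pd

def gamma_poisson_posterior(data: pd.Series, alpha: float = 0.001, beta: float = 0.001):
    # conjugate update: Gamma(alpha + sum of counts, beta + number of intervals);
    # scipy parameterizes Gamma with shape a and scale = 1 / rate
    k = data.sum()
    n = len(data)
    return stats.gamma(a=alpha + k, scale=1.0 / (beta + n))

# usage: posterior mean of the Poisson rate for 5 observed interval counts
posterior = gamma_poisson_posterior(pd.Series([2, 3, 1, 4, 2]))
print(posterior.mean())  # (0.001 + 12) / (0.001 + 5), about 2.4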
import dash_bootstrap_components as dbc
from dash import Dash, Input, Output, State, html, dcc, dash_table, callback, dependencies
import pandas as pd
import numpy as np
df1 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/solar.csv')
df2 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/solar.csv')
df1 =
|
pd.concat([df1]*3, ignore_index=True)
|
pandas.concat
|
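A quick note on the pd.concat pattern above: repeating the frame and passing ignore_index=True renumbers the combined index from zero, so the tripled table behaves like one long table. A minimal standalone illustration:
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
tripled = pd.concat([df] * 3, ignore_index=True)
print(tripled.index.tolist())  # [0, 1, 2, 3, 4, 5]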
import pandas as pd
import numpy as np
from datetime import datetime
from .mappings import map_registryCode_inv, map_account_type_inv, map_unitType_inv, export_mappings
import os
import glob
# account information added by hand
NEW_ACC = [{"accountIDEutl": 111264,
"name": "EU Credit Exchange Account - Aviation",
'registry_id': "EU",
"openingDate": pd.to_datetime("2014-01-29"),
"isOpen": True,
"accountType_id": "100-23"},
{"accountIDEutl": 111265,
"name": "EU Credit Exchange Account - Aviation",
'registry_id': "EU",
"openingDate": pd.to_datetime("2014-01-29"),
"isOpen": True,
"accountType_id": "100-23"},
{"accountIDEutl": 111267,
"name": "EU Credit Exchange Account",
'registry_id': "EU",
"openingDate": pd.to_datetime("2014-01-29"),
"isOpen": True,
"accountType_id": "100-23"},
{"accountIDEutl": 111266,
"name": "EU Credit Exchange Account",
'registry_id': "EU",
"openingDate": pd.to_datetime("2014-01-29"),
"isOpen": True,
"accountType_id": "100-23"}]
def create_csv_tables(dir_in, dir_out, fn_coordinates=None,
fn_nace=None, fn_nace_codes=None):
"""Create all tables
:param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory
:param fn_coordinates: <string> path to file with installation coordinates
:param fn_nace: <string> name of file with nace codes for installations
if None, NACE codes are not processed
:param fn_nace_codes: <string> name of file with nace classification scheme
If None, classification lookup is not exported
"""
print("####### Create lookup tables")
create_tables_lookup(dir_in, dir_out, fn_nace_codes=fn_nace_codes)
print("####### Create installation tables")
create_table_installation(dir_in, dir_out,
fn_coordinates=fn_coordinates,
fn_nace=fn_nace)
create_table_compliance(dir_in, dir_out)
create_table_surrender(dir_in, dir_out)
print("####### Create account tables")
create_table_accountHolder(dir_in, dir_out)
create_table_account(dir_in, dir_out)
print("####### Create transaction tables")
create_table_transaction(dir_in, dir_out)
def create_table_installation(dir_in, dir_out, fn_coordinates=None,
fn_nace=None):
"""Create installation table
:param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory
:param fn_coordinates: <string> name of file with coordinates
:param fn_nace: <string> name of file with nace codes
if None, NACE codes are not processed
"""
# get data: installation data together with addresses with updated coordinates
# and entitlements
df_inst = pd.read_csv(dir_in + "installations.csv",)
df_enti = pd.read_csv(dir_in + "entitlements.csv", na_values=["Not Applicable", "Not Set"])
df_enti["installationID_new"] = df_enti.registry.map(lambda x: map_registryCode_inv.get(x))
df_enti["installationID"] = df_enti["installationID_new"] + "_" + df_enti["installationID"].map(str)
df_enti = df_enti[["installationID", "euEntitlement", "chEntitlement"]].copy()
df_inst = df_inst.merge(df_enti, on="installationID", how="left")
# transform dataframe to be consistent with Installation object
cols_inst = {'installationID': 'id',
"name": "name",
'registryCode': 'registry_id',
'activity': 'activity_id',
'eprtrID': 'eprtrID',
'parent': 'parentCompany',
'subsidiary': 'subsidiaryCompany',
'permitID': 'permitID',
'icaoID': 'designatorICAO',
'monitoringPlanId': 'monitoringID',
'monitoringPlanExpiry': 'monitoringExpiry',
'monitoringPlanFirstYear': 'monitoringFirstYear',
'permitExpiry': 'permitDateExpiry',
'isAircraftOperator': 'isAircraftOperator',
'ec7482009ID': 'ec748_2009Code',
'permitEntryDate': 'permitDateEntry',
'mainAddress': 'mainAddress',
'secondaryAddress': 'secondaryAddress',
'postalCode': 'postalCode',
'city': 'city',
'country': 'country_id',
'latitude': 'latitudeEutl',
'longitude': 'longitudeEutl',
"euEntitlement": "euEntitlement",
"chEntitlement": "chEntitlement",
}
df_inst_to_tbl = df_inst[[c for c in cols_inst.keys() if c in df_inst.columns]].copy()
df_inst_to_tbl = df_inst_to_tbl.rename(columns=cols_inst)
# convert activity id to id only (without description)
df_inst_to_tbl.activity_id = df_inst_to_tbl.activity_id.map(lambda x: int(x.split("-")[0]))
if fn_coordinates is not None:
df_ = pd.read_csv(fn_coordinates,
names=["id", "latitudeGoogle", "longitudeGoogle"],
usecols=["id", "latitudeGoogle", "longitudeGoogle"],
header=0)
df_inst_to_tbl = df_inst_to_tbl.merge(df_, on="id", how="left")
# add nace codes
if fn_nace:
# primarily use the 2020 leakage list, filling missing codes from the 2015 list
df_ = pd.read_csv(fn_nace, usecols=["id", "nace15", "nace20"],
dtype={"nace15": "str", "nace20": "str"}).drop_duplicates()
df_["nace_id"] = df_.nace20.fillna(df_.nace15)
df_ = df_.rename(columns={"nace15": "nace15_id", "nace20": "nace20_id"})
df_inst_to_tbl = df_inst_to_tbl.merge(df_, on="id", how="left")
# for aircraft add the nace code 51 (Air transport)
df_inst_to_tbl.loc[df_inst_to_tbl.isAircraftOperator, "nace_id"] = df_inst_to_tbl.loc[df_inst_to_tbl.isAircraftOperator, "nace_id"].fillna(51)
# add created timestamp
df_inst_to_tbl["created_on"] = datetime.now()
df_inst_to_tbl["updated_on"] = datetime.now()
# export to csv
df_inst_to_tbl.to_csv(dir_out + "installations.csv", index=False, encoding="utf-8")
return
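# A minimal sketch of the key-building/merge pattern used in the function above
# (toy values, not real EUTL data): map the registry name to its code, build
# "CODE_id" composite keys, then left-merge additional columns on that key.
def _example_build_composite_key():
    toy_map = {"Germany": "DE"}  # stand-in for map_registryCode_inv
    df = pd.DataFrame({"registry": ["Germany"], "installationID": [42]})
    df["installationID"] = df.registry.map(toy_map) + "_" + df["installationID"].map(str)
    return df  # installationID becomes "DE_42"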
def create_table_compliance(dir_in, dir_out):
"""Create table with compliance data
:param dir_in: <string> directory with parsed data
:param dir_out: <string> output directory
"""
# get data
df_comp =
|
pd.read_csv(dir_in + "compliance.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 12:22:06 2019
@author: YASH
"""
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
import csv
import pandas as pd
import numpy
import nltk  # required for nltk.pos_tag used in POS_tagger below
#Data cleaning Functions:
def isEnglish(s):
try:
s.encode('ascii')
except UnicodeEncodeError:
return False
else:
return True
# The following function removes the part of the string that contains the substring, e.g. if
# substring = 'http', then 'http://www.google.com' is removed, i.e. removal continues until a space is found
def rem_substring(tweets,substring):
m=0;
for i in tweets:
if (substring in i):
#while i.find(substring)!=-1:
k=i.find(substring)
d=i.find(' ',k,len(i))
if d!=-1: #substring is present somwhere in the middle(not the end of the string)
i=i[:k]+i[d:]
else: #special case when the substring is present at the end, we needn't append the
i=i[:k] #substring after the junk string to our result
tweets[m]=i #store the result in tweets "list"
m+= 1
return tweets
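# A more concise alternative sketch (not a required replacement): strip everything from
# the substring up to the next whitespace with a regex, matching the loop above.
import re
def rem_substring_re(tweets, substring):
    pattern = re.escape(substring) + r'\S*'
    return [re.sub(pattern, '', tweet) for tweet in tweets]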
def removeNonEnglish(tweets):
result=[]
for i in tweets:
if isEnglish(i):
result.append(i)
return result
#the following function converts all the text to the lower case
def lower_case(tweets):
result=[]
for i in tweets:
result.append(i.lower())
return result
def rem_punctuation(tweets):
#print(len(tweets))
m=0
validLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ "
for i in tweets:
x = ""
for j in i:
if (j in validLetters)==True:
x += j
tweets[m]=x
m=m+1
return tweets
def stop_words(tweets):
#Removal of Stop words like is, am , be, are, was etc.
stop_words1 = set(stopwords.words('english'))
indi=0
for tweet in tweets:
new_s=[]
Br_tweet = word_tokenize(tweet)
for word in Br_tweet:
if (word not in stop_words1):
new_s.append(word)
et=" ".join(new_s)
tweets[indi]=et
indi+=1
return tweets
def score(college_name):
filename = 'data_emotions_words_list.csv'
pos_file_name= "Pos_tagged_" + college_name + ".csv"
POS=pd.read_csv(pos_file_name)
POS_tweets=POS['POS_Tweet'].values
adverb1=pd.read_csv("adverb.csv")
verb1=pd.read_csv("verb.csv")
''' Verb and adverb are dictionaries having values for verbs and adverbs'''
verb={};adverb={}
l=adverb1['value'].values
j=0
for i in adverb1['adverb'].values:
adverb[i]=l[j]
j+=1
l=verb1['Value'].values
j=0
for i in verb1['Verb'].values:
verb[i]=l[j]
j+=1
''' Add the adjectives in the dictionary'''
Adjectives={}
df=pd.read_csv("data_emotions_words_list.csv")
for i in range(len(df)) :
Adjectives[df.loc[i, "Words"]]= [df.loc[i, "Happiness"],df.loc[i, "Anger"],df.loc[i, "Sadness"],df.loc[i, "Fear"],df.loc[i, "Disgust"]]
''' Assign Scores to each tweet'''
FINAL={};FINAL1={'Tweets':[],'Happiness':[],'Sadness':[],'Fear':[],'Disgust':[],'Anger':[],'Sentiment':[]}
for tweet in POS_tweets:
sum_adverb=0;sum_verb=0
score_list=[]
words=word_tokenize(tweet)
stem=stemming(words)
f_stem=0
for i in words :
if (i in adverb):
sum_adverb+=adverb[i]
elif (stem[f_stem] in adverb):
sum_adverb+=adverb[stem[f_stem]]
elif (i in verb):
sum_verb+=verb[i]
elif (stem[f_stem] in verb):
sum_verb+=verb[stem[f_stem]]
else:
if (i in Adjectives ) or (stem[f_stem] in Adjectives):
if i in Adjectives:
# ADJ=[Happiness,Anger,Sadness,Fear,disgust]
ADJ=Adjectives[i]
elif (stem[f_stem] in Adjectives):
ADJ=Adjectives[stem[f_stem]]
else:
pass
# Calculate Score
c=sum_adverb + sum_verb
#The formula is derived from the research paper
if (c) <0 :
for j in range(len(ADJ)):
ADJ[j]=5.0-ADJ[j]
elif (c>=0.5):
for j in range(len(ADJ)):
ADJ[j]=c*ADJ[j]
else:
for j in range(len(ADJ)):
ADJ[j]=0.5*ADJ[j]
score_list.append(ADJ)
sum_adverb=0;sum_verb=0
f_stem+=1
total_adj=len(score_list)
s=[0.0 for i in range(5)]
emo=''
if (total_adj != 0):
for i in score_list:
s[0]+=i[0] #Happiness
s[1]+=i[1]#Anger
s[2]+=i[2] #Sadness
s[3]+=i[3] #Fear
s[4]+=i[4] #Disgust
for i in range(len(s)):
s[i] = round(s[i]/total_adj, 6)  # keep the value numeric so the comparison with 'emotion' below works
emotion=0.0
for i in range(len(s)):
if (s[i]> emotion):
emotion=max(emotion,s[i])
if i==0 :
emo='Happiness'
elif i==1:
emo='Anger'
elif i==2:
emo='Sadness'
elif i==3:
emo='Fear'
elif i==4:
emo='Disgust'
else:
# if no adjective from the vocabulary was found, assign a neutral score
s=[0.2000 for i in range(5)]
emo='Neutral'
#find the Max emotion value for the tweet
s.append(emo)
#Add the final tweet and score
FINAL[tweet]=s
FINAL1['Tweets'].append(tweet)
FINAL1['Happiness'].append(s[0])
FINAL1['Anger'].append(s[1])
FINAL1['Fear'].append(s[3])
FINAL1['Sadness'].append(s[2])
FINAL1['Disgust'].append(s[4])
FINAL1['Sentiment'].append(s[5])
DB=pd.DataFrame(FINAL1,columns=['Tweets','Happiness','Anger','Fear','Sadness','Disgust','Sentiment'])
file_name = "FINAL_" + college_name + "_SENTIMENTS.csv"
DB.to_csv(file_name)
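# Worked example of the adverb/verb weighting applied above (values illustrative):
# for an adjective with Happiness score 3.0 and context weight c = sum_adverb + sum_verb,
#   c = -1  (negating context)  -> weighted score = 5.0 - 3.0 = 2.0
#   c = 2   (intensifying)      -> weighted score = 2 * 3.0  = 6.0
#   c = 0.2 (weak modifier)     -> weighted score = 0.5 * 3.0 = 1.5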
#POS Tagger Function used to identify the adjectives, verbs, adverbs.
def POS_tagger(tweets, username):
final = []
# for each line in tweets list
for line in tweets:
t = []
# for each sentence in the line
# tokenize this sentence
text= word_tokenize(line)
k = nltk.pos_tag(text)
for i in k:
# Only Verbs, Adverbs & Adjectives are Considered
if ((i[1][:2] == "VB") or (i[1][:2] == "JJ") or (i[1][:2] == "RB")):
t.append(i[0])
one_tweet=" ".join(t)
if (len(one_tweet)>0):
final.append(one_tweet)
dict1={'POS_Tweet':final}
db1=pd.DataFrame(dict1)
filename = "Pos_tagged_" + username + ".csv"
db1.to_csv(filename)
def stemming(tweets):
# Find the root word
# stemming of words
porter = PorterStemmer()
stemmed = [porter.stem(word) for word in tweets]
return stemmed
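# Small usage sketch of the cleaning helpers defined above (input and output are illustrative):
def _example_pipeline():
    tweets = ["Check this out http://example.com GREAT campus!!"]
    tweets = rem_substring(tweets, 'http')
    tweets = lower_case(tweets)
    tweets = rem_punctuation(tweets)
    tweets = stop_words(tweets)
    return stemming(word_tokenize(tweets[0]))  # e.g. ['check', 'great', 'campu']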
def main():
c = input("Enter the name of the raw_tweets college:")  # input() for Python 3 (raw_input is Python 2 only)
c_f=c+'_tweets.csv'
db=
|
pd.read_csv(c_f)
|
pandas.read_csv
|
"""Tests for _data_reading.py"""
import datetime
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import primap2
import primap2.pm2io as pm2io
import primap2.pm2io._conversion
from primap2.pm2io._data_reading import additional_coordinate_metadata
from .utils import assert_ds_aligned_equal
DATA_PATH = Path(__file__).parent / "data"
@pytest.mark.parametrize(
"unit, entity, expected_attrs",
[
("Mt", "CO2", {"units": "Mt", "entity": "CO2"}),
(
"Gg CO2",
"KYOTOGHG (AR4GWP100)",
{
"units": "Gg CO2",
"entity": "KYOTOGHG",
"gwp_context": "AR4GWP100",
},
),
(
"kg CO2",
"CH4 (SARGWP100)",
{
"units": "kg CO2",
"entity": "CH4",
"gwp_context": "SARGWP100",
},
),
],
)
def test_metadata_for_variable(unit, entity, expected_attrs):
assert (
pm2io._interchange_format.metadata_for_variable(unit, entity) == expected_attrs
)
def assert_attrs_equal(attrs_result, attrs_expected):
assert attrs_result.keys() == attrs_expected.keys()
assert attrs_result["attrs"] == attrs_expected["attrs"]
assert attrs_result["time_format"] == attrs_expected["time_format"]
assert attrs_result["dimensions"].keys() == attrs_expected["dimensions"].keys()
for entity in attrs_result["dimensions"]:
assert set(attrs_result["dimensions"][entity]) == set(
attrs_expected["dimensions"][entity]
)
@pytest.fixture
def coords_cols():
return {
"unit": "unit",
"entity": "gas",
"area": "country",
"category": "category",
"sec_cats__Class": "classification",
}
@pytest.fixture
def add_coords_cols():
return {"category_name": ["category_name", "category"]}
@pytest.fixture
def coords_defaults():
return {
"source": "TESTcsv2021",
"sec_cats__Type": "fugitive",
"scenario": "HISTORY",
}
@pytest.fixture
def coords_terminologies():
return {
"area": "ISO3",
"category": "IPCC2006",
"sec_cats__Type": "type",
"sec_cats__Class": "class",
"scenario": "general",
}
@pytest.fixture
def coords_value_mapping():
return {
"category": "PRIMAP1",
"entity": "PRIMAP1",
"unit": "PRIMAP1",
}
@pytest.fixture
def coords_value_filling():
return {
"category": { # col to fill
"category_name": { # col to fill from
"Energy": "1", # from value: to value
"IPPU": "2",
}
}
}
@pytest.fixture
def filter_keep():
return {
"f1": {"category": ["IPC0", "IPC2"]},
"f2": {"classification": "TOTAL"},
}
@pytest.fixture
def filter_remove():
return {"f1": {"gas": "CH4"}, "f2": {"country": ["USA", "FRA"]}}
class TestReadWideCSVFile:
def test_output(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
meta_data = {"references": "Just ask around."}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
meta_data=meta_data,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"references": "Just ask around.",
"sec_cats": ["Class (class)", "Type (type)"],
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"Type (type)",
"unit",
"scenario (general)",
"Class (class)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_no_sec_cats(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_add_coords(
self,
tmp_path,
coords_cols,
add_coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data_category_name.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats_cat_name.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
"additional_coordinates": {"category_name": "category (IPCC2006)"},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_read_wide_fill_col(
self,
tmp_path,
coords_cols,
add_coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
coords_value_filling,
):
file_input = DATA_PATH / "test_csv_data_category_name_fill_cat_code.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats_cat_name.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
coords_value_filling=coords_value_filling,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
"additional_coordinates": {"category_name": "category (IPCC2006)"},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_entity_terminology(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats.csv"
df_expected: pd.DataFrame = pd.read_csv(file_expected, index_col=0)
df_expected.rename(columns={"entity": "entity (PRIMAP1)"}, inplace=True)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
coords_terminologies["entity"] = "PRIMAP1"
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
"entity_terminology": "PRIMAP1",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity (PRIMAP1)",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_coords_value_mapping_dict(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
coords_value_mapping = {
"category": {"IPC1": "1", "IPC2": "2", "IPC3": "3", "IPC0": "0"},
"entity": {"KYOTOGHG": "KYOTOGHG (SARGWP100)"},
"unit": "PRIMAP1",
}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
def test_entity_default(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output_entity_def.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["entity"]
del coords_value_mapping["entity"]
coords_defaults["entity"] = "CO2"
del filter_remove["f1"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
def test_unit_default(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output_unit_def.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["unit"]
coords_defaults["unit"] = "Gg"
filter_remove["f1"] = {"gas": "KYOTOGHG"}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
df_result.to_csv(tmp_path / "test.csv")
df_result = pd.read_csv(tmp_path / "test.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
def test_function_mapping(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output_unit_def.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["unit"]
coords_defaults["unit"] = "Gg"
coords_value_mapping[
"category"
] = pm2io._conversion.convert_ipcc_code_primap_to_primap2
filter_remove["f1"] = {"gas": "KYOTOGHG"}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
df_result.to_csv(tmp_path / "test.csv")
df_result = pd.read_csv(tmp_path / "test.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
def test_col_missing(
self,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
coords_cols["sec_cats__Class"] = "class"
with pytest.raises(ValueError, match="Columns {'class'} not found in CSV."):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
)
def test_unknown_mapping(
self, coords_cols, coords_defaults, coords_terminologies, coords_value_mapping
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
coords_value_mapping["category"] = "non-existing"
with pytest.raises(
ValueError,
match="Unknown metadata mapping 'non-existing' for column 'category'.",
):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
def test_overlapping_specification(
self, coords_cols, coords_defaults, coords_terminologies, coords_value_mapping
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
coords_defaults["entity"] = "CO2"
with pytest.raises(
ValueError, match="{'entity'} given in coords_cols and coords_defaults."
):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
def test_overlapping_specification_add_coords(
self, coords_cols, coords_defaults, coords_terminologies, coords_value_mapping
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
add_coords_cols = {"test": ["gas", "category"]}
with pytest.raises(
ValueError, match="{'gas'} given in coords_cols and add_coords_cols."
):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
def test_no_unit(
self, coords_cols, coords_defaults, coords_terminologies, coords_value_mapping
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
del coords_cols["unit"]
with pytest.raises(ValueError, match="Mandatory dimension 'unit' not defined."):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
def test_no_entity(
self, coords_cols, coords_defaults, coords_terminologies, coords_value_mapping
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
del coords_cols["entity"]
with pytest.raises(
ValueError, match="Mandatory dimension 'entity' not defined."
):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
def test_unknown_category_mapping(
self, coords_cols, coords_defaults, coords_terminologies, coords_value_mapping
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
coords_value_mapping["category"] = "TESTTEST"
with pytest.raises(
ValueError,
match="Unknown metadata mapping 'TESTTEST' for column 'category'.",
):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
def test_unknown_entity_mapping(
self, coords_cols, coords_defaults, coords_terminologies, coords_value_mapping
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
coords_value_mapping["entity"] = "TESTTEST"
with pytest.raises(
ValueError, match="Unknown metadata mapping 'TESTTEST' for column 'entity'."
):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
def test_unknown_coordinate(
self, coords_cols, coords_defaults, coords_terminologies, coords_value_mapping
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
coords_defaults["citation"] = "this should go to attrs"
with pytest.raises(
ValueError,
match="'citation' given in coords_defaults is unknown - prefix with "
"'sec_cats__' to add a secondary category.",
):
pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
class TestInterchangeFormat:
def test_from(self):
file_input = DATA_PATH / "test_read_wide_csv_file_output.csv"
file_expected = DATA_PATH / "test_from_interchange_format_output.nc"
ds_expected = primap2.open_dataset(file_expected)
df_input = pd.read_csv(file_input, index_col=0)
dims = [
"area (ISO3)",
"category (IPCC2006)",
"scenario (general)",
"Class (class)",
"Type (type)",
"unit",
"entity",
"source",
]
attrs = {
"attrs": {
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
"scen": "scenario (general)",
"sec_cats": ["Class (class)", "Type (type)"],
},
"time_format": "%Y",
"dimensions": {"*": dims},
}
ds_result = pm2io.from_interchange_format(df_input, attrs)
assert_ds_aligned_equal(ds_result, ds_expected, equal_nan=True)
def test_from_too_large(self, caplog):
df = pd.DataFrame(
{
"a": np.arange(10),
"b": np.arange(10),
"c": np.arange(10),
"entity": ["CO2"] * 10,
"unit": ["Gg"] * 10,
"2001": np.arange(10),
}
)
df.attrs = {
"attrs": {},
"dimensions": {"CO2": ["a", "b", "c"]},
"time_format": "%Y",
}
# projected array size should be 1000 > 100
with pytest.raises(ValueError, match="Resulting array too large"):
pm2io.from_interchange_format(df, max_array_size=100)
assert "ERROR" in caplog.text
assert (
"Set with 1 entities and a total of 3 dimensions will have a size of 1,000"
in caplog.text
)
def test_from_add_coord_non_unique(self, caplog):
df = pd.DataFrame(
{
"a": np.arange(3),
"b": np.arange(3),
"c": np.arange(3),
"entity": ["CO2"] * 3,
"entity_name": ["Carbondioxide", "Carbondioxide", "Methane"],
"unit": ["Gg"] * 3,
"2001": np.arange(3),
}
)
df.attrs = {
"attrs": {},
"dimensions": {"CO2": ["a", "b", "c"]},
"time_format": "%Y",
"additional_coordinates": {"entity_name": "entity"},
}
with pytest.raises(
ValueError,
match="Different secondary coordinate values "
"for given first coordinate value for "
"entity_name.",
):
pm2io.from_interchange_format(df)
assert "ERROR" in caplog.text
assert (
"Different secondary coordinate values for given first coordinate "
"value for entity_name." in caplog.text
)
def test_roundtrip(self, tmp_path):
file_input = DATA_PATH / "test_read_wide_csv_file_output.csv"
file_temp = tmp_path / "test_interchange_format"
data = pd.read_csv(file_input, index_col=0, dtype=object)
attrs = {
"attrs": {
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
"scen": "scenario (general)",
"sec_cats": ["Class (class)", "Type (type)"],
},
"time_format": "%Y",
"dimensions": {"CO2": ["area (ISO3)"]},
}
pm2io.write_interchange_format(file_temp, data, attrs)
read_data = pm2io.read_interchange_format(file_temp)
read_attrs = read_data.attrs
assert read_attrs == attrs
pd.testing.assert_frame_equal(data, read_data)
class TestLong:
def test_compare_wide(
self,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input_wide = DATA_PATH / "test_csv_data.csv"
file_input_long = DATA_PATH / "long.csv"
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
meta_data = {"references": "Just ask around"}
df_result_wide = pm2io.read_wide_csv_file_if(
file_input_wide,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
meta_data=meta_data,
)
coords_cols["time"] = "year"
coords_cols["data"] = "emissions"
df_result_long = pm2io.read_long_csv_file_if(
file_input_long,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
meta_data=meta_data,
time_format="%Y",
)
pd.testing.assert_frame_equal(df_result_wide, df_result_long)
assert df_result_wide.attrs == df_result_long.attrs
def test_compare_wide_entity_terminology(
self,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input_wide = DATA_PATH / "test_csv_data.csv"
file_input_long = DATA_PATH / "long.csv"
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
coords_terminologies["entity"] = "PRIMAP1"
df_result_wide = pm2io.read_wide_csv_file_if(
file_input_wide,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
coords_cols["time"] = "year"
coords_cols["data"] = "emissions"
df_result_long = pm2io.read_long_csv_file_if(
file_input_long,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
time_format="%Y",
)
pd.testing.assert_frame_equal(df_result_wide, df_result_long)
assert df_result_wide.attrs == df_result_long.attrs
def test_compare_wide_add_cols(
self,
coords_cols,
add_coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input_wide = DATA_PATH / "test_csv_data_category_name.csv"
file_input_long = DATA_PATH / "test_csv_data_category_name_long.csv"
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result_wide = pm2io.read_wide_csv_file_if(
file_input_wide,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
coords_cols["time"] = "year"
coords_cols["data"] = "emissions"
df_result_long = pm2io.read_long_csv_file_if(
file_input_long,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
time_format="%Y",
)
|
pd.testing.assert_frame_equal(df_result_wide, df_result_long)
|
pandas.testing.assert_frame_equal
|
from rest_framework import permissions, status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from datetime import date, datetime, timedelta
from django.forms.models import model_to_dict
from django.db.models import Q, Count, F, Sum
from django.db.models.functions import TruncWeek, TruncMonth, TruncYear
from django.apps import apps
from django.core.files.storage import default_storage
from .serializers import *
from .models import *
from .content_based_recommender import ContentBasedRecommender
from .utils import *
from pathlib import Path
from google.analytics.data_v1beta import BetaAnalyticsDataClient
from google.analytics.data_v1beta.types import DateRange
from google.analytics.data_v1beta.types import Dimension
from google.analytics.data_v1beta.types import Metric
from google.analytics.data_v1beta.types import RunReportRequest
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
from slugify import slugify
import pandas as pd
import random
import json
import uuid
import os
import pydash
import urllib3
import dotenv
# Read configuration files
base_dir = Path(__file__).resolve().parent.parent
module_dir = os.path.dirname(__file__)
mapping_template_file_path = os.path.join(module_dir, 'configuration/mapping_template.json')
schema_table_file_path = os.path.join(module_dir, 'configuration/schema_table.json')
schema_detail_file_path = os.path.join(module_dir, 'configuration/schema_detail.json')
ga4_json = os.path.join(module_dir, 'configuration/ga4.json')
ua_json = os.path.join(module_dir, 'configuration/ua.json')
# Initialize environment variables
dotenv.load_dotenv(os.path.join(base_dir, '.env'))
# Global variables read from os.environ
API_KEY = os.environ['API_KEY']
IP_DOMAIN = os.environ['IP_DOMAIN']
scope = 'https://www.googleapis.com/auth/analytics.readonly'
dimensions = ['date', 'eventName', 'pageLocation', 'browser', 'deviceCategory', 'operatingSystem', 'country']
metrics = ['eventCount', 'sessions']
ua_dimensions = ['ga:date', 'ga:eventCategory', 'ga:pagePath', 'ga:browser', 'ga:deviceCategory', 'ga:operatingSystem', 'ga:country']
ua_metrics = ['ga:totalEvents', 'ga:sessions']
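# Minimal sketch of how the GA4 client imported above is typically queried with these
# dimensions/metrics (the property ID and date range are placeholders; credentials are
# assumed to be configured, e.g. via GOOGLE_APPLICATION_CREDENTIALS).
def _example_ga4_report(property_id='GA4_PROPERTY_ID'):
    client = BetaAnalyticsDataClient()
    request = RunReportRequest(
        property=f'properties/{property_id}',
        dimensions=[Dimension(name=d) for d in dimensions],
        metrics=[Metric(name=m) for m in metrics],
        date_ranges=[DateRange(start_date='7daysAgo', end_date='today')],
    )
    return client.run_report(request)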
@api_view(['GET'])
def home(request):
try:
# Initialize KPI reports
web_activity_report = []
event_report = []
product_report = []
traffics = {}
# Total number of web activities (interactions)
web_activities_file = len(Interaction_f.objects.all())
web_activities_ga = Interaction_ga.objects.all().aggregate(Sum('event_count'))['event_count__sum']
if (web_activities_ga is None):
web_activities_ga = 0
web_activities = web_activities_file + web_activities_ga
# Total number of sessions (a session includes multiple interactions)
sessions_file = len(Interaction_f.objects.values('session_id').distinct())
sessions_ga = Interaction_ga.objects.all().aggregate(Sum('session_count'))['session_count__sum']
if (sessions_ga is None):
sessions_ga = 0
sessions = sessions_file + sessions_ga
# Total number of web activities by page location
pages_file = Interaction_f.objects.all().values('page_location').annotate(total=Count('page_location'))
pages_ga = Interaction_ga.objects.all().values('page_location').annotate(total=Sum('event_count'))
pages = list(pages_file) + list(pages_ga)
if (len(pages)):
pages = pd.DataFrame(pages).groupby(['page_location'], as_index=False).sum().to_dict('records')
pages = sorted(pages, key=lambda k : k['total'], reverse=True)
# Total number of web activities by device categories
device_categories_file = Interaction_f.objects.all().values('device_category').annotate(total=Count('device_category'))
device_categories_ga = Interaction_ga.objects.all().values('device_category').annotate(total=Sum('event_count'))
device_categories = list(device_categories_ga) + list(device_categories_file)
for category in list(device_categories):
type = category['device_category']
if (type not in traffics):
traffics[type] = 0
traffics[type] += category['total']
# Web activities report - Total number of web activities by event name
web_activity_data_file = Interaction_f.objects.all().values('event_name').annotate(total=Count('event_name'))
web_activity_data_ga = Interaction_ga.objects.all().values('event_name').annotate(total=Sum('event_count'))
web_activity_data = list(web_activity_data_file) + list(web_activity_data_ga)
if (len(web_activity_data)):
web_activity_data = pd.DataFrame(web_activity_data).groupby(['event_name'], as_index=False).sum().to_dict('records')
web_activity_report = [(item['event_name'], item['total']) for item in list(web_activity_data)]
# Cultural event report - Total number of cultural events by event type
event_data = Events.objects.all().values('event_type').annotate(total=Count('event_type'))
event_report = [(item['event_type'], item['total']) for item in list(event_data)]
# Cultural product report - Total number of cultural products by product type
product_data = Products.objects.all().values('product_type').annotate(total=Count('product_type'))
product_report = [(item['product_type'], item['total']) for item in list(product_data)]
# Add info for report to generate charts
reports = [
{
'id': 'activity-chart',
'title': 'Statistiques d’activités Web par types',
'data': web_activity_report,
'type': 'pie',
},
{
'id': 'event-chart',
'title': 'Statistiques d’événements par types',
'data': event_report,
'type': 'column'
},
{
'id': 'product-chart',
'title': 'Statistiques d’articles par types',
'data': product_report,
'type': 'column'
},
]
return Response({'reports': reports,
'sessions': sessions,
'webActivities': web_activities,
'traffic': traffics,
'pages': pages}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': exception})
class ItemList(APIView):
# Get list of items (all rows) from a table
def get(self, request, item_type):
try:
import_id = request.GET.get('importId', None)
# Read config file
item_list_schema = get_json_info(schema_table_file_path, item_type)
# Get info (model_name of item, list required fields to show, ...)
model_name = item_list_schema['model_name']
fields = item_list_schema['fields']
view_detail = item_list_schema['view_detail']
Model = apps.get_model(app_label='dimadb', model_name=model_name)
if (import_id is not None):
items = Model.objects.filter(import_id=import_id).values(*fields)
else:
items = Model.objects.all().values(*fields)
return Response({
'items': items,
'isViewDetail': view_detail,
}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': exception})
class ItemDetail(APIView):
# Get item detail (detail of a row) from a table
def get(self, request, item_type, pk, format=None):
try:
# Read config file
item_detail_schema = get_json_info(schema_detail_file_path, item_type)
item_detail = get_item_detail_form(pk, item_detail_schema)
return Response(item_detail)
except Exception as exception:
return Response({'message': exception})
# Update info
def put(self, request, item_type, pk, format=None):
try:
item_form = json.loads(request.body)
update_item_info(item_form)
return Response({'message': 'Update successfully'}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': exception})
# Delete info
def delete(self, request, item_type, pk, format=None):
try:
item_form = json.loads(request.body)
delete_item_info(item_form)
return Response({'message': 'Delete successfully'}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': exception})
# New info
def post(self, request, item_type, pk, format=None):
try:
item_form = json.loads(request.body)
update_item_info(item_form)
return Response({'message': 'Create successfully'}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': exception})
# Get data(row) from a table(model)
def get_model_object(model_name, pk):
if (pk != 'form'):
try:
Model = apps.get_model(app_label='dimadb', model_name=model_name)
event = Model.objects.get(id=pk)
return model_to_dict(event)
except Model.DoesNotExist:
return {}
else:
return {}
# Get all information of an object from several tables (event information coming from event, geolocation, ...)
def get_item_detail_form(pk, schema_detail):
form_attributes = {}
# Get info from schema_detail
model_name = schema_detail['model_name']
fields = schema_detail['fields']
m2m_tables = []
o2m_tables = []
if ('m2m_tables' in schema_detail.keys()):
m2m_tables = schema_detail['m2m_tables']
if ('o2m_tables' in schema_detail.keys()):
o2m_tables = schema_detail['o2m_tables']
# Query item from db
Model = apps.get_model(app_label='dimadb', model_name=model_name)
obj = get_model_object(model_name, pk)
if ('id' in obj.keys()):
obj_id = obj['id']
else:
obj_id = None
# The list of attributes consists of the field names in the primary table
for field in fields:
form_attributes[field] = {}
attribute_type = Model._meta.get_field(field).get_internal_type()
attribute_choices = Model._meta.get_field(field).choices
# Assign value for each field of item
if (field in obj.keys()):
form_attributes[field]['value'] = obj[field]
else:
form_attributes[field]['value'] = ''
# Assign data type for each field of item
if (attribute_choices != None):
form_attributes[field]['type'] = 'select'
form_attributes[field]['choices'] = [
value for (value, name) in attribute_choices]
else:
if (attribute_type == 'IntegerField'):
form_attributes[field]['type'] = 'integer'
elif (attribute_type == 'DecimalField'):
form_attributes[field]['type'] = 'decimal'
elif (attribute_type == 'TextField'):
form_attributes[field]['type'] = 'textarea'
elif (attribute_type == 'DateTimeField' or attribute_type == 'DateField'):
form_attributes[field]['type'] = 'date'
if form_attributes[field]['value'] == '' or form_attributes[field]['value'] is None:
form_attributes[field]['value'] = ''
else:
form_attributes[field]['value'] = form_attributes[field]['value'].strftime(
"%Y-%m-%d")
else:
form_attributes[field]['type'] = 'text'
# The list of o2m tables contains additional info for the item (geolocation, resource, etc.)
# Ex: event - eventpreference(o2m)
for o2m_table in o2m_tables:
o2m_display_name = o2m_table['display_name']
connected_field = o2m_table['connected_field']
# Get list of rows in o2m table
form_attributes[o2m_display_name] = {}
form_attributes[o2m_display_name]['type'] = 'o2m'
form_attributes[o2m_display_name]['value'] = get_o2m_items(o2m_table, obj_id)
element_attributes = get_item_detail_form('form', o2m_table)
element_attributes['connected_field'] = connected_field
form_attributes[o2m_display_name]['elementAttributes'] = element_attributes
form_info = {
'type': 'object',
'id': uuid.uuid4(),
'attributes': form_attributes,
'removed': False,
'status': 'new' if pk == 'form' else 'created',
'name': model_name
}
# The list of m2m tables contains additional info for the item (geolocation, resource, etc.)
# Ex: event - eventlocation(connected_table, which holds the two primary keys of both tables) - geolocation(m2m)
for m2m_table in m2m_tables:
# Get config info
m2m_display_name = m2m_table['display_name']
connected_table = m2m_table['connected_table']
connected_field1 = m2m_table['connected_field1']
connected_field2 = m2m_table['connected_field2']
# Get list of rows in m2m table
form_attributes[m2m_display_name] = {}
form_attributes[m2m_display_name]['type'] = 'm2m'
form_attributes[m2m_display_name]['value'] = get_m2m_items(m2m_table, obj_id)
# Create an empty form info for m2m table
element_attributes = get_item_detail_form('form', m2m_table)
element_attributes['connectedAttributes'] = get_item_detail_form('form', connected_table)
element_attributes['connectedAttributes']['connected_field1'] = connected_field1
element_attributes['connectedAttributes']['connected_field2'] = connected_field2
form_attributes[m2m_display_name]['elementAttributes'] = element_attributes
return form_info
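# Small sketch of the Django introspection the form builder above relies on:
# apps.get_model resolves a model class by name, and _meta.get_field exposes each
# field's internal type and choices (the model/field names below are illustrative).
def _example_introspection(model_name='events', field_name='event_type'):
    Model = apps.get_model(app_label='dimadb', model_name=model_name)
    field = Model._meta.get_field(field_name)
    return field.get_internal_type(), field.choices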
# Update item based on form sent from GUI
def update_item_info(form_info, connected_field1_id=None):
status = form_info['status']
obj_id = form_info['attributes']['id']['value']
obj_info = filter_form_object_info(form_info['attributes'])
model_name = form_info['name']
Model = apps.get_model(app_label='dimadb', model_name=model_name)
if ('connected_field' in form_info.keys()):
connected_field = form_info['connected_field']
obj_info[connected_field] = connected_field1_id
if (status == 'new'): # If new info created
new_obj = Model(**obj_info)
new_obj.save()
update_multiple_items('m2m', form_info['attributes'], new_obj.id)
update_multiple_items('o2m', form_info['attributes'], new_obj.id)
if ('connectedAttributes' in form_info.keys()):
connected_field2_id = new_obj.id
create_connected_object(form_info['connectedAttributes'], connected_field1_id, connected_field2_id)
elif (status == 'created'): # If info updated
Model.objects.filter(id=obj_id).update(**obj_info)
updated_obj = Model.objects.get(id=obj_id)
update_multiple_items('m2m', form_info['attributes'], updated_obj.id)
update_multiple_items('o2m', form_info['attributes'], updated_obj.id)
if ('connectedAttributes' in form_info.keys()):
update_item_info(form_info['connectedAttributes'])
else: # If info deleted
delete_item_info(form_info)
# Delete row from database
def delete_item_info(form_info):
obj_id = form_info['attributes']['id']['value']
if (obj_id != ''):  # check the object's id, not the id() builtin
model_name = form_info['name']
Model = apps.get_model(app_label='dimadb', model_name=model_name)
Model.objects.filter(id=obj_id).delete()
delete_multiple_items('m2m', form_info['attributes'])
delete_multiple_items('o2m', form_info['attributes'])
if ('connectedAttributes' in form_info.keys()):
delete_item_info(form_info['connectedAttributes'])
# Get all items in m2m table
def get_m2m_items(m2m_table, connected_field1_id):
m2m_forms = []
if connected_field1_id:
# Get config info
connected_table = m2m_table['connected_table']
connected_field1 = m2m_table['connected_field1']
connected_field2 = m2m_table['connected_field2']
connected_model_name = connected_table['model_name']
# Get connected model objects to query connected_field2_id
ConnectedModel = apps.get_model(app_label='dimadb', model_name=connected_model_name)
filter_params = {connected_field1: connected_field1_id}
connected_objects = list(ConnectedModel.objects.filter(**filter_params))
connected_objects = [model_to_dict(connected_obj) for connected_obj in connected_objects]
# For each connected object (row) in connected table, query and create form for that connected object + foreign object
for connected_obj in connected_objects:
connected_form = get_item_detail_form(connected_obj['id'], connected_table)
m2m_form = get_item_detail_form(connected_obj[connected_field2], m2m_table)
m2m_form['connectedAttributes'] = connected_form
m2m_form['connectedAttributes']['connected_field1'] = connected_field1
m2m_form['connectedAttributes']['connected_field2'] = connected_field2
m2m_forms.append(m2m_form)
return m2m_forms
# Get all items in o2m table
def get_o2m_items(o2m_table, connected_field_id):
o2m_forms = []
if connected_field_id:
# Get config info
o2m_model_name = o2m_table['model_name']
connected_field = o2m_table['connected_field']
# Get o2m model objects
O2MModel = apps.get_model(app_label='dimadb', model_name=o2m_model_name)
filter_params = {connected_field: connected_field_id}
o2m_objects = list(O2MModel.objects.filter(**filter_params))
o2m_objects = [model_to_dict(obj) for obj in o2m_objects]
# Create o2m item form (row)
for o2m_obj in o2m_objects:
o2m_form = get_item_detail_form(o2m_obj['id'], o2m_table)
o2m_form['connected_field'] = connected_field
o2m_forms.append(o2m_form)
return o2m_forms
# Update or create items in m2m/o2m tables one by one
def update_multiple_items(table_type, obj, connected_field1_id=None):
for attribute in obj.keys():
if attribute != 'id':
if obj[attribute]['type'] == table_type:
list_values = obj[attribute]['value']
for value in list_values:
update_item_info(value, connected_field1_id)
# Delete items in m2m/o2m tables one by one
def delete_multiple_items(table_type, obj):
for attribute in obj.keys():
if attribute != 'id':
if obj[attribute]['type'] == table_type:
list_values = obj[attribute]['value']
for value in list_values:
delete_item_info(value)
# Create object in connected table (eventlocation, eventresource, etc)
def create_connected_object(form_info, connected_field1_id, connected_field2_id):
connected_field1 = form_info['connected_field1']
connected_field2 = form_info['connected_field2']
model_name = form_info['name']
obj_info = filter_form_object_info(form_info['attributes'])
obj_info[connected_field1] = connected_field1_id
obj_info[connected_field2] = connected_field2_id
Model = apps.get_model(app_label='dimadb', model_name=model_name)
obj = Model(**obj_info)
obj.save()
# Map data from an imported file onto the data model
def mapping_data(data, template, source_name):
try:
total = 0 # Total object rows in imported data
count = 0 # Total object rows saved in database
if isinstance(data, list):
total = len(data)
# Store history of import
import_info = ImportInfo(table_name=template['model_name'], source_name=source_name)
import_info.save()
# Get info from schema_detail
model_name = template['model_name']
fields = template['fields']
m2m_tables = []
o2m_tables = []
if ('m2m_tables' in template.keys()):
m2m_tables = template['m2m_tables']
if ('o2m_tables' in template.keys()):
o2m_tables = template['o2m_tables']
#Mapping
for obj in data:
obj_info = filter_imported_object_info(fields, obj)
if obj_info:
# Store obj in primary table
obj_info['import_id'] = import_info.id
Model = apps.get_model(app_label='dimadb', model_name=model_name)
new_obj = Model(**obj_info)
new_obj.save()
# Store additional objs in m2m tables
for m2m_table in m2m_tables:
m2m_model_name = m2m_table['model_name']
m2m_sources = m2m_table['sources']
for source in m2m_sources:
m2m_objs = []
if 'array' not in source:
m2m_objs.append(obj)
else:
if (pydash.get(obj, source['array'])):
m2m_objs = pydash.get(obj, source['array'])
for m2m_obj in m2m_objs:
m2m_obj_info = filter_imported_object_info(source['fields'], m2m_obj)
if (m2m_obj_info):
m2m_obj_info['import_id'] = import_info.id
M2MModel = apps.get_model(app_label='dimadb', model_name=m2m_model_name)
new_m2m_obj = M2MModel(**m2m_obj_info)
new_m2m_obj.save()
# Store obj in connected table
# Read configure info
connected_table = source['connected_table']
connected_field1 = source['connected_field1']
connected_field2 = source['connected_field2']
connected_model_name = connected_table['model_name']
connected_obj_info = filter_imported_object_info(connected_table['fields'], m2m_obj)
connected_obj_info[connected_field1] = new_obj.id
connected_obj_info[connected_field2] = new_m2m_obj.id
connected_obj_info['import_id'] = import_info.id
ConnectedModel = apps.get_model(app_label='dimadb', model_name=connected_model_name)
new_connected_obj = ConnectedModel(**connected_obj_info)
new_connected_obj.save()
# Store additional objs in o2m tables
for o2m_table in o2m_tables:
o2m_model_name = o2m_table['model_name']
sources = o2m_table['sources']
for source in sources:
o2m_objs = []
if 'array' not in source:
o2m_objs.append(obj)
else:
if (pydash.get(obj, source['array'])):
o2m_objs = pydash.get(obj, source['array'])
for o2m_obj in o2m_objs:
o2m_obj_info = filter_imported_object_info(source['fields'], o2m_obj)
if (o2m_obj_info):
connected_field = source['connected_field']
o2m_obj_info[connected_field] = new_obj.id
o2m_obj_info['import_id'] = import_info.id
O2MModel = apps.get_model(app_label='dimadb', model_name=o2m_model_name)
new_o2m_obj = O2MModel(**o2m_obj_info)
new_o2m_obj.save()
count += 1
return {'message': 'Import successful.\n' + 'Imported ' + str(count) + '/' + str(total) + ' object(s).'}
else:
return {'message': 'Wrong json format'}
except Exception as error:
return {'message': 'There was an error (duplication, ...).\n' + 'Imported ' + str(count) + '/' + str(total) + ' object(s).'}
# Some imported JSON files need to be reformatted before mapping
def reformated_data(json_data, item_type, template_type):
try:
reformated_json_data = []
# Each item type & each template type => reformat differently
if (item_type == 'web-activity' and template_type == 'default'):
list_required_attributes = ['event_date', 'event_timestamp', 'items', 'event_name', 'device', 'geo', 'user_id', 'traffic_source']
list_required_event_params = ['ga_session_id', 'page_title', 'page_location']
for obj in json_data:
new_obj = {}
for attribute in list_required_attributes:
if attribute == 'event_date':
date = pydash.get(obj, attribute)
format_date = date[:4] + '-' + date[4:6] + '-' + date[6:8]
new_obj[attribute] = format_date
elif attribute == 'event_timestamp':
new_obj[attribute] = int(pydash.get(obj, attribute))
else:
new_obj[attribute] = pydash.get(obj, attribute)
for param in obj['event_params']:
key = param['key']
values = param['value']
if (key in list_required_event_params):
for value in values:
if values[value] != None:
new_obj[key] = values[value]
else:
continue
for item in new_obj['items']:
item['item_eventname'] = new_obj['event_name']
reformated_json_data.append(new_obj)
elif (item_type == 'google-analytic' and template_type == 'default'):
list_required_attributes = ['date', 'eventName', 'deviceCategory', 'country', 'pageLocation', 'eventCount', 'sessions', 'operatingSystem', 'browser']
for obj in json_data:
new_obj = {}
for attribute in list_required_attributes:
if attribute == 'date':
date = pydash.get(obj, attribute)
format_date = date[:4] + '-' + date[4:6] + '-' + date[6:8]
new_obj[attribute] = format_date
else:
new_obj[attribute] = pydash.get(obj, attribute)
reformated_json_data.append(new_obj)
return reformated_json_data
except Exception as exception:
return exception
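# Example of the date normalisation performed above: the GA export stores dates as
# "YYYYMMDD", so "20211231" -> "2021" + "-" + "12" + "-" + "31" == "2021-12-31".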
@api_view(['POST'])
@authentication_classes([])
@permission_classes([])
def import_json_file(request, item_type):
try:
# Get request info
files = request.FILES.getlist('files[]')
file = files[0]
json_data = json.load(file)
# Get template configuration info
template_type = request.POST.get('template')
if (template_type is None or template_type == ''):
template_type = 'default'
template = get_json_info(mapping_template_file_path, item_type + '.' + template_type)
is_reformat = template['is_reformat']
# Check reformat
if is_reformat:
json_data = reformated_data(json_data, item_type, template_type)
#Mapping and saving in database
mapping_result = mapping_data(json_data, template, file.name)
return Response(mapping_result, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
@api_view(['GET'])
def get_mapping_templates(request, item_type):
try:
list_templates = []
json_file = open(mapping_template_file_path)
json_data = json.load(json_file)
json_file.close()
list_templates = [key for key in json_data[item_type]]
return Response({'listTemplates': list_templates}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
@api_view(['POST'])
@authentication_classes([])
@permission_classes([])
def import_api(request, item_type):
try:
# Get request info
request_body = json.loads(request.body)
url = request_body['url']
bearer_token = request_body['bearerToken']
template_type = request_body['template']
# Get data from url
http = urllib3.PoolManager()
header = {'Accept': '*/*'}
if (bearer_token != ''):
header['Authorization'] = 'Bearer ' + bearer_token
if (template_type is None or template_type == ''):
template_type = 'default'
response = http.request('GET', url, headers=header)
response_body = json.loads(response.data)
response_data = response_body['data']
# Import
mapping_template = get_json_info(mapping_template_file_path, item_type + '.' + template_type)
mapping_result = mapping_data(response_data, mapping_template, url)
return Response(mapping_result, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
@api_view(['GET'])
def get_import_info(request, item_type):
try:
tables = {
"event": "events",
"article": "products",
"web-activity": "interaction_f",
"google-analytic-report": "interaction_ga",
}
snippets = ImportInfo.objects.filter(table_name=tables[item_type])
serializer = ImportInfoSerializer(snippets, many=True)
return Response({'items': serializer.data}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
@api_view(['DELETE'])
def delete_imported_items(request, item_type, pk):
try:
tables = {
"event": ["events", "businessentity", "entityeventrole", "eventdate"],
"article": ["products", "businessentity", "entityproductrole"],
"web-activity": ["interaction_f"],
"google-analytic-report": ["interaction_ga"]
}
for table in tables[item_type]:
Model = apps.get_model(app_label='dimadb', model_name=table)
Model.objects.filter(import_id=pk).delete()
ImportInfo.objects.filter(id=pk).delete()
return Response({}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
# Generate the recommendation API URL used to retrieve recommendations
def generate_recommend_api(level, item_type, recommend_type, quantity, domain, item_url):
api = IP_DOMAIN + '/dimadb/get-list-recommend/?'
api += 'itemType=' + item_type
api += '&level=' + level
api += '&quantity=' + quantity
if (recommend_type):
api += '&recommendType=' + recommend_type
if (domain):
api += '&domain=' + domain
if (item_url):
api += '&itemUrl=' + item_url
return api
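# Illustrative sketch only: shape of the URL built by generate_recommend_api. The values
# 'Domain', 'events', 'Most popular', '5' and 'concert' are made-up inputs; the host comes
# from IP_DOMAIN and the query values are not URL-encoded by this helper (note the space
# in 'Most popular').
def _example_recommend_api_url():
    # -> <IP_DOMAIN>/dimadb/get-list-recommend/?itemType=events&level=Domain&quantity=5&recommendType=Most popular&domain=concert
    return generate_recommend_api('Domain', 'events', 'Most popular', '5', 'concert', None)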
recommend_display_fields = {
'events': ['event_id', 'event_name', 'event_type', 'next_date', 'url', 'img', 'location_name'],
'products': ['product_id', 'product_name', 'product_type', 'url', 'img']
}
# Get upcoming recommendation
def get_upcoming(table_name, quantity=1, domain=None):
Model = apps.get_model(app_label='dimadb', model_name=table_name)
display_fields = recommend_display_fields[table_name]
list_recommend_items = []
filter_params = {}
if (domain is not None):
if (table_name == 'events'):
filter_params['event_type'] = domain
elif (table_name == 'products'):
filter_params['product_type'] = domain
list_objs = Model.objects.filter(Q(**filter_params))
list_objs = [model_to_dict(obj) for obj in list(list_objs)]
if (table_name == 'events'):
list_filtered_obj = []
today = datetime.today()
EventDateModel = apps.get_model(app_label='dimadb', model_name='eventdate')
for obj in list_objs:
list_event_dates = EventDateModel.objects.filter(event_id=obj['id'], date__gte=today).order_by('date')
list_event_dates = [model_to_dict(obj) for obj in list(list_event_dates)]
if (len(list_event_dates)):
obj['next_date'] = list_event_dates[0]['date']
list_filtered_obj.append(obj)
if (len(list_filtered_obj)):
list_objs = sorted(list_filtered_obj, key=lambda x: x['next_date'])
else:
list_objs = []
for i in range(0, int(quantity)):
if (i < len(list_objs)):
obj = list_objs[i]
recommend_item = {}
for field in list(display_fields):
recommend_item[field] = obj[field]
list_recommend_items.append(recommend_item)
return list_recommend_items
# Compute a weighted popularity score per item URL from web activity counts
def order_by_score(table_name, list_objs):
if (len(list_objs)):
list_interactions_f = Interaction_f.objects.filter(page_location__in=[obj['url'] for obj in list_objs])
list_interactions_f = [model_to_dict(obj) for obj in list_interactions_f]
if (len(list_interactions_f)):
list_interactions_f = pd.DataFrame(list_interactions_f).groupby(['page_location', 'event_name'], as_index=False)['id'].count().rename(columns={'id':'event_count'}).to_dict('r')
list_interactions_ga = list(Interaction_ga.objects.filter(page_location__in=[obj['url'] for obj in list_objs]).values('page_location', 'event_name', 'event_count'))
list_interactions = list_interactions_f + list_interactions_ga
if (len(list_interactions)):
list_interactions = pd.DataFrame(list_interactions).groupby(['page_location', 'event_name'], as_index=False).sum().to_dict('r')
list_objs_weight = {}
for interaction in list_interactions:
page_location = interaction['page_location']
event_name = interaction['event_name']
event_count = interaction['event_count']
activity_weight = 0
try:
activity_type_info = model_to_dict(WebActivityType.objects.get(name=event_name))
activity_weight = activity_type_info['value']
except:
activity_weight = 1
if page_location not in list_objs_weight:
list_objs_weight[page_location] = 0
list_objs_weight[page_location] += event_count * activity_weight
for obj in list_objs:
if obj['url'] in list_objs_weight:
obj['popular_score'] = list_objs_weight[obj['url']]
else:
obj['popular_score'] = 0
if (len(list_objs)):
list_objs = sorted(list_objs, key=lambda d: d['popular_score'], reverse=True)
else:
list_objs = []
return list_objs
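# Minimal sketch of the scoring rule implemented above, on plain dictionaries:
# popular_score(url) = sum over event types of event_count * activity_weight, where the
# weight falls back to 1 when no WebActivityType row exists for that event name.
# Not used by the views; inputs are illustrative.
def _example_popular_score(interactions, weights):
    scores = {}
    for row in interactions:
        weight = weights.get(row['event_name'], 1)
        scores[row['page_location']] = scores.get(row['page_location'], 0) + row['event_count'] * weight
    return scores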
# Get most popular recommendation
def get_most_popular(table_name, quantity=1, domain=None):
Model = apps.get_model(app_label='dimadb', model_name=table_name)
display_fields = recommend_display_fields[table_name]
list_recommend_items = []
filter_params = {}
list_interactions = []
if (domain is not None):
if (table_name == 'events'):
filter_params['event_type'] = domain
elif (table_name == 'products'):
filter_params['product_type'] = domain
list_objs = Model.objects.filter(Q(**filter_params))
list_objs = [model_to_dict(obj) for obj in list(list_objs)]
if (table_name == 'events'):
list_filtered_obj = []
today = datetime.today()
EventDateModel = apps.get_model(app_label='dimadb', model_name='eventdate')
for obj in list_objs:
list_event_dates = EventDateModel.objects.filter(event_id=obj['id'], date__gte=today).order_by('date')
list_event_dates = [model_to_dict(obj) for obj in list(list_event_dates)]
if (len(list_event_dates)):
obj['next_date'] = list_event_dates[0]['date']
list_filtered_obj.append(obj)
if (len(list_filtered_obj)):
list_objs = sorted(list_filtered_obj, key=lambda x: x['next_date'])
else:
list_objs = []
list_objs = order_by_score(table_name, list_objs)
for i in range(0, int(quantity)):
if (i < len(list_objs)):
obj = list_objs[i]
recommend_item = {}
for field in list(display_fields):
recommend_item[field] = obj[field]
recommend_item['popular_score'] = obj['popular_score']
list_recommend_items.append(recommend_item)
if (len(list_recommend_items) == 0):
list_recommend_items = get_upcoming(table_name, quantity)
return list_recommend_items
# Get similarity recommendation
def get_similar(table_name, quantity=1, item_url=None, recommend_type=None):
Model = apps.get_model(app_label='dimadb', model_name=table_name)
display_fields = recommend_display_fields[table_name]
list_recommend_items = []
item_id = Model.objects.get(url=item_url).id
list_similar_items = ContentBasedRecommender.recommend_items_by_items(table_name=table_name, items_id=item_id)
if (table_name == 'events'):
list_filtered_obj = []
today = datetime.today()
EventDateModel = apps.get_model(app_label='dimadb', model_name='eventdate')
for obj in list_similar_items:
list_event_dates = EventDateModel.objects.filter(event_id=obj['id'], date__gte=today).order_by('date')
list_event_dates = [model_to_dict(obj) for obj in list(list_event_dates)]
if (len(list_event_dates)):
obj['next_date'] = list_event_dates[0]['date']
list_filtered_obj.append(obj)
if (len(list_filtered_obj)):
list_similar_items = sorted(list_filtered_obj, key=lambda x: x['similarity_score'], reverse=True)
else:
list_similar_items = []
if (recommend_type == 'Similar combined with Most popular'):
list_similar_items = order_by_score(table_name, list_similar_items)
for i in range(0, int(quantity)):
if (i < len(list_similar_items)):
similar_obj = list_similar_items[i]
obj = Model.objects.get(id=similar_obj['id'])
obj = model_to_dict(obj)
recommend_item = {}
for field in list(display_fields):
if field in obj:
recommend_item[field] = obj[field]
if (table_name == 'events'):
recommend_item['next_date'] = similar_obj['next_date']
if (recommend_type == 'Similar combined with Most popular'):
recommend_item['popular_score'] = similar_obj['popular_score']
recommend_item['similarity_score'] = similar_obj['similarity_score']
list_recommend_items.append(recommend_item)
if (len(list_recommend_items) == 0):
list_recommend_items = get_upcoming(table_name, quantity)
return list_recommend_items
# Get the list of recommended items for a given level, item type and algorithm
def get_recommend_items(level, item_type, recommend_type, quantity, domain, item_url):
list_recommend_items = []
if (level == 'Homepage'):
if (recommend_type == 'Upcoming'):
if (item_type == 'events'):
list_recommend_items = get_upcoming(table_name=item_type, quantity=quantity)
if (recommend_type == 'Most popular'):
if (item_type == 'events'):
list_recommend_items = get_most_popular(table_name=item_type, quantity=quantity)
elif (item_type == 'products'):
list_recommend_items = get_most_popular(table_name=item_type, quantity=quantity)
elif (level == 'Domain'):
if (recommend_type == 'Upcoming'):
if (item_type == 'events'):
list_recommend_items = get_upcoming(table_name=item_type, quantity=quantity, domain=domain)
if (recommend_type == 'Most popular'):
if (item_type == 'events'):
list_recommend_items = get_most_popular(table_name=item_type, quantity=quantity, domain=domain)
elif (item_type == 'products'):
list_recommend_items = get_most_popular(table_name=item_type, quantity=quantity, domain=domain)
else:
if (item_type == 'events'):
list_recommend_items = get_similar(table_name=item_type, quantity=quantity, item_url=item_url, recommend_type=recommend_type)
elif (item_type == 'products'):
list_recommend_items = get_similar(table_name=item_type, quantity=quantity, item_url=item_url, recommend_type=recommend_type)
return list_recommend_items
@api_view(['GET'])
@authentication_classes([])
@permission_classes([])
def get_list_recommend(request):
try:
# Authorization
bearer_token = request.headers.get('Authorization')
if (bearer_token == 'Bearer ' + API_KEY):
# Read request info
level = request.GET.get('level', None)
item_type = request.GET.get('itemType', None)
recommend_type = request.GET.get('recommendType', None)
quantity = request.GET.get('quantity', None)
domain = request.GET.get('domain', None)
item_url = request.GET.get('itemUrl', None)
list_recommend_items = get_recommend_items(level, item_type, recommend_type, quantity, domain, item_url)
return Response({'itemType': item_type, 'recommendType': recommend_type, 'items': list_recommend_items}, status=status.HTTP_200_OK)
else:
return Response({'message': 'Authorization failed'}, status=status.HTTP_401_UNAUTHORIZED)
except Exception as error:
return Response({'message': error})
def get_embedded_link(api, recommend_type, is_gui=False):
recommendItems = ''
if (recommend_type == 'Upcoming'):
recommendItems = 'upComingItems'
elif (recommend_type == 'Most popular'):
recommendItems = 'popularItems'
elif (recommend_type == 'Similar'):
recommendItems = 'similarItems'
elif (recommend_type == 'Similar combined with Most popular'):
recommendItems = 'popularSimilarItems'
else:
recommendItems = 'upComingItems'
embedded_link = ''
css_link = '<link rel="stylesheet" href="' + IP_DOMAIN + '/static/dimadb/css/recommender.css">'
div_link = '<div id="' + recommendItems + '"></div>'
js_link = '<script src="' + IP_DOMAIN + '/static/dimadb/js/recommender.js' + '"></script>'
recommend_link = '<script>' + '\n'
recommend_link += '\tvar ' + recommendItems + ' = getRecommend("' + api + '", "' + API_KEY + '");' + '\n'
recommend_link += '\t' + recommendItems +'.then(res => {' + '\n'
recommend_link += '\t\t//Handle recommend items here' + '\n'
if (is_gui):
recommend_link += '\t\t//Below code shows recommendation GUI' + '\n'
recommend_link += '\t\tgetListView("' + recommendItems + '", res);' + '\n'
else:
recommend_link += '\t\t//Below code shows recommendation results' + '\n'
recommend_link += '\t\tconsole.log(res);' + '\n'
recommend_link += '\t});' + '\n'
recommend_link += '</script>'
embedded_link = css_link + '\n' + div_link + '\n' + js_link + '\n' + recommend_link
return embedded_link
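# Illustrative only: the snippet returned by get_embedded_link is meant to be pasted into a
# page; it loads recommender.css/recommender.js and calls getRecommend(api, API_KEY), optionally
# rendering a list view when is_gui=True. The arguments below are made-up examples.
def _example_embedded_snippet():
    api = generate_recommend_api('Homepage', 'events', 'Upcoming', '4', None, None)
    return get_embedded_link(api, 'Upcoming', is_gui=True)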
@api_view(['POST'])
def get_recommend_api(request):
try:
# Read request info
body = json.loads(request.body)
level = body['level']
item_type = body['itemType']
recommend_type = body['recommendType']
quantity = body['quantity']
domain = body['domain']
item_url = body['itemUrl']
#Get recommend api + recommend list
api = generate_recommend_api(level, item_type, recommend_type, quantity, domain, item_url)
list_recommend_items = get_recommend_items(level, item_type, recommend_type, quantity, domain, item_url)
embedded_links = [
{
"name": "Script dynamique et intégré dans chaque page (sans la génération des interfaces)",
"link": get_embedded_link(api, recommend_type, is_gui=False),
}, {
"name": "Script dynamique et intégré dans chaque page (avec la génération des interfaces)",
"link": get_embedded_link(api, recommend_type, is_gui=True),
}
]
return Response({
'items': list_recommend_items,
'api': api, 'apiKey': API_KEY,
'embeddedDynamicLinks': embedded_links,
}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
@api_view(['POST'])
def train_similar_recommend(request):
try:
# Read request info
body = json.loads(request.body)
item_type = body['itemType']
# Training
ContentBasedRecommender.train_items_by_items(table_name=item_type)
# Get similarity recommendation training info
similar_train_info = get_similar_train_info()
return Response({'similarTrainInfo': similar_train_info}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
@api_view(['GET'])
def get_recommend_info(request):
try:
# Recommend info
recommend_types = [
{
"name": "Upcoming",
"displayName": "À venir"
}, {
"name": "Most popular",
"displayName": "Les plus populaires"
}, {
"name": "Similar",
"displayName": "Produits similaires"
}, {
"name": "Similar combined with Most popular",
"displayName": "Produits similaires combinés avec les plus populaires"
}
]
recommend_levels = {
"Homepage": {
"displayName": "Page d'accueil",
"algorithms": [recommend_types[0], recommend_types[1]]
},
"Domain": {
"displayName": "Domaine",
"algorithms": [recommend_types[0], recommend_types[1]]
},
"Item": {
"displayName": "Produit",
"algorithms": [recommend_types[2], recommend_types[3]]
}
}
        # Get the list of items and domain values (types) per item type
event_snippets = Events.objects.all()
event_serializer = EventSerializer(event_snippets, many=True)
event_types = Events.objects.values('event_type').distinct()
event_types = [item['event_type'] for item in list(event_types)]
article_snippets = Products.objects.all()
article_serializer = ArticleSerializer(article_snippets, many=True)
article_types = Products.objects.values('product_type').distinct()
article_types = [item['product_type'] for item in list(article_types)]
list_item_infos = {
"events": {
"name": "Événements",
"items": event_serializer.data,
"types": event_types
},
"products": {
"name": "Articles",
"items": article_serializer.data,
"types": article_types
}
}
embedded_links = [
{
"name": "Script fixé et intégré dans la page d'accueil (sans la génération des interfaces)",
"link": get_embedded_recommendation(is_gui=False),
},
{
"name": "Script fixé et intégré dans la page d'accueil (avec la génération des interfaces)",
"link": get_embedded_recommendation(is_gui=True)
}
]
return Response({'embeddedFixedLinks': embedded_links,
'recommendLevels': recommend_levels,
'listItemInfos': list_item_infos}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
# Get history of similarity recommendation training
def get_similar_train_info():
try:
list_item_types = [{'name': 'Événement', 'value': 'events'},
{'name': 'Article', 'value': 'products'}]
for item_type in list_item_types:
# Get total number of trained items
if (LdaSimilarityVersion.objects.filter(item_type=item_type['value']).exists()):
obj = LdaSimilarityVersion.objects.filter(item_type=item_type['value']).latest('created_at')
item_type['latest_training_at'] = str(obj)
item_type['number_trained_items'] = model_to_dict(obj)['n_products']
else:
item_type['latest_training_at'] = ''
item_type['number_trained_items'] = 0
# Get total number of items
Model = apps.get_model(app_label='dimadb', model_name=item_type['value'])
item_type['number_items'] = len(Model.objects.all())
return list_item_types
except Exception as error:
return Response({'message': error})
@api_view(['GET'])
def get_configure_info(request):
try:
similar_train_info = get_similar_train_info()
web_activity_types_f = Interaction_f.objects.values('event_name').distinct()
web_activity_types_f = [item['event_name'] for item in list(web_activity_types_f)]
web_activity_types_ga = Interaction_ga.objects.values('event_name').distinct()
web_activity_types_ga = [item['event_name'] for item in list(web_activity_types_ga)]
web_activity_types = list(dict.fromkeys(web_activity_types_f + web_activity_types_ga))
existed_web_activity_types = WebActivityType.objects.values('name').distinct()
existed_web_activity_types = [item['name'] for item in list(existed_web_activity_types)]
web_activity_types = web_activity_types + existed_web_activity_types
web_activity_types = list(dict.fromkeys(web_activity_types))
web_activity_types = [type for type in web_activity_types if type in ['user_engagement', 'scroll', 'page_view']]
web_activities_info = {}
for activity_type in web_activity_types:
try:
activity_type_obj = WebActivityType.objects.get(name=activity_type)
activity_type_obj = model_to_dict(activity_type_obj)
web_activities_info[activity_type] = activity_type_obj['value']
except:
web_activities_info[activity_type] = 0
return Response({'similarTrainInfo': similar_train_info, 'webActivityInfo': web_activities_info}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
@api_view(['POST'])
def update_activity_weight(request):
try:
        # Read request info
body = json.loads(request.body)
web_activity_types = body['webActivityInfo']
        # Update existing or create new web activity type weights
for type in web_activity_types:
try:
web_activities = list(WebActivityType.objects.filter(name=type))
# Check whether type exists in WebActivityType table
if (len(web_activities)):
web_activity = web_activities[0]
web_activity.value = web_activity_types[type]
web_activity.save()
else:
new_activity_type = WebActivityType(name=type, value=web_activity_types[type])
new_activity_type.save()
except:
new_activity_type = WebActivityType(name=type, value=web_activity_types[type])
new_activity_type.save()
return Response({}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': error})
# Generate report object (info, name, title, data)
def create_report(name, title, data, chart_type, is_change):
return {
'name': name,
'title': title,
'data': data,
'type': chart_type,
'isChange': is_change,
'random': name + str(random.randint(0, 1000)),
}
@api_view(['GET'])
def get_reports(request):
try:
start_date = request.GET.get('startDate', date.today())
end_date = request.GET.get('endDate', date.today())
group_type = request.GET.get('groupBy', 'daily')
reports = []
#Session
if (group_type == 'none'):
sessions_file = Interaction_f.objects.filter(
visit_date__range=[start_date, end_date]).values('session_id').distinct().count()
sessions_ga = Interaction_ga.objects.filter(
date__range=[start_date, end_date]).aggregate(Sum('session_count'))['session_count__sum'] or 0
sessions = [{'type': 'all', 'sum': sessions_file + sessions_ga}]
elif (group_type == 'daily'):
sessions_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).values(
day=F('visit_date')).annotate(sum=Count('session_id', distinct=True))
sessions_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).values(
day=F('date')).annotate(sum=Sum('session_count'))
sessions = list(sessions_file) + list(sessions_ga)
if (len(sessions)):
sessions = pd.DataFrame(sessions).groupby(['day'], as_index=False).sum().to_dict('r')
sessions = sorted(sessions, key=lambda k : k['day'])
elif (group_type == 'weekly'):
sessions_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
week=TruncWeek('visit_date')).values('week').annotate(sum=Count('session_id', distinct=True))
sessions_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
week=TruncWeek('date')).values('week').annotate(sum=Sum('session_count'))
sessions = list(sessions_file) + list(sessions_ga)
if (len(sessions)):
sessions = pd.DataFrame(sessions).groupby(['week'], as_index=False).sum().to_dict('r')
sessions = sorted(sessions, key=lambda k : k['week'])
elif (group_type == 'monthly'):
sessions_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
month=TruncMonth('visit_date')).values('month').annotate(sum=Count('session_id', distinct=True))
sessions_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
month=TruncMonth('date')).values('month').annotate(sum=Sum('session_count'))
sessions = list(sessions_file) + list(sessions_ga)
if (len(sessions)):
sessions = pd.DataFrame(sessions).groupby(['month'], as_index=False).sum().to_dict('r')
sessions = sorted(sessions, key=lambda k : k['month'])
else:
sessions_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
year=TruncYear('visit_date')).values('year').annotate(sum=Count('session_id', distinct=True))
sessions_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
year=TruncYear('date')).values('year').annotate(sum=Sum('session_count'))
sessions = list(sessions_file) + list(sessions_ga)
if (len(sessions)):
sessions = pd.DataFrame(sessions).groupby(['year'], as_index=False).sum().to_dict('r')
sessions = sorted(sessions, key=lambda k : k['year'])
reports.append(create_report('session_report', 'Statistiques de sessions Web',
sessions, 'column', group_type == 'none'))
# Web_activities:
if (group_type == 'none'):
web_activities_file = Interaction_f.objects.filter(
visit_date__range=[start_date, end_date]).all().count()
web_activities_ga = Interaction_ga.objects.filter(
date__range=[start_date, end_date]).aggregate(Sum('event_count'))['event_count__sum'] or 0
web_activities = [{'type': 'all', 'sum': web_activities_file + web_activities_ga}]
elif (group_type == 'daily'):
web_activities_file = Interaction_f.objects.filter(visit_date__range=[
start_date, end_date]).values(day=F('visit_date')).annotate(sum=Count('id'))
web_activities_ga = Interaction_ga.objects.filter(date__range=[
start_date, end_date]).values(day=F('date')).annotate(sum=Sum('event_count'))
web_activities = list(web_activities_file) + list(web_activities_ga)
if (len(web_activities)):
web_activities = pd.DataFrame(web_activities).groupby(['day'], as_index=False).sum().to_dict('r')
web_activities = sorted(web_activities, key=lambda k : k['day'])
elif (group_type == 'weekly'):
web_activities_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
week=TruncWeek('visit_date')).values('week').annotate(sum=Count('id'))
web_activities_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
week=TruncWeek('date')).values('week').annotate(sum=Sum('event_count'))
web_activities = list(web_activities_file) + list(web_activities_ga)
if (len(web_activities)):
web_activities = pd.DataFrame(web_activities).groupby(['week'], as_index=False).sum().to_dict('r')
web_activities = sorted(web_activities, key=lambda k : k['week'])
elif (group_type == 'monthly'):
web_activities_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
month=TruncMonth('visit_date')).values('month').annotate(sum=Count('id'))
web_activities_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
month=TruncMonth('date')).values('month').annotate(sum=Sum('event_count'))
web_activities = list(web_activities_file) + list(web_activities_ga)
if (len(web_activities)):
web_activities = pd.DataFrame(web_activities).groupby(['month'], as_index=False).sum().to_dict('r')
web_activities = sorted(web_activities, key=lambda k : k['month'])
else:
web_activities_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
year=TruncYear('visit_date')).values('year').annotate(sum=Count('id'))
web_activities_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
year=TruncYear('date')).values('year').annotate(sum=Sum('event_count'))
web_activities = list(web_activities_file) + list(web_activities_ga)
if (len(web_activities)):
web_activities = pd.DataFrame(web_activities).groupby(['year'], as_index=False).sum().to_dict('r')
web_activities = sorted(web_activities, key=lambda k : k['year'])
reports.append(create_report('web_activities_report',
'Statistiques d’activités Web', web_activities, 'column', group_type == 'none'))
# Web Activities device_category:
if (group_type == 'none'):
web_activities_device_file = Interaction_f.objects.filter(visit_date__range=[
start_date, end_date]).values(type=F('device_category')).annotate(sum=Count('id'))
web_activities_device_ga = Interaction_ga.objects.filter(date__range=[
start_date, end_date]).values(type=F('device_category')).annotate(sum=Sum('event_count'))
web_activities_device = list(web_activities_device_file) + list(web_activities_device_ga)
if (len(web_activities_device)):
web_activities_device = pd.DataFrame(web_activities_device).groupby(['type'], as_index=False).sum().to_dict('r')
elif (group_type == 'daily'):
web_activities_device_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).values(
day=F('visit_date'), type=F('device_category')).annotate(sum=Count('id'))
web_activities_device_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).values(
day=F('date'), type=F('device_category')).annotate(sum=Sum('event_count'))
web_activities_device = list(web_activities_device_file) + list(web_activities_device_ga)
if (len(web_activities_device)):
                web_activities_device = pd.DataFrame(web_activities_device).groupby(['day', 'type'], as_index=False).sum().to_dict('r')
from linearmodels.compat.statsmodels import Summary
from itertools import product
import struct
from typing import Optional
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
import scipy.sparse as sp
from scipy.sparse import csc_matrix
from linearmodels.iv._utility import annihilate
from linearmodels.iv.absorbing import (
_VARIABLE_CACHE,
AbsorbingLS,
AbsorbingRegressor,
Interaction,
category_continuous_interaction,
category_interaction,
category_product,
clear_cache,
)
from linearmodels.iv.model import _OLS
from linearmodels.iv.results import AbsorbingLSResults, OLSResults
from linearmodels.panel.utility import (
AbsorbingEffectError,
AbsorbingEffectWarning,
dummy_matrix,
)
from linearmodels.shared.exceptions import MissingValueWarning
from linearmodels.shared.utility import AttrDict
NOBS = 100
pytestmark = pytest.mark.filterwarnings(
"ignore:the matrix subclass:PendingDeprecationWarning"
)
class Hasher(object):
@property
def hash_func(self):
try:
import xxhash
return xxhash.xxh64()
except ImportError:
import hashlib
return hashlib.sha256()
def single(self, value):
h = self.hash_func
h.update(np.ascontiguousarray(value))
return h.hexdigest()
hasher = Hasher()
@pytest.fixture(scope="function")
def random_gen(request):
return np.random.RandomState(12345678)
def random_cat(ncat, size, frame=False, rs=None):
if rs is None:
rs = np.random.RandomState()
series = pd.Series(pd.Categorical(rs.randint(0, ncat, size)))
if frame:
return pd.DataFrame(series)
return series
def random_cont(size, rs=None):
if rs is None:
rs = np.random.RandomState()
series = pd.Series(rs.standard_normal(size))
return pd.DataFrame(series)
@pytest.fixture(scope="module", params=[1, 2, 3])
def cat(request):
rs = np.random.RandomState(0)
return pd.DataFrame(
{str(i): random_cat(4, NOBS, rs=rs) for i in range(request.param)}
)
@pytest.fixture(scope="module", params=[1, 2])
def cont(request):
rs = np.random.RandomState(0)
return pd.DataFrame(
{
"cont" + str(i): pd.Series(rs.standard_normal(NOBS))
for i in range(request.param)
}
)
@pytest.fixture(scope="module", params=[True, False])
def weights(request):
if not request.param:
return None
rs = np.random.RandomState(0)
return rs.chisquare(10, NOBS) / 10.0
@pytest.fixture(scope="module", params=[0, 1, 2])
def interact(request):
if not request.param:
return None
rs = np.random.RandomState(0)
interactions = []
for _ in range(request.param):
cat = random_cat(4, 100, frame=True, rs=rs)
cont = random_cont(100, rs=rs)
interactions.append(Interaction(cat, cont))
return interactions
def generate_data(
k=3,
const=True,
nfactors=1,
factor_density=10,
nobs=2000,
cont_interactions=1,
factor_format="interaction",
singleton_interaction=False,
weighted=False,
ncont=0,
):
rs = np.random.RandomState(1234567890)
density = [factor_density] * max(nfactors, cont_interactions)
x = rs.standard_normal((nobs, k))
if const:
x = np.column_stack([np.ones(nobs), x])
e = rs.standard_normal(nobs)
y = x.sum(1) + e
factors = []
for i in range(nfactors):
ncat = nobs // density[min(i, len(density) - 1)]
fact = rs.randint(ncat, size=nobs)
effects = rs.standard_normal(ncat)
y += effects[fact]
factors.append(pd.Series(pd.Categorical(fact)))
for i in range(ncont):
cont = rs.standard_normal(size=nobs)
factors.append(pd.Series(cont))
if factors:
factors = pd.concat(factors, axis=1)
if factor_format == "interaction":
if nfactors and ncont:
factors = Interaction(
factors.iloc[:, :nfactors], factors.iloc[:, nfactors:]
)
elif nfactors:
factors = Interaction(factors, None)
else:
factors = Interaction(None, factors)
else:
factors = None
interactions = []
for i in range(cont_interactions):
ncat = nobs // density[min(i, len(density) - 1)]
fact = rs.randint(ncat, size=nobs)
effects = rs.standard_normal(nobs)
y += effects
df = pd.DataFrame(
pd.Series(pd.Categorical(fact)), columns=["fact{0}".format(i)]
)
df_eff = pd.DataFrame(effects[:, None], columns=["effect_{0}".format(i)])
interactions.append(Interaction(df, df_eff))
if factor_format == "pandas":
for i, interact in enumerate(interactions):
interactions[i] = pd.concat([interact.cat, interact.cont], axis=1)
interactions = interactions if interactions else None
if interactions and singleton_interaction:
interactions = interactions[0]
if weighted:
weights = pd.DataFrame(rs.chisquare(10, size=(nobs, 1)) / 10)
else:
weights = None
return AttrDict(
y=y, x=x, absorb=factors, interactions=interactions, weights=weights
)
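# Quick illustrative helper (not used by the tests): generate_data returns an AttrDict with
# y, x, an optional Interaction holding the absorbed factors, optional interactions and
# optional weights. The shapes below assume the default const=True, so x gains a constant column.
def _example_generate_data_shapes():
    demo = generate_data(k=2, const=True, nfactors=1, nobs=100)
    return demo.y.shape, demo.x.shape  # (100,) and (100, 3)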
# Permutations, k in (0,3), const in (True,False), factors=(0,1,2), interactions in (0,1)
# k=3, const=True, nfactors=1, factor_density=10, nobs=2000, cont_interactions=1,
# format='interaction', singleton_interaction=False
configs = product(
[0, 3], # k
[False, True], # constant
[1, 2, 0], # factors
[10], # density
[2000], # nobs
[0, 1], # cont interactions
["interaction", "pandas"], # format
[False, True], # singleton
[False, True], # weighted
[0, 1], # ncont
)
data_configs = [c for c in configs if (c[2] or c[5] or c[9])]
id_str = (
"k: {0}, const: {1}, nfactors: {2}, density: {3}, nobs: {4}, "
"cont_interacts: {5}, format:{6}, singleton:{7}, weighted: {8}, ncont: {9}"
)
data_ids = [id_str.format(*config) for config in configs]
@pytest.fixture(scope="module", params=data_configs, ids=data_ids)
def data(request):
return generate_data(*request.param)
configs_ols = product(
[0, 3], # k
[False, True], # constant
[1, 2, 0], # factors
[50], # density
[500], # nobs
[0, 1], # cont interactions
["interaction"], # format
[False], # singleton
[False, True], # weighted
[0, 1], # ncont
)
configs_ols_data = [c for c in configs_ols if (c[0] or c[1])]
id_str = (
"k: {0}, const: {1}, nfactors: {2}, density: {3}, nobs: {4}, "
"cont_interacts: {5}, format:{6}, singleton:{7}, weighted: {8}, ncont: {9}"
)
ids_ols_data = [id_str.format(*config) for config in configs_ols]
@pytest.fixture(scope="module", params=configs_ols_data, ids=ids_ols_data)
def ols_data(request):
return generate_data(*request.param)
@pytest.mark.smoke
def test_smoke(data):
mod = AbsorbingLS(
data.y,
data.x,
absorb=data.absorb,
interactions=data.interactions,
weights=data.weights,
)
res = mod.fit()
assert isinstance(res.summary, Summary)
assert isinstance(str(res.summary), str)
def test_absorbing_exceptions(random_gen):
with pytest.raises(TypeError):
absorbed = random_gen.standard_normal((NOBS, 2))
assert isinstance(absorbed, np.ndarray)
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
absorb=absorbed,
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS), random_gen.standard_normal((NOBS - 1, 2))
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
absorb=pd.DataFrame(random_gen.standard_normal((NOBS - 1, 1))),
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=random_cat(10, NOBS - 1, frame=True, rs=random_gen),
)
mod = AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=random_cat(10, NOBS, frame=True, rs=random_gen),
)
with pytest.raises(RuntimeError):
assert isinstance(mod.absorbed_dependent, pd.DataFrame)
with pytest.raises(RuntimeError):
assert isinstance(mod.absorbed_exog, pd.DataFrame)
with pytest.raises(TypeError):
interactions = random_gen.randint(0, 10, size=(NOBS, 2))
assert isinstance(interactions, np.ndarray)
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=interactions,
)
def test_clear_cache():
_VARIABLE_CACHE["key"] = {"a": np.empty(100)}
clear_cache()
assert len(_VARIABLE_CACHE) == 0
def test_category_product(cat):
prod = category_product(cat)
if cat.shape[1] == 1:
assert_series_equal(prod, cat.iloc[:, 0], check_names=False)
else:
alt = cat.iloc[:, 0].astype("int64")
for i in range(1, cat.shape[1]):
alt += 10 ** (4 * i) * cat.iloc[:, i].astype("int64")
alt = pd.Categorical(alt)
alt = pd.Series(alt)
df = pd.DataFrame([prod.cat.codes, alt.cat.codes], index=["cat_prod", "alt"]).T
g = df.groupby("cat_prod").alt
assert (g.nunique() == 1).all()
g = df.groupby("alt").cat_prod
assert (g.nunique() == 1).all()
def test_category_product_too_large(random_gen):
dfc = {}
for i in range(20):
dfc[str(i)] = random_cat(10, 1000)
cat = pd.DataFrame(dfc)
with pytest.raises(ValueError):
category_product(cat)
def test_category_product_not_cat(random_gen):
cat = pd.DataFrame(
{str(i): pd.Series(random_gen.randint(0, 10, 1000)) for i in range(3)}
)
with pytest.raises(TypeError):
category_product(cat)
def test_category_interaction():
c = pd.Series(pd.Categorical([0, 0, 0, 1, 1, 1]))
actual = category_interaction(c, precondition=False).A
expected = np.zeros((6, 2))
expected[:3, 0] = 1.0
expected[3:, 1] = 1.0
assert_allclose(actual, expected)
actual = category_interaction(c, precondition=True).A
cond = np.sqrt((expected**2).sum(0))
expected /= cond
assert_allclose(actual, expected)
def test_category_continuous_interaction():
c = pd.Series(pd.Categorical([0, 0, 0, 1, 1, 1]))
v = pd.Series(np.arange(6.0))
actual = category_continuous_interaction(c, v, precondition=False)
expected = np.zeros((6, 2))
expected[:3, 0] = v[:3]
expected[3:, 1] = v[3:]
assert_allclose(actual.A, expected)
actual = category_continuous_interaction(c, v, precondition=True)
cond = np.sqrt((expected**2).sum(0))
expected /= cond
assert_allclose(actual.A, expected)
def test_category_continuous_interaction_interwoven():
c = pd.Series(pd.Categorical([0, 1, 0, 1, 0, 1]))
v = pd.Series(np.arange(6.0))
actual = category_continuous_interaction(c, v, precondition=False)
expected = np.zeros((6, 2))
expected[::2, 0] = v[::2]
expected[1::2, 1] = v[1::2]
assert_allclose(actual.A, expected)
def test_interaction_cat_only(cat):
interact = Interaction(cat=cat)
assert interact.nobs == cat.shape[0]
assert_frame_equal(cat, interact.cat)
expected = category_interaction(category_product(cat), precondition=False)
actual = interact.sparse
assert isinstance(actual, csc_matrix)
assert_allclose(expected.A, actual.A)
def test_interaction_cont_only(cont):
interact = Interaction(cont=cont)
assert interact.nobs == cont.shape[0]
assert_frame_equal(cont, interact.cont)
expected = cont.to_numpy()
actual = interact.sparse
assert isinstance(actual, csc_matrix)
assert_allclose(expected, actual.A)
def test_interaction_cat_cont(cat, cont):
interact = Interaction(cat=cat, cont=cont)
assert interact.nobs == cat.shape[0]
assert_frame_equal(cat, interact.cat)
assert_frame_equal(cont, interact.cont)
base = category_interaction(category_product(cat), precondition=False).A
expected = []
for i in range(cont.shape[1]):
element = base.copy()
element[np.where(element)] = cont.iloc[:, i].to_numpy()
expected.append(element)
expected = np.column_stack(expected)
actual = interact.sparse
assert isinstance(actual, csc_matrix)
assert_allclose(expected, interact.sparse.A)
def test_interaction_from_frame(cat, cont):
base = Interaction(cat=cat, cont=cont)
interact = Interaction.from_frame(pd.concat([cat, cont], axis=1))
assert_allclose(base.sparse.A, interact.sparse.A)
def test_interaction_cat_bad_nobs():
with pytest.raises(ValueError):
Interaction()
with pytest.raises(ValueError):
Interaction(cat=np.empty((100, 0)), cont=np.empty((100, 0)))
def test_empty_interaction():
interact = Interaction(nobs=100)
assert isinstance(interact.sparse, csc_matrix)
assert interact.sparse.shape == (100, 0)
def test_interaction_cat_cont_convert(cat, cont):
base = Interaction(cat, cont)
interact = Interaction(cat.to_numpy(), cont)
assert_allclose(base.sparse.A, interact.sparse.A)
def test_absorbing_regressors(cat, cont, interact, weights):
areg = AbsorbingRegressor(
cat=cat, cont=cont, interactions=interact, weights=weights
)
rank = areg.approx_rank
expected_rank = 0
expected = []
for i, col in enumerate(cat):
expected_rank += pd.Series(cat[col].cat.codes).nunique() - (i > 0)
expected.append(dummy_matrix(cat, precondition=False)[0])
expected_rank += cont.shape[1]
expected.append(csc_matrix(cont))
if interact is not None:
for inter in interact:
interact_mat = inter.sparse
expected_rank += interact_mat.shape[1]
expected.append(interact_mat)
expected = sp.hstack(expected, format="csc")
if weights is not None:
expected = (sp.diags(np.sqrt(weights)).dot(expected)).asformat("csc")
actual = areg.regressors
assert expected.shape == actual.shape
assert_array_equal(expected.indptr, actual.indptr)
assert_array_equal(expected.indices, actual.indices)
assert_allclose(expected.A, actual.A)
assert expected_rank == rank
def test_absorbing_regressors_hash(cat, cont, interact, weights):
areg = AbsorbingRegressor(
cat=cat, cont=cont, interactions=interact, weights=weights
)
# Build hash
hashes = []
for col in cat:
hashes.append((hasher.single(cat[col].cat.codes.to_numpy().data),))
for col in cont:
hashes.append((hasher.single(cont[col].to_numpy().data),))
hashes = sorted(hashes)
if interact is not None:
for inter in interact:
hashes.extend(inter.hash)
if weights is not None:
hashes.append((hasher.single(weights.data),))
hashes = tuple(sorted(hashes))
assert hashes == areg.hash
def test_empty_absorbing_regressor():
areg = AbsorbingRegressor()
assert areg.regressors.shape == (0, 0)
assert areg.hash == tuple()
def test_against_ols(ols_data):
mod = AbsorbingLS(
ols_data.y,
ols_data.x,
absorb=ols_data.absorb,
interactions=ols_data.interactions,
weights=ols_data.weights,
)
res = mod.fit()
absorb = []
has_dummy = False
if ols_data.absorb is not None:
absorb.append(ols_data.absorb.cont.to_numpy())
if ols_data.absorb.cat.shape[1] > 0:
dummies = dummy_matrix(ols_data.absorb.cat, precondition=False)[0]
assert isinstance(dummies, sp.csc_matrix)
absorb.append(dummies.A)
has_dummy = ols_data.absorb.cat.shape[1] > 0
if ols_data.interactions is not None:
for interact in ols_data.interactions:
absorb.append(interact.sparse.A)
_x = ols_data.x
if absorb:
absorb = np.column_stack(absorb)
if np.any(np.ptp(_x, 0) == 0) and has_dummy:
if ols_data.weights is None:
absorb = annihilate(absorb, np.ones((absorb.shape[0], 1)))
else:
root_w = np.sqrt(mod.weights.ndarray)
wabsorb = annihilate(root_w * absorb, root_w)
absorb = (1.0 / root_w) * wabsorb
rank = np.linalg.matrix_rank(absorb)
if rank < absorb.shape[1]:
a, b = np.linalg.eig(absorb.T @ absorb)
order = np.argsort(a)[::-1]
a, b = a[order], b[:, order]
z = absorb @ b
absorb = z[:, :rank]
_x = np.column_stack([_x, absorb])
ols_mod = _OLS(ols_data.y, _x, weights=ols_data.weights)
ols_res = ols_mod.fit()
assert_results_equal(ols_res, res)
def test_cache():
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
first = len(_VARIABLE_CACHE)
mod = AbsorbingLS(
gen.y, gen.x, absorb=gen.absorb.iloc[:, :1], interactions=gen.interactions
)
mod.fit()
second = len(_VARIABLE_CACHE)
mod = AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
mod.fit()
third = len(_VARIABLE_CACHE)
assert third - second == 1
assert second - first == 1
mod = AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
mod.fit()
fourth = len(_VARIABLE_CACHE)
assert fourth - third == 0
def test_instruments():
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
mod = AbsorbingLS(
gen.y, gen.x, absorb=gen.absorb.iloc[:, :1], interactions=gen.interactions
)
assert mod.instruments.shape[1] == 0
def assert_results_equal(
o_res: OLSResults, a_res: AbsorbingLSResults, k: Optional[int] = None
) -> None:
if k is None:
k = a_res.params.shape[0]
attrs = [v for v in dir(o_res) if not v.startswith("_")]
callables = ["conf_int"]
skip = [
"summary",
"test_linear_constraint",
"predict",
"model",
"f_statistic",
"wald_test",
"method",
"kappa",
]
for attr in attrs:
if attr in skip:
continue
left = getattr(o_res, attr)
right = getattr(a_res, attr)
if attr in callables:
left = left()
right = right()
if isinstance(left, np.ndarray):
raise NotImplementedError
elif isinstance(left, pd.DataFrame):
if attr == "conf_int":
left = left.iloc[:k]
elif attr == "cov":
left = left.iloc[:k, :k]
assert_allclose(left, right, rtol=2e-4, atol=1e-6)
elif isinstance(left, pd.Series):
assert_allclose(left.iloc[:k], right.iloc[:k], rtol=1e-5)
else:
if isinstance(left, float):
assert_allclose(left, right, atol=1e-10)
else:
assert left == right
assert isinstance(a_res.summary, Summary)
assert isinstance(str(a_res.summary), str)
assert isinstance(a_res.absorbed_effects, pd.DataFrame)
tol = 1e-4 if (8 * struct.calcsize("P")) < 64 else 0.0
assert a_res.absorbed_rsquared <= (a_res.rsquared + tol)
def test_center_cov_arg():
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
mod = AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
res = mod.fit(center=True)
assert "center" not in res.cov_config
def test_drop_missing():
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
gen.y[::53] = np.nan
gen.x[::79] = np.nan
with pytest.warns(MissingValueWarning):
AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
gen = generate_data(
2, True, 2, factor_format="pandas", ncont=0, cont_interactions=1
)
for col in gen.absorb:
gen.absorb[col] = gen.absorb[col].astype("int64").astype("object")
col_iloc = gen.absorb.columns.get_loc(col)
gen.absorb.iloc[::91, col_iloc] = np.nan
gen.absorb[col] = pd.Categorical(gen.absorb[col].to_numpy())
with pytest.warns(MissingValueWarning):
AbsorbingLS(gen.y, gen.x, absorb=gen.absorb, interactions=gen.interactions)
def test_drop_absorb(random_gen):
absorb = random_gen.randint(0, 10, size=1000)
x = random_gen.standard_normal((1000, 3))
y = random_gen.standard_normal((1000))
dfd = {f"x{i}": pd.Series(x[:, i]) for i in range(3)}
dfd.update({"c": pd.Series(absorb, dtype="category"), "y": pd.Series(y)})
df = pd.DataFrame(dfd)
y = df.y
x = df.iloc[:, :3]
    x = pd.concat([x, pd.get_dummies(df.c)], axis=1)
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
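# Small illustrative check (synthetic data, not part of the pipeline): realized volatility of a
# simulated price path, chaining the helpers above. The 1e-4 return scale is an arbitrary choice.
def _example_realized_volatility(n_points=600, seed=0):
    rng = np.random.RandomState(seed)
    prices = pd.Series(np.exp(np.cumsum(rng.normal(0, 1e-4, n_points))))
    return realized_volatility(log_return(prices).dropna())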
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
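# Tiny worked example of the WAP formula above, on a made-up one-row book snapshot:
# (99*30 + 101*10) / (10 + 30) = 99.5, i.e. closer to the bid because the ask side is deeper.
def _example_wap():
    snapshot = pd.DataFrame({'bid_price1': [99.0], 'ask_price1': [101.0],
                             'bid_size1': [10], 'ask_size1': [30]})
    return compute_wap(snapshot)  # Series([99.5])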
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
    from arch import arch_model  # imported here so the module still loads when arch is not installed
    model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
    # keep only the columns needed to build the midprice series
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
    if book_stock_time.empty or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
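# Small sketch of the resampling step used in the entropy helpers above: nearest-neighbour
# interpolation puts the WAP on an evenly spaced one-second grid before sample entropy is
# computed. The values below are made up.
def _example_resample_nearest():
    seconds = np.array([0, 5, 9])
    wap = np.array([1.00, 1.01, 0.99])
    nearest = interp1d(seconds, wap, kind='nearest')
    return nearest(np.arange(np.min(seconds), np.max(seconds)))  # 9 evenly spaced samples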
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
    if book_stock_time.empty or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
    if book_stock_time.empty or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
        print('Computing features for stock id = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
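# Minimal sketch of the one-hot stock encoding appended at the end of
# computeFeatures_1 above (synthetic ids, purely illustrative).
def _demo_stock_one_hot():
    all_stocks_ids = np.array([0, 1, 3, 7])
    encoder = np.eye(len(all_stocks_ids))
    # Row for stock_id 3 -> array([[0., 0., 1., 0.]])
    return encoder[np.where(all_stocks_ids == 3)[0], :]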
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
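# Worked micro-example of the wap and realized-volatility definitions above,
# on a tiny synthetic level-1 book (numbers are made up for illustration).
def _demo_wap_and_rv():
    demo = pd.DataFrame({
        'bid_price1': [0.999, 1.000, 1.001],
        'ask_price1': [1.001, 1.002, 1.003],
        'bid_size1': [10, 8, 12],
        'ask_size1': [9, 11, 10],
    })
    wap = calc_wap(demo)                          # size-weighted level-1 price
    log_ret = np.diff(np.log(wap))                # tick-to-tick log returns
    rv = np.sqrt(np.sum(np.square(log_ret[1:])))  # same formula as calc_rv_from_wap_numba
    return wap, rv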
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # Abs to take
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
    return [wap_imbalance_mean, price_spread_mean, bid_spread_mean, ask_spread_mean, total_volume_mean, volume_imbalance_mean,
            wap_imbalance_sum, price_spread_sum, bid_spread_sum, ask_spread_sum, total_volume_sum, volume_imbalance_sum,
            wap_imbalance_std, price_spread_std, bid_spread_std, ask_spread_std, total_volume_std, volume_imbalance_std,
            wap_imbalance_max, price_spread_max, bid_spread_max, ask_spread_max, total_volume_max, volume_imbalance_max,
            wap_imbalance_min, price_spread_min, bid_spread_min, ask_spread_min, total_volume_min, volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_5'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_5'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_july(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_5'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_5'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
        book_stock['wap4'] = calc_wap4(book_stock)
        book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
            df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_300'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_300'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_300'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv4_300'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
            df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_480'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_480'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_480'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv4_480'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent_noCode(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
        book_stock['wap4'] = calc_wap4(book_stock)
        book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
            df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_300'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_300'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_300'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv4_300'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
            df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_480'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_480'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_480'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv4_480'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
return df_book_features
def computeFeatures_newTest_Laurent_wTrades(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
list_trades1, list_trades2 = [], []
list_vlad_book, list_vlad_trades = [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
trades_stock = load_trades_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
trades_stock = load_trades_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap4(book_stock)
book_stock['mid_price'] = mid_price(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd =
|
pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
def time_since_storm(precipitation, perc_snow, time_step=1/24, mass=1.0,
time=4, stormDays=None, stormPrecip=None, ps_thresh=0.5):
"""
Calculate the decimal days since the last storm given a precip time series,
percent snow, mass threshold, and time threshold
- Will look for pixels where perc_snow > 50% as storm locations
- A new storm will start if the mass at the pixel has exceeded the mass
      limit; this ensures that enough has accumulated
Args:
precipitation: Precipitation values
perc_snow: Percent of precipitation that was snow
time_step: Step in days of the model run
mass: Threshold for the mass to start a new storm
time: Threshold for the time to start a new storm
stormDays: If specified, this is the output from a previous run of
storms else it will be set to the date_time value
stormPrecip: Keeps track of the total storm precip
Returns:
tuple:
- **stormDays** - Array representing the days since the last storm at
a pixel
- **stormPrecip** - Array representing the precip accumulated during
the most recent storm
    Created January 5, 2016
@author: <NAME>
"""
# either preallocate or use the input
if stormDays is None:
stormDays = np.zeros(precipitation.shape)
if stormPrecip is None:
stormPrecip = np.zeros(precipitation.shape)
# if there is no snow, don't reset the counter
# This ensures that the albedo won't be reset
stormDays += 1
if np.sum(perc_snow) == 0:
# stormDays = np.add(stormDays, 1)
stormPrecip = np.zeros(precipitation.shape)
return stormDays, stormPrecip
# determine locations where it has snowed
idx = perc_snow >= ps_thresh
# determine locations where the time threshold has passed
# these areas, the stormPrecip will be set back to zero
idx_time = stormDays >= time
stormPrecip[idx_time] = 0
# add the values to the stormPrecip
    stormPrecip[idx] += precipitation[idx]
# see if the mass threshold has been passed
idx_mass = stormPrecip >= mass
# reset the stormDays to zero where the storm is present
stormDays[idx_mass] = 0
return stormDays, stormPrecip
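# Hedged usage sketch for time_since_storm above: stepping it through three
# hourly inputs on a tiny 2x2 grid with default thresholds. All arrays are
# synthetic and purely illustrative.
def _demo_time_since_storm():
    shape = (2, 2)
    storm_days, storm_precip = None, None
    hourly_precip = [np.full(shape, 0.6), np.full(shape, 0.6), np.zeros(shape)]
    hourly_psnow = [np.ones(shape), np.ones(shape), np.zeros(shape)]
    for precip, psnow in zip(hourly_precip, hourly_psnow):
        storm_days, storm_precip = time_since_storm(
            precip, psnow, time_step=1/24,
            stormDays=storm_days, stormPrecip=storm_precip)
    return storm_days, storm_precip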
def time_since_storm_pixel(precipitation, dpt, perc_snow, storming,
time_step=1/24, stormDays=None, mass=1.0,
ps_thresh=0.5):
"""
Calculate the decimal days since the last storm given a precip time series
- Will assign decimal days since last storm to every pixel
Args:
precipitation: Precipitation values
dpt: dew point values
perc_snow: percent_snow values
        storming: if it is storming
        time_step: step in days of the model run
        stormDays: uniform days since last storm on pixel basis
mass: Threshold for the mass to start a new storm
ps_thresh: Threshold for percent_snow
Returns:
stormDays: days since last storm on pixel basis
Created October 16, 2017
@author: <NAME>
"""
# either preallocate or use the input
if stormDays is None:
stormDays = np.zeros(precipitation.shape)
# add timestep
stormDays += time_step
    # only reset if storming and not overly warm
if storming and dpt.min() < 2.0:
# determine location where there is enough mass
idx_mass = precipitation >= mass
# determine locations where it has snowed
idx = perc_snow >= ps_thresh
# reset the stormDays to zero where the storm is present
stormDays[(idx_mass & idx)] = 0
return stormDays
def tracking_by_station(precip, mass_thresh=0.01, steps_thresh=3):
"""
Processes the vector station data prior to the data being distributed
    Args:
        precip: DataFrame of measured station precipitation, indexed by
            date time with one column per station
        mass_thresh: mass amount that constitutes a real precip event,
            default = 0.01.
        steps_thresh: Number of time steps without precip that constitutes
            the end of a precip event, default = 3 steps (typically 3 hours)
    Returns:
        tuple:
            - **storms** - DataFrame of storms, one row per storm, holding the
              storm start, end and the mass accumulated at each station, e.g.
              {start:date_time1, end:date_time2, 'BOG1':100, 'ATL1':85}
            - **storm_count** - Total number of storms found
Created April 24, 2017
@author: <NAME>
"""
storm_columns = ['start', 'end']
stations = list(precip)
storm_columns += stations
storms = []
stations = list(precip)
is_storming = False
time_steps_since_precip = 0
for i, row in precip.iterrows():
time = pd.Timestamp(i)
        # Storm identification
if row.max() > mass_thresh:
# Start a new storm
if not is_storming:
new_storm = {}
new_storm['start'] = time
for sta, p in row.iteritems():
new_storm[sta] = 0
# Create a new row
is_storming = True
time_steps_since_precip = 0
# Always add the latest end date to avoid unclosed storms
new_storm['end'] = time
# Accumulate precip for storm total
for sta, mass in row.iteritems():
new_storm[sta] += mass
elif is_storming and time_steps_since_precip < steps_thresh:
new_storm['end'] = time
time_steps_since_precip += 1
if time_steps_since_precip >= steps_thresh and is_storming:
is_storming = False
storms.append(new_storm)
# print "=="*10 + "> not storming!"
# Append the last storm if we ended during a storm
if is_storming:
storms.append(new_storm)
storm_count = len(storms)
# Make sure we have storms
if storm_count == 0:
empty_data = {}
for col in storm_columns:
empty_data[col] = []
storms = pd.DataFrame(empty_data)
else:
storms = pd.DataFrame(storms)
return storms, storm_count
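# Hedged usage sketch for tracking_by_station above: a two-station hourly
# precip DataFrame containing one short storm. Station names and values are
# synthetic and purely illustrative.
def _demo_tracking_by_station():
    times = pd.date_range('2017-01-01', periods=6, freq='H')
    precip = pd.DataFrame({'BOG1': [0.0, 0.2, 0.3, 0.0, 0.0, 0.0],
                           'ATL1': [0.0, 0.1, 0.2, 0.0, 0.0, 0.0]},
                          index=times)
    return tracking_by_station(precip, mass_thresh=0.01, steps_thresh=3)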
def tracking_by_basin(precipitation, time, storm_lst, time_steps_since_precip,
is_storming, mass_thresh=0.01, steps_thresh=2):
"""
Args:
precipitation: precipitation values
time: Time step that smrf is on
time_steps_since_precip: time steps since the last precipitation
storm_lst: list that store the storm cycles in order. A storm is
recorded by its start and its end. The list
is passed by reference and modified internally.
Each storm entry should be in the format of:
[{start:Storm Start, end:Storm End}]
e.g.
[
{start:date_time1,end:date_time2},
{start:date_time3,end:date_time4},
]
            would be two storms
        mass_thresh: mass amount that constitutes a real precip
            event, default = 0.01.
        steps_thresh: Number of time steps that constitutes the end of
            a precip event, default = 2 steps (typically 2 hours)
Returns:
tuple:
storm_lst - updated storm_lst
time_steps_since_precip - updated time_steps_since_precip
is_storming - True or False whether the storm is ongoing or not
Created March 3, 2017
@author: <NAME>
"""
# print "--"*10 +"> Max precip = {0}".format(precipitation.max())
if precipitation.max() > mass_thresh:
# Start a new storm
if len(storm_lst) == 0 or not is_storming:
storm_lst.append({'start': time, 'end': None})
is_storming = True
# always append the most recent timestep to avoid unended storms
storm_lst[-1]['end'] = time
time_steps_since_precip = 0
elif is_storming and time_steps_since_precip < steps_thresh:
time_steps_since_precip += 1
if time_steps_since_precip >= steps_thresh:
is_storming = False
# print "--"*10 + "> not storming!"
return storm_lst, time_steps_since_precip, is_storming
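# A minimal sketch (assumed, not from the original module) of how
# tracking_by_basin is meant to be driven: it is called once per time step and
# the running state (storm_lst, time_steps_since_precip, is_storming) is fed
# back in. The hourly values below are invented purely for illustration.
import pandas as pd

basin_precip = pd.Series(
    [0, 0.6, 0.2, 0, 0, 0],
    index=pd.date_range('2017-03-01', periods=6, freq='H'))
demo_storm_lst = []
demo_steps_since = 0
demo_is_storming = False
for t, value in basin_precip.items():
    demo_storm_lst, demo_steps_since, demo_is_storming = tracking_by_basin(
        pd.Series([value]), t, demo_storm_lst, demo_steps_since,
        demo_is_storming, mass_thresh=0.01, steps_thresh=2)
print(demo_storm_lst)   # one storm: [{'start': <2nd step>, 'end': <3rd step>}]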
def clip_and_correct(precip, storms, stations=[]):
"""
Meant to go along with the storm tracking; we correct the data here by
adding back the precip we would otherwise miss by ignoring it. This is mostly
because we will get rain-on-snow events when there is snow and, because of the
storm definitions, still try to distribute precip data.
Args:
precip: Vector station data representing the measured precipitation
storms: Storm list with dictionaries as defined in
:func:`~smrf.envphys.storms.tracking_by_station`
stations: Desired stations that are being used for clipping. If
stations is not passed, then use all in the dataframe
Returns:
The corrected precip, which ensures there is no precip outside of the
defined storms, with the clipped amount of precip proportionally added
back to the storms.
Created May 3, 2017
@author: <NAME>
"""
# Specify zeros where we were not storming
precip_clipped = precip.copy()
precip_clipped[:] = 0
for j, storm in storms.iterrows():
storm_start = storm['start']
storm_end = storm['end']
my_slice = precip.loc[storm_start:storm_end]
precip_clipped.loc[storm_start:storm_end] = my_slice
correction = {}
if len(stations) == 0:
stations = precip.columns
# Compute correction factors so the clipped precip sums match the original totals.
for station in stations:
original = precip[station].sum()
clipped = precip_clipped[station].sum()
if original == 0:
c = 1.0
elif clipped == 0:
c = 0
else:
c = original/clipped
correction[station] = c
return precip_clipped.mul(
|
pd.Series(correction)
|
pandas.Series
|
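# A small usage sketch (assumed, not part of the original source) for the
# clip_and_correct function in the snippet above: build a toy precip record,
# find its storms, then clip and correct it so all precip mass lands inside
# the detected storms while the station total is preserved.
import pandas as pd

toy = pd.DataFrame(
    {'BOG1': [0, 0.5, 0.3, 0, 0, 0, 0, 0.005]},
    index=pd.date_range('2017-05-01', periods=8, freq='H'))
toy_storms, n_storms = tracking_by_station(toy, mass_thresh=0.01,
                                           steps_thresh=3)
corrected = clip_and_correct(toy, toy_storms)
# The trailing 0.005 falls outside the detected storm, so it is zeroed and its
# mass is proportionally added back inside the storm; totals are unchanged.
assert abs(corrected['BOG1'].sum() - toy['BOG1'].sum()) < 1e-9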
import sys
from copy import copy
import pandas as pd
from backlogapi import BacklogClient, Issue, IssueAttachment
_GET_ISSUE_QUERY = {
'projectId[]': 0,
'count': 100,
'createdUntil': '2000-01-01',
'updatedUntil': '2000-01-01',
'attachment': 'true',
}
def kei_get_project_id(client):
"""
Get all projects and return their id and name
"""
projects = client.project.all()
return [(x.id, x.name) for x in projects]
def kei_get_issues(client, query):
issues = client.issue.filter(query_params=query, upper_limit=True)
for issue in issues:
yield issue
def create_issues_report(issue_gene, csv_data_name):
issues = [(x.id, x.name, x.issue_key, x.created, x.updated, x.parend_issue_id)
for x in list(copy(issue_gene))]
issues_frame = pd.DataFrame(issues,
columns=['issue_id',
'issue_name',
'issue_key',
'created',
'updated',
'parentIssueId']).fillna(0)
issues_frame.to_csv(csv_data_name + '.csv', encoding='utf-8', index=False)
return issues_frame
def kei_delete_issues(client, issues):
for issue_id in issues['issue_id']:
try:
client.issue.delete(issue_id)
except Exception:
print('Invalid issue id')
print('Success')
def kei_delete_attachments(client, attachments):
for issue_id, attachment_id in zip(attachments['issue_id'], attachments['attachment_id']):
try:
Issue(client, issue_id).get_attachments(attachment_id)[0].delete()
except Exception:
print('Invalid issue id or attachment id')
print('Success')
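# A hypothetical, self-contained check (not part of the original script) of
# create_issues_report: lightweight stand-in objects expose the same attributes
# the function reads (including the parend_issue_id name used above), so no
# real backlogapi Issue objects or API access are needed. The output file name
# 'demo_issues' is made up.
from collections import namedtuple

FakeIssue = namedtuple(
    'FakeIssue',
    ['id', 'name', 'issue_key', 'created', 'updated', 'parend_issue_id'])
fake_issues = (
    FakeIssue(1, 'Bug A', 'PRJ-1', '2018-01-01', '2018-01-02', None),
    FakeIssue(2, 'Bug B', 'PRJ-2', '2018-01-03', '2018-01-04', 1),
)
demo_frame = create_issues_report(fake_issues, 'demo_issues')
print(demo_frame.head())   # also writes demo_issues.csv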
def main(flag, client):
if flag == 'projects':
projects = pd.DataFrame(kei_get_project_id(client), columns=['project_id', 'project_name'])
projects.to_csv('projects-report.csv', index=False)
elif flag == 'issues_list.csv':
query_data =
|
pd.read_csv(flag)
|
pandas.read_csv
|
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, pd.Series(object, index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool_), ("c", np.float64)])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C":
|
date_range("20130101", periods=3, tz="CET")
|
pandas.date_range
|
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
|
tm.assert_index_equal(ordered, expected[::-1])
|
pandas.util.testing.assert_index_equal
|
"""
Функции и классы для проведения WoE-преобразований
"""
import math
import warnings
import numpy as np
import pandas as pd
import sklearn as sk
from IPython.display import display
from matplotlib import pyplot as plt
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from tqdm.auto import tqdm
class _GroupedPredictor(pd.DataFrame):
"""
Вспомогательный класс для удобства доступа к некоторым данным
"""
def get_predictor(self, x):
"""
Получение подвыборки по имени предиктора(ов)
Parameters
---------------
x : str/int/list-like
Предиктор или список предикторов
Returns:
-----------
self : pd.DataFrame
Часть датафрейма (самого себя)
"""
if isinstance(x, (list, set, tuple)):
return self[self["predictor"].isin(x)]
else:
return self[self["predictor"] == x]
def append(self, other):
return _GroupedPredictor(super().append(other))
class WoeTransformer(TransformerMixin, BaseEstimator):
"""Класс для построения и применения WOE группировки к датасету
Parameters
----------
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
save_data : bool, default False
Параметр, определяющий, нужно ли сохранить данные для обучения
трансформера внутри экземпляра класса
join_bad_categories : bool, default False
Определяет, должени ли трансформер предпринять попытку для объединения
катогориальных групп в более крупные
Warning
-------
join_bad_categories - Экспериментальная функция.
Способ группировки категорий нестабилен
Attributes
----------
stats : pandas.DataFrame
Результаты WOE-группировки по всем предикторам
predictors : list
Список предикторов, на которых была построена группировка
cat_values : dict[str, list]
Словарь со списками категорий по предикторам, переданный при обучении
alpha_values : dict[str, float]
Словарь со значениями alpha для регуляризации групп
possible groups : pandas.DataFrame
Данные о значениях предиктора, которые могли бы стать
отдельными категориями
bad_groups : pandas.DataFrame
Данные о группах, которые не удовлетворяют условиям
"""
def __repr__(self):
return "WoeTransformer(min_sample_rate={!r}, min_count={!r}, n_fitted_predictors={!r})".format(
self.min_sample_rate,
self.min_count,
len(self.predictors),
)
def __init__(
self,
min_sample_rate: float = 0.05,
min_count: int = 3,
save_data: bool = False,
join_bad_categories: bool = False,
):
"""
Инициализация экземпляра класса
"""
self.min_sample_rate = min_sample_rate
self.min_count = min_count
self.predictors = []
self.alpha_values = {}
self.save_data = save_data
self.join_bad_categories = join_bad_categories
# -------------------------
# Функции интерфейса класса
# -------------------------
def fit(self, X, y, cat_values={}, alpha_values={}):
"""
Обучение трансформера и расчет всех промежуточных данных
Parameters
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
cat_values : dict[str, list[str]], optional
Словарь списков с особыми значениями, которые нужно
выделить в категории
По умолчанию все строковые и пропущенные значения
выделяются в отдельные категории
alpha_values : dict[str, float], optional
Словарь со значениями alpha для регуляризации WOE-групп
Returns
-------
self : WoeTransformer
"""
# Сброс текущего состояния трансформера
self._reset_state()
# Сохранение категориальных значений
self.cat_values = cat_values
# Валидация данных и решейпинг
if hasattr(self, "_validate_data"):
X, y = self._validate_and_convert_data(X, y)
if self.save_data:
self.data = X
self.target = y
# Инициализация коэффициентов для регуляризации групп
self.alpha_values = {i: 0 for i in X.columns}
self.alpha_values.update(alpha_values)
# Агрегация значений предикторов
self._grouping(X, y)
# Расчет WOE и IV
self._fit_numeric(X, y)
# Поиск потенциальных групп
# Поиск "плохих" групп
self._get_bad_groups()
return self
def transform(self, X, y=None):
"""
Применение обученного трансформера к новым данным
Parameters
---------------
X : pandas.DataFrame
Датафрейм, который нужно преобразовать
Предикторы, которые не были сгруппированы ранее, будут
проигнорированы и выведется сообщение
y : pandas.Series
Игнорируется
Returns
-----------
transformed : pandas.DataFrame
Преобразованный датасет
"""
transformed = pd.DataFrame()
if hasattr(self, "_validate_data"):
try:
X, y = self._validate_and_convert_data(X, y)
except AttributeError:
pass
for i in X:
if i in self.predictors:
try:
transformed[i] = self._transform_single(X[i])
except Exception as e:
print(f"Transform failed on predictor: {i}", e)
else:
print(f"Column is not in fitted predictors list: {i}")
return transformed
def fit_transform(self, X, y, cat_values={}, alpha_values={}):
"""
Обучение трансформера и расчет всех промежуточных данных
с последующим примененим группировки к тем же данным
Parameters
---------------
X : pandas.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pandas.Series
Целевая переменная
cat_values : dict[str, list[str]], optional
Словарь списков с особыми значениями, которые нужно
выделить в категории
По умолчанию все строковые и пропущенные значения
выделяются в отдельные категории
alpha_values : dict[str, float], optional
Словарь со значениями alpha для регуляризации WOE-групп
Returns
-----------
transformed : pd.DataFrame
Преобразованный датасет
"""
self.fit(X, y, cat_values=cat_values, alpha_values=alpha_values)
return self.transform(X)
def plot_woe(self, predictors=None):
"""
Отрисовка одного или нескольких графиков группировки
Parameters
---------------
predictors : str or array, default None
Предиктор(ы), по которым нужны графики
-- если str - отрисовывается один график
-- если array - отрисовываются графики из списка
-- если None - отрисовываются все сгруппированные предикторы
Warning
-------
Запуск метода без аргументов может занять длительное время при большом
количестве предикторов
"""
if predictors is None:
predictors = self.predictors
elif isinstance(predictors, str):
predictors = [predictors]
elif isinstance(predictors, (list, tuple, set)):
predictors = predictors
_, axes = plt.subplots(figsize=(10, len(predictors) * 5), nrows=len(predictors))
try:
for i, col in enumerate(predictors):
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes[i])
except TypeError:
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes)
# return fig
def get_iv(self, sort=False):
"""Получение списка значений IV по предикторам
Parameters
----------
sort : bool, default False
Включает сортировку результата по убыванию IV
Returns
-------
pandas.Series
"""
try:
res = self.stats.groupby("predictor")["IV"].sum()
if sort:
res = res.sort_values(ascending=False)
res = dict(res)
except AttributeError as e:
print(f"Transformer was not fitted yet. {e}")
res = {}
return res
# -------------------------
# Внутренние функции над всем датасетом
# -------------------------
def _validate_and_convert_data(self, X, y):
"""Проверяеn входные данные, трансформирует в объекты pandas
Использует метод _validate_data из sklearn/base.py
"""
if hasattr(X, "columns"):
predictors = X.columns
else:
predictors = ["X" + str(i + 1) for i in range(X.shape[1])]
if y is None:
X_valid = self._validate_data(X, y, dtype=None, force_all_finite=False)
X_valid = pd.DataFrame(X, columns=predictors)
y_valid = None
else:
X_valid, y_valid = self._validate_data(
X, y, dtype=None, force_all_finite=False
)
y_valid = pd.Series(y, name="target")
X_valid = pd.DataFrame(X, columns=predictors)
return X_valid, y_valid
def _grouping(self, X, y):
"""
Применение группировки ко всем предикторам
"""
df = X.copy()
df = df.fillna("пусто")
df["target"] = y.copy()
# Группировка и расчет показателей
for col in df.columns[:-1]:
grouped_temp = self._group_single(df[col], y)
num_mask = self._get_nums_mask(grouped_temp["value"])
cat_val_mask = grouped_temp["value"].isin(self.cat_values.get(col, []))
is_all_categorical = all(~num_mask | cat_val_mask)
if self.join_bad_categories and is_all_categorical:
repl = self._get_cat_values_for_join(grouped_temp)
grouped_temp = self._group_single(df[col].replace(repl), y)
self.grouped = self.grouped.append(grouped_temp)
# Замена пустых значений обратно на np.nan ИЛИ преобразование в числовой тип
try:
self.grouped["value"] = self.grouped["value"].replace({"пусто": np.nan})
except TypeError:
self.grouped["value"] = pd.to_numeric(
self.grouped["value"], downcast="signed"
)
def _fit_numeric(self, X, y):
"""
Расчет WOE и IV
Parameters:
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
Returns
-------
None
"""
res = pd.DataFrame()
for i in X:
res_i = self._fit_single(X[i], y)
res = res.append(res_i)
self.predictors.append(i)
self.stats = self.stats.append(res)
# -------------------------
# Внутренние функции над отдельными столбцами
# -------------------------
def _group_single(self, x, y):
"""
Агрегация данных по значениям предиктора.
Рассчитывает количество наблюдений,
количество целевых событий, долю группы от общего числа наблюдений
и долю целевых в группе
Parameters:
---------------
X : pandas.DataFrame
Таблица данных для агрегации
y : pandas.Series
Целевая переменная
"""
col = x.name
df = pd.DataFrame({col: x.values, "target": y.values})
grouped_temp = df.groupby(col)["target"].agg(["count", "sum"]).reset_index()
grouped_temp.columns = ["value", "sample_count", "target_count"]
grouped_temp["sample_rate"] = (
grouped_temp["sample_count"] / grouped_temp["sample_count"].sum()
)
grouped_temp["target_rate"] = (
grouped_temp["target_count"] / grouped_temp["sample_count"]
)
grouped_temp.insert(0, "predictor", col)
return _GroupedPredictor(grouped_temp)
def _fit_single(self, x, y, gr_subset=None, cat_vals=None):
"""
Расчет WOE и IV
Parameters:
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
gr_subset : _GroupedPredictor
Предиктор
"""
gr_subset_num = pd.DataFrame()
gr_subset_cat = pd.DataFrame()
col = x.name
if gr_subset is None:
gr_subset = self.grouped.get_predictor(col)
if cat_vals is None:
cat_vals = self.cat_values.get(col, [])
nan_mask = x.isna()
num_mask = self._get_nums_mask(x) & (~x.isin(cat_vals)) & (~nan_mask)
num_vals = x.loc[num_mask].unique()
try:
# Расчет коэффициентов тренда по числовым значениям предиктора
if num_mask.sum() > 0:
try:
poly_coefs = np.polyfit(
x.loc[num_mask].astype(float), y.loc[num_mask], deg=1
)
except np.linalg.LinAlgError as e:
print(f"Error in np.polyfit on predictor: '{col}'.\nError MSG: {e}")
print("Linear Least Squares coefficients were set to [1, 0]")
poly_coefs = np.array([1, 0])
self.trend_coefs.update({col: poly_coefs})
# Расчет монотонных границ
gr_subset_num = gr_subset[gr_subset["value"].isin(num_vals)].copy()
gr_subset_num["value"] = pd.to_numeric(gr_subset_num["value"])
gr_subset_num = gr_subset_num.sort_values("value")
borders = self._monotonic_borders(gr_subset_num, self.trend_coefs[col])
self.borders.update({col: borders})
# Применение границ к сгруппированным данным
gr_subset_num["groups"] = pd.cut(gr_subset_num["value"], borders)
gr_subset_num["type"] = "num"
except ValueError as e:
print(f"ValueError on predictor {col}.\nError MSG: {e}")
# Расчет коэффициентов тренда по категориальным значениям предиктора
if (~num_mask).sum() > 0:
gr_subset_cat = gr_subset[~gr_subset["value"].isin(num_vals)].copy()
gr_subset_cat["groups"] = gr_subset_cat["value"].fillna("пусто")
gr_subset_cat["type"] = "cat"
# Объединение числовых и категориальных значений
gr_subset = pd.concat([gr_subset_num, gr_subset_cat], axis=0, ignore_index=True)
# Расчет WOE и IV
alpha = self.alpha_values.get(col, 0)
res_i = self._statistic(gr_subset, alpha=alpha)
is_empty_exists = any(res_i["groups"].astype(str).str.contains("пусто"))
if is_empty_exists:
res_i["groups"].replace({"пусто": np.nan}, inplace=True)
return res_i
def _transform_single(self, x, stats=None):
"""
Применение группировки и WoE-преобразования
Parameters
---------------
x : pandas.Series
Значения предиктора
Returns
---------------
X_woe : pandas.DataFrame
WoE-преобразования значений предиктора
WoE = 0, если группа не встречалась в обучающей выборке
"""
orig_index = x.index
X_woe = x.copy()
if stats is None:
stats = self.stats.get_predictor(X_woe.name)
# Маппинги для замены групп на соответствующие значения WOE
num_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "num"
}
cat_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "cat"
}
# Категориальные группы
cat_bounds = stats.loc[stats["type"] == "cat", "groups"]
# predict по числовым значениям
DF_num = stats.loc[stats["type"] == "num"]
if DF_num.shape[0] > 0:
# Границы (правые) интервалов для разбивки числовых переменных
num_bounds = [-np.inf] + list(
pd.IntervalIndex(stats.loc[stats["type"] == "num", "groups"]).right
)
# Выделение только числовых значений предиктора
# (похожих на числа и тех, что явно не указаны как категориальные)
X_woe_num = pd.to_numeric(
X_woe[(self._get_nums_mask(X_woe)) & (~X_woe.isin(cat_bounds))]
)
# Разбивка значений на интервалы в соответствии с группировкой
X_woe_num = pd.cut(X_woe_num, num_bounds)
# Замена групп на значения WOE
X_woe_num = X_woe_num.replace(num_map)
X_woe_num.name = "woe"
else:
X_woe_num = pd.Series()
# predict по категориальным значениям (может обновлять значения по числовым)
DF_cat = stats.loc[stats["type"] == "cat"]
if DF_cat.shape[0] > 0:
# Выделение строковых значений и тех, что явно выделены как категориальные
X_woe_cat = X_woe[X_woe.isin(cat_map.keys())]
# Замена групп на значения WOE
X_woe_cat = X_woe_cat.replace(cat_map)
else:
X_woe_cat = pd.Series()
# predict по новым категориям (нечисловые: которых не было при группировке)
# Сбор индексов категориальных и числовых значений
used_index = np.hstack([X_woe_cat.index, X_woe_num.index])
if len(used_index) < len(x):
X_woe_oth = X_woe.index.drop(used_index)
X_woe_oth = pd.Series(0, index=X_woe_oth)
else:
X_woe_oth = pd.Series()
X_woe = pd.concat([X_woe_num, X_woe_cat, X_woe_oth]).reindex(orig_index)
X_woe = pd.to_numeric(X_woe, downcast="signed")
return X_woe
def _monotonic_borders(self, grouped, p):
"""
Определение оптимальных границ групп предиктора (монотонный тренд)
Parameters
---------------
DF_grouping : pandas.DataFrame
Агрегированные данные по значениям предиктора (результат работы
функции grouping, очищенный от категориальных значений).
Должен содержать поля 'predictor', 'sample_count', 'target_count',
'sample_rate и 'target_rate'
p : list-like, длиной в 2 элемента
Коэффициенты линейного тренда значений предиктора
Returns
---------------
R_borders : list
Правые границы групп для последующей группировки
"""
k01, k11 = (1, 1) if p[0] > 0 else (0, -1)
R_borders = []
min_ind = 0 # минимальный индекс. Начальные условия
DF_grouping = grouped.copy().sort_values("value").reset_index()
while min_ind < DF_grouping.shape[0]: # цикл по новым группам
# Расчет показателей накопительным итогом
DF_j = DF_grouping.iloc[min_ind:]
DF_iter = DF_j[["sample_rate", "sample_count", "target_count"]].cumsum()
DF_iter["non_target_count"] = (
DF_iter["sample_count"] - DF_iter["target_count"]
)
DF_iter["target_rate"] = DF_iter["target_count"] / DF_iter["sample_count"]
# Проверка на соответствие критериям групп
DF_iter["check"] = self._check_groups(DF_iter)
# Расчет базы для проверки оптимальности границы
# В зависимости от тренда считается скользящий _вперед_ минимум или максимум
# (в расчете участвуют все наблюдения от текущего до последнего)
if k11 == 1:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.min()[::-1]
)
else:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.max()[::-1]
)
# Проверка оптимальности границы
DF_iter["opt"] = DF_iter["target_rate"] == DF_iter["pd_gr"]
DF_iter = pd.concat([DF_j[["value"]], DF_iter], axis=1)
try:
min_ind = DF_iter.loc[
(DF_iter["check"]) & (DF_iter["opt"]), "target_rate"
].index.values[0]
score_j = DF_iter.loc[min_ind, "value"]
if (
len(R_borders) > 0 and score_j == R_borders[-1]
): # Выход из цикла, если нет оптимальных границ
break
except Exception:
break
min_ind += 1
R_borders.append(score_j)
# Проверка последней добавленной группы
if len(R_borders) > 0:
DF_iter = DF_grouping.loc[DF_grouping["value"] > R_borders[-1]]
sample_rate_i = DF_iter["sample_rate"].sum() # доля выборки
sample_count_i = DF_iter["sample_count"].sum() # количество наблюдений
target_count_i = DF_iter["target_count"].sum() # количество целевых
non_target_count_i = sample_count_i - target_count_i # количество нецелевых
if (
(sample_rate_i < self.min_sample_rate)
or (target_count_i < self.min_count)
or (non_target_count_i < self.min_count)
):
R_borders.remove(R_borders[-1]) # удаление последней границы
else:
predictor = DF_grouping["predictor"].iloc[0]
warnings.warn(
f"Couldn't find any borders for feature {predictor}.\n Borders set on (-inf, +inf)"
)
R_borders = [-np.inf] + R_borders + [np.inf]
return R_borders
def _check_groups(
self,
df,
sample_rate_col="sample_rate",
sample_count_col="sample_count",
target_count_col="target_count",
):
""" Проверить сгруппированные значения предиктора на соответствме условиям"""
cond_mask = (
(df[sample_rate_col] >= self.min_sample_rate - 10 ** -9)
& (df[sample_count_col] >= self.min_count)
& (df[target_count_col] >= self.min_count)
)
return cond_mask
def _get_cat_values_for_join(self, grouped):
"""Получить словарь для замены категорий на объединяемые
NOTE: Нужно тестирование
TODO: переписать на рекурсию
"""
df = grouped.copy()
cond_mask = ~self._check_groups(df)
res = df[
[
"predictor",
"value",
"sample_count",
"target_count",
"sample_rate",
"target_rate",
]
].copy()
res = res.sort_values(["sample_rate", "target_rate"])
res["cum_sample_rate"] = res["sample_rate"].cumsum()
res["check"] = cond_mask
res["check_reverse"] = ~cond_mask
res["check_diff"] = res["check"].astype(int).diff()
res["new_group"] = (res["check_diff"] == -1).astype(int)
res["exist_group"] = res["check_reverse"].astype(int).eq(1)
res.loc[~res["check_reverse"], "exist_group"] = np.NaN
res["exist_group_cum"] = res["exist_group"].cumsum().fillna(method="bfill")
res[["cum_sr", "cum_sc", "cum_tc"]] = res.groupby("exist_group_cum").agg(
{
"sample_rate": "cumsum",
"sample_count": "cumsum",
"target_count": "cumsum",
}
)
res["cum_sr_check"] = (
self._check_groups(res, "cum_sr", "cum_sc", "cum_tc")
.astype(int)
.diff()
.eq(1)
.astype(int)
.shift()
)
display(res)
res.loc[res["cum_sr_check"] != 1, "cum_sr_check"] = np.nan
res["cum_sr_check"] = res["cum_sr_check"].fillna(method="ffill").fillna(0)
res["group_number"] = res["exist_group_cum"] + res["cum_sr_check"]
repl = res.groupby("group_number").agg({"value": list}).to_dict()["value"]
repl = {k: "_".join(v) for k, v in repl.items()}
res["group_vals"] = res["group_number"].replace(repl)
t = dict(zip(res["value"], res["group_vals"]))
return t
def _plot_single_woe_grouping(self, stats, ax_pd=None):
"""
Построение графика по группировке предиктора
Parameters
---------------
stats : pandas.DataFrame
Статистика по каждой группе (результат работы функции statistic):
минимальное, максимальное значение, доля от общего объема выборки,
количество и доля целевых и нецелевых событий в каждой группе,
WOE и IV каждой группы
Должен содержать столбцы: 'sample_rate', 'target_rate', 'WOE'
ax_pd : matplotlib.Axes
Набор осей (subplot)
"""
# Расчеты
x2 = [stats["sample_rate"][:i].sum() for i in range(stats.shape[0])] + [
1
] # доля выборки с накоплением
x = [
np.mean(x2[i : i + 2]) for i in range(len(x2) - 1)
] # средняя точка в группах
# Выделение нужной информации для компактности
woe = list(stats["WOE"])
height = list(stats["target_rate"]) # проблемность в группе
width = list(stats["sample_rate"]) # доля выборки на группу
# Визуализация
if ax_pd is None:
_, ax_pd = plt.subplots(figsize=(8, 5))
# Столбчатая диаграмма доли целевых в группах
ax_pd.bar(
x=x,
height=height,
width=width,
color=[0, 122 / 255, 123 / 255],
label="Группировка",
alpha=0.7,
)
# График значений WOE по группам
ax_woe = ax_pd.twinx() # дубликат осей координат
ax_woe.plot(
x, woe, lw=2, color=[37 / 255, 40 / 255, 43 / 255], label="woe", marker="o"
)
# Линия нулевого значения WOE
ax_woe.plot(
[0, 1], [0, 0], lw=1, color=[37 / 255, 40 / 255, 43 / 255], linestyle="--"
)
# Настройка осей координат
plt.xlim([0, 1])
plt.xticks(x2, [round(i, 2) for i in x2], fontsize=12)
ax_pd.grid(True)
ax_pd.set_xlabel("Доля выборки", fontsize=16)
ax_pd.set_ylabel("pd", fontsize=16)
ax_woe.set_ylabel("woe", fontsize=16)
# Расчет границ графика и шага сетки
max_woe = max([int(abs(i)) + 1 for i in woe])
max_pd = max([int(i * 10) + 1 for i in height]) / 10
# Границы и сетка для столбчатой диаграммы
ax_pd.set_ylim([0, max_pd])
ax_pd.set_yticks([round(i, 2) for i in np.linspace(0, max_pd, 11)])
ax_pd.legend(bbox_to_anchor=(1.05, 0.83), loc=[0.2, -0.25], fontsize=14)
# Границы и сетка для графика WOE
ax_woe.set_ylim([-max_woe, max_woe])
ax_woe.set_yticks([round(i, 2) for i in np.linspace(-max_woe, max_woe, 11)])
ax_woe.legend(bbox_to_anchor=(1.05, 0.92), loc=[0.6, -0.25], fontsize=14)
plt.title(
"Группировка предиктора {}".format(stats.loc[0, "predictor"]), fontsize=18
)
# Для категориальных
n_cat = stats.loc[stats["type"] == "cat"].shape[0]
if n_cat > 0:
ax_pd.bar(
x=x[-n_cat:],
height=height[-n_cat:],
width=width[-n_cat:],
color="m",
label="Категориальные",
)
ax_pd.legend(bbox_to_anchor=(1.05, 0.76), loc=[0.15, -0.33], fontsize=14)
plt.tight_layout()
def _get_possible_groups(self):
"""
Поиск возможных групп в значениях предикторов после агрегации
"""
self.possible_groups = pd.DataFrame()
# Выделение значений предиктора с достаточным кол-вом наблюдений и
# не отмеченных, как категориальные
for i in self.predictors:
cat_vals = self.cat_values.get(i, [])
DF_i1 = self.grouped.get_predictor(i).copy()
DF_i1 = DF_i1.loc[
(DF_i1["sample_rate"] > self.min_sample_rate)
& (~DF_i1["value"].isin(cat_vals))
]
# Выделение всех значений предиктора, не отмеченных, как категориальные
DF_i2 = self.grouped.get_predictor(i).copy()
DF_i2 = DF_i2.loc[(~DF_i2["value"].isin(cat_vals))]
# Выбор значений: которые не равны бесконечности и при этом не являются числами
L = ~(DF_i2["value"] == np.inf) & (~(self._get_nums_mask(DF_i2["value"])))
DF_i2 = DF_i2.loc[L]
# Объединение найденных значений в одну таблицу
DF_i = pd.concat((DF_i1, DF_i2), ignore_index=True).drop_duplicates()
self.possible_groups = self.possible_groups.append(DF_i)
def _get_bad_groups(self):
"""
Поиск групп: не удовлетворяющих условиям
"""
self.bad_groups = self.stats.loc[
(self.stats["sample_rate"] < self.min_sample_rate)
| (self.stats["target_count"] < self.min_count)
| (self.stats["sample_count"] - self.stats["target_count"] < self.min_count)
]
def _regularize_groups(self, stats, alpha=0):
"""расчет оптимальной целевой для группы на основе готовой woe-группировки
формула и детали в видео
https://www.youtube.com/watch?v=g335THJxkto&list=PLLIunAIxCvT8ZYpC6-X7H0QfAQO9H0f-8&index=12&t=0s
pd = (y_local * K + Y_global * alpha) / (K + alpha)"""
Y_global = stats["target_count"].sum() / stats["sample_count"].sum()
K = stats["sample_count"] / stats["sample_count"].sum()
stats["target_rate"] = (stats["target_rate"] * K + Y_global * alpha) / (
K + alpha
)
stats["target_count"] = np.floor(
stats["sample_count"] * stats["target_rate"]
).astype(int)
return stats
def _statistic(self, grouped, alpha=0):
"""
Расчет статистики по группам предиктора: минимальное, максимальное значение, доля от
общего объема выборки, количество и доля целевых и нецелевых событий в каждой группе
А также расчет WOE и IV каждой группы
Parameters
---------------
grouped : pandas.DataFrame
Данные полученных групп предиктора. Кол-во строк совпадает с кол-вом
уникальных значений предиктора.
Должен содержать столбцы: 'sample_count', 'target_count', 'groups'
alpha : float, default 0
Коэффициент регуляризации групп
Returns
---------------
stats : pandas.DataFrame
Агрегированные данные по каждой группе
"""
nothing = 10 ** -6
stats = grouped.groupby(["predictor", "groups"], sort=False).agg(
{
"type": "first",
"sample_count": "sum",
"target_count": "sum",
"value": ["min", "max"],
},
)
stats.columns = ["type", "sample_count", "target_count", "min", "max"]
stats.reset_index(inplace=True)
stats["sample_rate"] = stats["sample_count"] / stats["sample_count"].sum()
stats["target_rate"] = stats["target_count"] / stats["sample_count"]
stats = self._regularize_groups(stats, alpha=alpha)
# Расчет WoE и IV
samples_num = stats["sample_count"].sum()
events = stats["target_count"].sum()
non_events = samples_num - events
stats["non_events_i"] = stats["sample_count"] - stats["target_count"]
stats["event_rate_i"] = stats["target_count"] / (events + nothing)
stats["non_event_rate_i"] = stats["non_events_i"] / (non_events + nothing)
stats["WOE"] = np.log(
stats["non_event_rate_i"] / (stats["event_rate_i"] + nothing) + nothing
)
stats["IV"] = stats["WOE"] * (stats["non_event_rate_i"] - stats["event_rate_i"])
return stats
def _calc_trend_coefs(self, x, y):
"""
Расчет коэффициентов тренда
Parameters
---------------
x : pandas.Series
Значения предиктора
y : pandas.Series
Целевая переменная
Returns
-----------
dict[str, tuple[float, float]]
"""
return {x.name: np.polyfit(x, y, deg=1)}
# Служебные функции
def _reset_state(self):
self.trend_coefs = {}
self.borders = {}
self.cat_values = {}
self.predictors = []
self.grouped = _GroupedPredictor()
self.stats = _GroupedPredictor()
def _get_nums_mask(self, x):
# if x.apply(lambda x: isinstance(x, str)).sum() == len(x):
# return pd.Series(False, index=x.index)
# else:
# mask = pd.to_numeric(x, errors="coerce").notna()
mask = pd.to_numeric(x, errors="coerce").notna()
return mask
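# A minimal usage sketch (assumed, not part of the original module): fit the
# transformer on an invented toy dataset and apply the WOE mapping. Assumes a
# pandas version where DataFrame.append is still available (pandas < 2.0),
# which the module itself relies on.
_demo_rng = np.random.default_rng(0)
_demo_X = pd.DataFrame({
    'score': _demo_rng.normal(size=300),
    'region': _demo_rng.choice(['A', 'B', 'C'], size=300),
})
_demo_y = pd.Series((_demo_rng.random(300) < 0.2).astype(int), name='target')
_demo_wt = WoeTransformer(min_sample_rate=0.05, min_count=3)
_demo_X_woe = _demo_wt.fit_transform(
    _demo_X, _demo_y, cat_values={'region': ['A', 'B', 'C']})
print(_demo_wt.get_iv(sort=True))   # information value per predictor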
class WoeTransformerRegularized(WoeTransformer):
"""
Класс для построения и применения WOE группировки к датасету с применением
регуляризации малых групп
"""
def __init__(self, min_sample_rate=0.05, min_count=3, alphas=None, n_seeds=30):
"""
Инициализация экземпляра класса
"""
self.min_sample_rate = min_sample_rate
self.min_count = min_count
self.predictors = []
self.alphas = 100 if alphas is None else alphas
self.alpha_values = {}
self.n_seeds = n_seeds
def fit(self, X, y, cat_values={}, alpha_values={}):
"""
Обучение трансформера и расчет всех промежуточных данных
Parameters
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
cat_values : dict[str, list[str]], optional
Словарь списков с особыми значениями, которые нужно
выделить в категории
По умолчанию все строковые и пропущенные значения
выделяются в отдельные категории
alpha_values : dict[str, float], optional
Словарь со значениями alpha для регуляризации WOE-групп
Returns
-------
self : WoeTransformer
"""
# Сброс текущего состояния трансформера
self._reset_state()
self.cat_values = cat_values
self.regularization_stats = _GroupedPredictor()
for col in tqdm(X.columns, desc="Searching alphas"):
temp_alpha = self._cat_features_alpha_logloss(
X[col], y, self.alphas, self.n_seeds
)
self.alpha_values.update({col: temp_alpha})
self._grouping(X, y)
# Расчет WOE и IV
self._fit_numeric(X, y)
# Поиск потенциальных групп
# Поиск "плохих" групп
self._get_bad_groups()
return self
def _cat_features_alpha_logloss(self, x, y, alphas, seed=30):
"""
функция расчета IV, GINI и logloss для категориальных
переменных с корректировкой целевой по alpha
"""
# задаем промежуточную функцию для WOE преобразования переменной из исходного датафрейма
# по рассчитанным WOE из IVWOE
def calc_woe_i(row_value, stats):
return stats.loc[stats["groups"] == row_value, "WOE"].values[0]
predictor = x.name
target = y.name
df = pd.DataFrame({predictor: x.values, target: y.values})
df[predictor] = df[predictor].fillna("NO_INFO")
L_logloss_mean = []
GINI_IV_mean = []
for alpha_i in alphas:
logloss_i = []
GINI_i = []
IV_i = []
for seed_i in range(seed):
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=seed_i, stratify=y
)
# Группировка значений предиктора с текущим alpha
df_i = self._group_single(X_train, y_train)
df_i["groups"] = df_i["value"].fillna("пусто")
df_i["type"] = "cat"
# Обучение и применение группировки к обучающему набору
WOE_i = self._fit_single(X_train, y_train, df_i)
WOE_i = self._regularize_groups(WOE_i, alpha_i)
# расчет оптимальной целевой для группы, формула и детали в видео
# https://www.youtube.com/watch?v=g335THJxkto&list=PLLIunAIxCvT8ZYpC6-X7H0QfAQO9H0f-8&index=12&t=0s
# pd = (y_local * K + Y_global * alpha) / (K + alpha)
Y_global = y_train.mean()
K = WOE_i["sample_count"] / WOE_i["sample_count"].sum()
WOE_i["target_rate"] = (
WOE_i["target_rate"] * K + Y_global * alpha_i
) / (K + alpha_i)
WOE_i["target_count"] = np.floor(
WOE_i["sample_count"] * WOE_i["target_rate"]
).astype(int)
X_test_WOE = self._transform_single(X_test, WOE_i)
roc_auc_i = sk.metrics.roc_auc_score(y_test, X_test_WOE)
# Подстановка регуляризованной доли целевой вместо каждой группы
target_transformed = X_test_WOE.replace(
dict(zip(WOE_i["WOE"], WOE_i["target_rate"]))
)
# Запись значений
logloss_i.append(
sk.metrics.log_loss(y_test, target_transformed.fillna(0))
)
IV_i.append(WOE_i["IV"].sum())
GINI_i.append(abs(2 * roc_auc_i - 1))
# Запись средних значений
L_logloss_mean.append([alpha_i, np.mean(logloss_i)])
GINI_IV_mean.append([alpha_i, np.mean(GINI_i), np.mean(IV_i)])
alpha_GINI_IV = pd.DataFrame(GINI_IV_mean, columns=["alpha", "GINI", "IV"])
alpha_GINI_IV.insert(0, "predictor", predictor)
self.regularization_stats = self.regularization_stats.append(alpha_GINI_IV)
# Индекс значения alpha с наименьшим логлоссом
min_logloss_ind = np.argmin(L_logloss_mean, axis=0)[1]
alpha_opt = L_logloss_mean[min_logloss_ind][0]
return alpha_opt
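# A worked numeric check (illustration only) of the regularization formula used
# above: pd = (y_local * K + Y_global * alpha) / (K + alpha). For a small group
# the regularized target rate is pulled towards the global rate as alpha grows.
Y_global_demo = 0.10   # global target rate
y_local_demo = 0.50    # observed target rate in a tiny group
K_demo = 0.02          # the group's share of the sample
for alpha_demo in (0.0, 0.1, 1.0):
    reg_rate = (y_local_demo * K_demo + Y_global_demo * alpha_demo) / (
        K_demo + alpha_demo)
    print(alpha_demo, round(reg_rate, 4))
# 0.0 -> 0.5     (no regularization)
# 0.1 -> 0.1667  (pulled towards 0.10)
# 1.0 -> 0.1078  (almost the global rate)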
########################
# Set of accelerated versions of the functions #
########################
# Far behind the class, but exactly replicates the Vanilla functionality
def grouping(DF_data_i, low_acc=False):
"""
Агрегация данных по значениям предиктора. Рассчитывает количество наблюдений,
количество целевых событий, долю группы от общего числа наблюдений и долю целевых в группе
Parameters
---------------
DF_data_i : pandas.DataFrame
Таблица данных для агрегации, должна содержать поля 'predictor' и 'target'.
Поле target при этом должно состоять из 0 и 1, где 1 - целевое событие
low_acc : int, default None
Параметр для округления значений предиктора.
Если None, то предиктор не округляется.
Если целое неотрицательное число, параметр используется для определения
количества знаков после запятой, остальные значения игнорируются
Returns
---------------
DF_grouping : pandas.DataFrame
Таблица с агрегированными данными по значениям предиктора
"""
# Округение, если аргумент принимает допустимые значения
if low_acc and type(low_acc) is int and low_acc > 0:
DF_data_i = DF_data_i[["predictor", "target"]].round(low_acc)
# Группировка и расчет показателей
DF_grouping = (
DF_data_i.groupby("predictor")["target"].agg(["count", "sum"]).reset_index()
)
DF_grouping.columns = ["predictor", "sample_count", "target_count"]
DF_grouping["sample_rate"] = (
DF_grouping["sample_count"] / DF_grouping["sample_count"].sum()
)
DF_grouping["target_rate"] = (
DF_grouping["target_count"] / DF_grouping["sample_count"]
)
return DF_grouping
def monotonic_borders(DF_grouping, p, min_sample_rate=0.05, min_count=3):
"""
Определение оптимальных границ групп предиктора (монотонный тренд)
Parameters
---------------
DF_grouping : pandas.DataFrame
Агрегированные данные по значениям предиктора (результат работы
функции grouping, очищенный от категориальных значений).
Должен содержать поля 'predictor', 'sample_count', 'target_count',
'sample_rate и 'target_rate'
p : list-like, длиной в 2 элемента
Коэффициенты линейного тренда значений предиктора
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
Returns
---------------
R_borders : list
Правые границы групп для последующей группировки
"""
k01, k11 = (1, 1) if p[0] > 0 else (0, -1)
R_borders = []
min_ind = 0 # минимальный индекс. Начальные условия
while min_ind < DF_grouping.shape[0]: # цикл по новым группам
# Расчет показателей накопительным итогом
DF_j = DF_grouping.loc[min_ind:]
DF_iter = DF_j[["sample_rate", "sample_count", "target_count"]].cumsum()
DF_iter["non_target_count"] = DF_iter["sample_count"] - DF_iter["target_count"]
DF_iter["target_rate"] = DF_iter["target_count"] / DF_iter["sample_count"]
# Проверка на соответствие критериям групп
DF_iter["check"] = (
(DF_iter["sample_rate"] >= min_sample_rate - 10 ** -9)
& (DF_iter["target_count"] >= min_count)
& (DF_iter["non_target_count"] >= min_count)
)
# Расчет базы для проверки оптимальности границы
# В зависимости от тренда считается скользящий _вперед_ минимум или максимум
# (в расчете участвуют все наблюдения от текущего до последнего)
if k11 == 1:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.min()[::-1]
)
else:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.max()[::-1]
)
# Проверка оптимальности границы
DF_iter["opt"] = DF_iter["target_rate"] == DF_iter["pd_gr"]
DF_iter = pd.concat([DF_j[["predictor"]], DF_iter], axis=1)
try:
min_ind = DF_iter.loc[
(DF_iter["check"]) & (DF_iter["opt"]), "target_rate"
].index.values[0]
score_j = DF_iter.loc[min_ind, "predictor"]
if (
len(R_borders) > 0 and score_j == R_borders[-1]
): # Выход из цикла, если нет оптимальных границ
break
except Exception:
break
min_ind += 1
R_borders.append(score_j)
# Проверка последней добавленной группы
DF_iter = DF_grouping.loc[DF_grouping["predictor"] > R_borders[-1]]
sample_rate_i = DF_iter["sample_rate"].sum() # доля выборки
sample_count_i = DF_iter["sample_count"].sum() # количество наблюдений
target_count_i = DF_iter["target_count"].sum() # количество целевых
non_target_count_i = sample_count_i - target_count_i # количество нецелевых
if (
(sample_rate_i < min_sample_rate)
or (target_count_i < min_count)
or (non_target_count_i < min_count)
):
R_borders.remove(R_borders[-1]) # удаление последней границы
return R_borders
# Статистика
def statistic(DF_groups):
"""
Расчет статистики по группам предиктора: минимальное, максимальное значение, доля от
общего объема выборки, количество и доля целевых и нецелевых событий в каждой группе
А также расчет WOE и IV каждой группы
Parameters
---------------
DF_groups : pandas.DataFrame
Данные полученных групп предиктора. Кол-во строк совпадает с кол-вом
уникальных значений предиктора.
Должен содержать столбцы: 'sample_count', 'target_count', 'groups'
Returns
---------------
DF_statistic : pandas.DataFrame
Агрегированные данные по каждой группе
"""
nothing = 10 ** -6
DF_statistic = (
DF_groups[["sample_count", "target_count", "groups"]]
.groupby("groups", as_index=False, sort=False)
.sum()
)
DF_statistic_min = (
DF_groups[["predictor", "groups"]]
.groupby("groups", as_index=False, sort=False)
.min()
)
DF_statistic_max = (
DF_groups[["predictor", "groups"]]
.groupby("groups", as_index=False, sort=False)
.max()
)
DF_statistic["min"] = DF_statistic_min["predictor"]
DF_statistic["max"] = DF_statistic_max["predictor"]
DF_statistic["sample_rate"] = (
DF_statistic["sample_count"] / DF_statistic["sample_count"].sum()
)
DF_statistic["target_rate"] = (
DF_statistic["target_count"] / DF_statistic["sample_count"]
)
# Расчет WoE и IV
samples_num = DF_statistic["sample_count"].sum()
events = DF_statistic["target_count"].sum()
non_events = samples_num - events
DF_statistic["non_events_i"] = (
DF_statistic["sample_count"] - DF_statistic["target_count"]
)
DF_statistic["event_rate_i"] = DF_statistic["target_count"] / (events + nothing)
DF_statistic["non_event_rate_i"] = DF_statistic["non_events_i"] / (
non_events + nothing
)
DF_statistic["WOE"] = np.log(
DF_statistic["non_event_rate_i"] / (DF_statistic["event_rate_i"] + nothing)
+ nothing
)
DF_statistic["IV"] = DF_statistic["WOE"] * (
DF_statistic["non_event_rate_i"] - DF_statistic["event_rate_i"]
)
DF_statistic = DF_statistic.merge(
DF_groups[["type", "groups"]].drop_duplicates(), how="left", on="groups"
)
return DF_statistic
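# A small worked example (illustration only) of the WOE/IV formulas implemented
# above: WOE = ln(non_event_rate_i / event_rate_i) and
# IV = WOE * (non_event_rate_i - event_rate_i), computed for one group.
events_demo, non_events_demo = 100, 900        # totals over all groups
target_i, sample_i = 30, 150                   # a single group
non_events_i = sample_i - target_i             # 120
event_rate_i = target_i / events_demo          # 0.30
non_event_rate_i = non_events_i / non_events_demo    # ~0.1333
woe_i = np.log(non_event_rate_i / event_rate_i)      # ~ -0.811
iv_i = woe_i * (non_event_rate_i - event_rate_i)     # ~ 0.135
print(round(woe_i, 3), round(iv_i, 3))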
# Графики
def group_plot(DF_result):
"""
Построение графика по группировке предиктора
Parameters
---------------
DF_result : pandas.DataFrame
Статистика по каждой группе (результат работы функции statistic):
минимальное, максимальное значение, доля от общего объема выборки,
количество и доля целевых и нецелевых событий в каждой группе,
WOE и IV каждой группы
Должен содержать столбцы: 'sample_rate', 'target_rate', 'WOE'
Returns
---------------
None
Не возвращает ничего
"""
# Расчеты
sample_rate, target_rate, WOE = ["sample_rate", "target_rate", "WOE"]
x2 = [DF_result[sample_rate][:i].sum() for i in range(DF_result.shape[0])] + [
1
] # доля выборки с накоплением
x = [np.mean(x2[i : i + 2]) for i in range(len(x2) - 1)] # средняя точка в группах
# Выделение нужной информации для компактности
woe = list(DF_result[WOE])
height = list(DF_result[target_rate]) # проблемность в группе
width = list(DF_result[sample_rate]) # доля выборки на группу
# Визуализация
fig, ax_pd = plt.subplots(figsize=(8, 5))
# Столбчатая диаграмма доли целевых в группах
ax_pd.bar(
x=x,
height=height,
width=width,
color=[0, 122 / 255, 123 / 255],
label="Группировка",
alpha=0.7,
)
# График значений WOE по группам
ax_woe = ax_pd.twinx() # дубликат осей координат
ax_woe.plot(
x, woe, lw=2, color=[37 / 255, 40 / 255, 43 / 255], label="woe", marker="o"
)
# Линия нулевого значения WOE
ax_woe.plot(
[0, 1], [0, 0], lw=1, color=[37 / 255, 40 / 255, 43 / 255], linestyle="--"
)
# Настройка осей координат
plt.xlim([0, 1])
plt.xticks(x2, [round(i, 2) for i in x2], fontsize=12)
ax_pd.grid(True)
ax_pd.set_xlabel("Доля выборки", fontsize=16)
ax_pd.set_ylabel("pd", fontsize=16)
ax_woe.set_ylabel("woe", fontsize=16)
# Расчет границ графика и шага сетки
max_woe = max([int(abs(i)) + 1 for i in woe])
max_pd = max([int(i * 10) + 1 for i in height]) / 10
# Границы и сетка для столбчатой диаграммы
ax_pd.set_ylim([0, max_pd])
ax_pd.set_yticks([round(i, 2) for i in np.linspace(0, max_pd, 11)])
ax_pd.legend(loc=[0.2, -0.25], fontsize=14)
# Границы и сетка для графика WOE
ax_woe.set_ylim([-max_woe, max_woe])
ax_woe.set_yticks([round(i, 2) for i in np.linspace(-max_woe, max_woe, 11)])
ax_woe.legend(loc=[0.6, -0.25], fontsize=14)
plt.title("Группировка предиктора", fontsize=18)
# Для категориальных
n_cat = DF_result.loc[DF_result["type"] == "cat"].shape[0]
if n_cat > 0:
ax_pd.bar(
x=x[-n_cat:],
height=height[-n_cat:],
width=width[-n_cat:],
color="m",
label="Категориальные",
)
ax_pd.legend(loc=[0.15, -0.33], fontsize=14)
plt.show()
# ## Трансформер
def woe_transformer(
x,
y,
cat_values=[],
min_sample_rate=0.05,
min_count=3,
errors="skip",
low_accuracy=None,
plot=True,
verbose=True,
):
"""
Группировка значений предиктора, определение оптимальных границ и расчет WOE и IV
Parameters
---------------
x : pandas.Series
Mассив числовых значений предиктора. Не должен содержать пропущенных
значений, но может сочетать строковые и числовые
y : pandas.Series
Mассив меток класса (0, 1)
cat_values: list
Категориальные значения (пустышки и несравнимые значения).
Элементы списка должны быть строками
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
errors : str, default 'skip'
Способ обработки ошибок:
'skip' - не возвращать ничего в случае ошибки
'origin' - вернуть исходные значения предиктора
'raise' - бросить исключение
low_accuracy : int, default None
Режим пониженной точности (округление при группировке)
Если None, то предиктор не округляется.
Если целое неотрицательное число, параметр используется для определения
количества знаков после запятой, остальные значения игнорируются
plot : bool, default True
Включение/выключение визуализации группировки
verbose : bool, default True
Включение/выключение доп. информации по группировке
Returns
---------------
DF_result : pandas.DataFrame
Таблица с итоговой группировкой и статистикой
"""
if errors not in ["skip", "raise"]:
warnings.warn(
f"Attribute `errors` must be one of ['skip', 'raise']. Passed {errors}.\n\
Defaulting to 'skip'"
)
errors = "skip"
# Обработка входных данных
DF_data_i = pd.DataFrame({"predictor": x, "target": y})
# Агрегация данных по значениям предиктора
DF_data_gr = grouping(DF_data_i, low_accuracy)
# Проверка категориальных групп (возможные дополнительные категории)
if verbose:
# Выделение значений предиктора с достаточным кол-вом наблюдений и
# не отмеченных, как категориальные
DF_i1 = DF_data_gr.loc[DF_data_gr["sample_rate"] > min_sample_rate].loc[
~DF_data_gr["predictor"].isin(cat_values)
]
# Выделение всех значений предиктора, не отмеченных, как категориальные
DF_i2 = DF_data_gr.loc[~DF_data_gr["predictor"].isin(cat_values)]
# Выбор значений: которые не равны бесконечности и при этом не являются числами
L = ~(DF_i2["predictor"] == np.inf) & (
pd.to_numeric(DF_i2["predictor"], errors="coerce").isna()
)
DF_i2 = DF_i2.loc[L]
# Объединение найденных значений в одну таблицу
DF_i = DF_i1.append(DF_i2, ignore_index=True).drop_duplicates()
if DF_i.shape[0] > 0:
print("Возможно эти значения предиктора тоже являются категориальными:")
display(DF_i)
# Выделение числовых значений предиктора
DF_data_gr_num = DF_data_gr.loc[
~DF_data_gr["predictor"].isin(cat_values)
].reset_index(drop=True)
if DF_data_gr_num.shape[0] > 0:
try:
DF_data_gr_num["predictor"] = DF_data_gr_num["predictor"].astype("float")
# Определение тренда по числовым значениям
DF_i = DF_data_i.loc[~DF_data_i["predictor"].isin(cat_values)]
p = np.polyfit(DF_i["predictor"].astype("float"), DF_i["target"], deg=1)
# Определение оптимальных границ групп
R_borders = monotonic_borders(DF_data_gr_num, p, min_sample_rate, min_count)
except Exception:
if errors == "raise":
raise ValueError("Ошибка при расчете монотонных границ")
else:
print("Ошибка при расчете монотонных границ")
try:
# Применение границ
DF_data_gr_num["groups"] = pd.cut(
DF_data_gr_num["predictor"], [-np.inf] + R_borders + [np.inf]
)
DF_data_gr_num["type"] = "num"
except Exception:
if errors == "raise":
raise ValueError("Ошибка при применении монотонных границ")
else:
print("Ошибка при применении монотонных границ")
# Добавление данных по категориальным значениям
DF_data_gr_2k = DF_data_gr.loc[
DF_data_gr["predictor"].isin(cat_values)
].reset_index(drop=True)
DF_data_gr_2k["groups"] = DF_data_gr_2k["predictor"].copy()
DF_data_gr_2k["type"] = "cat"
try:
# Расчет статистики, WoE и IV по группам числовых значений
if DF_data_gr_num.shape[0] > 0:
DF_result = statistic(
DF_data_gr_num.append(DF_data_gr_2k, ignore_index=True)
)
else:
DF_result = statistic(DF_data_gr_2k)
except Exception:
print("Ошибка при расчете статистики")
# Проверка категориальных групп (категории, которые не удовлетворяют заданным ограничениям)
if verbose:
DF_j = DF_result.loc[
(DF_result["sample_rate"] < min_sample_rate)
| (DF_result["target_count"] < min_count)
| (DF_result["sample_count"] - DF_result["target_count"] < min_count)
]
if DF_j.shape[0] > 0:
print("Эти группы не удовлетворяют заданным ограничениям:")
display(DF_j)
# Построение графика
if plot:
group_plot(DF_result)
return DF_result
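# A minimal usage sketch (assumed, not part of the original module): run the
# standalone woe_transformer on an invented predictor with a clear monotone
# trend in the target rate. As with the class above, this relies on
# DataFrame.append being available (pandas < 2.0).
_fast_rng = np.random.default_rng(1)
_x_demo = pd.Series(_fast_rng.uniform(0, 1, size=500))
_y_demo = pd.Series((_fast_rng.random(500) < 0.1 + 0.5 * _x_demo).astype(int))
_DF_result = woe_transformer(_x_demo, _y_demo, cat_values=[],
                             min_sample_rate=0.05, min_count=3,
                             plot=False, verbose=False)
print(_DF_result)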
def woe_apply(S_data, DF_groups):
"""
Применение группировки и WoE-преобразования
Parameters
---------------
S_data : pandas.Series
Значения предиктора
DF_groups : pandas.DataFrame
Данные о группировке предиктора
Returns
---------------
X_woe : pandas.DataFrame
WoE-преобразования значений предиктора
WoE = 0, если группа не встречалась в обучающей выборке
"""
X_woe = S_data.copy()
# Маппинги для замены групп на соответствующие значения WOE
num_map = {
DF_groups.loc[i, "groups"]: DF_groups.loc[i, "WOE"]
for i in DF_groups.index
if DF_groups.loc[i, "type"] == "num"
}
cat_map = {
DF_groups.loc[i, "groups"]: DF_groups.loc[i, "WOE"]
for i in DF_groups.index
if DF_groups.loc[i, "type"] == "cat"
}
# Категориальные группы
cat_bounds = DF_groups.loc[DF_groups["type"] == "cat", "groups"]
# predict по числовым значениям
DF_num = DF_groups.loc[DF_groups["type"] == "num"]
if DF_num.shape[0] > 0:
# Границы (правые) интервалов для разбивки числовых переменных
num_bounds = [-np.inf] + list(
pd.IntervalIndex(DF_groups.loc[DF_groups["type"] == "num", "groups"]).right
)
        # Keep only the numeric predictor values
        # (values that look like numbers and are not explicitly marked as categorical)
X_woe_num = X_woe[
X_woe.astype(str)
.str.replace(r"\.|\-", "")
.str.replace("e", "")
.str.isdecimal()
& (~X_woe.isin(cat_bounds))
]
        # Bin the values into intervals according to the grouping
        X_woe_num = pd.cut(X_woe_num, num_bounds)
        # Replace each group with its WOE value
X_woe_num = X_woe_num.replace(num_map)
X_woe_num.name = "woe"
else:
X_woe_num = pd.Series()
    # predict for the categorical values (may override the numeric-based values)
DF_cat = DF_groups.loc[DF_groups["type"] == "cat"]
if DF_cat.shape[0] > 0:
        # Select string values and values explicitly marked as categorical
        X_woe_cat = X_woe[X_woe.isin(cat_map.keys())]
        # Replace each group with its WOE value
X_woe_cat = X_woe_cat.replace(cat_map)
else:
X_woe_cat = pd.Series()
    # predict for new categories (non-numeric values that were not seen during grouping)
    # Collect the indices of the categorical and numeric values
used_index = np.hstack([X_woe_cat.index, X_woe_num.index])
if len(used_index) < len(S_data):
X_woe_oth = X_woe.index.drop(used_index)
X_woe_oth = pd.Series(0, index=X_woe_oth)
else:
X_woe_oth = pd.Series()
X_woe = pd.concat([X_woe_num, X_woe_cat, X_woe_oth]).sort_index()
return X_woe
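# A minimal, self-contained sketch of the mapping idea used by woe_apply above
# (illustrative only and deliberately simplified: it skips the string heuristics
# of the real function; the bounds and WoE numbers below are made up).
def _example_woe_mapping():
    import numpy as np
    import pandas as pd
    num_bounds = [-np.inf, 0.5, np.inf]  # right borders of the numeric bins
    num_woe = [0.40, -0.25]  # WoE value assigned to each numeric bin
    cat_woe = {"missing": 0.10}  # WoE value per categorical group
    values = pd.Series([0.1, 0.7, "missing", "unseen"], dtype=object)
    is_cat = values.isin(list(cat_woe))
    numeric = pd.to_numeric(values[~is_cat], errors="coerce").dropna()
    woe_num = pd.cut(numeric, num_bounds, labels=num_woe).astype(float)
    woe_cat = values[is_cat].map(cat_woe)
    rest = values.index.difference(woe_num.index.union(woe_cat.index))
    # values not seen during grouping get WoE = 0, as described above
    return pd.concat([woe_num, woe_cat, pd.Series(0.0, index=rest)]).sort_index()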
########################
# Vanilla versions of the functions #
########################
def _grouping(DF_data_i):
    """
    Aggregate the data by predictor values
    DF_data_i[['predictor', 'target']] - input data table
    """
DF_i = (
DF_data_i[["predictor", "target"]].groupby("predictor", as_index=False).count()
)
DF_j = DF_data_i[["predictor", "target"]].groupby("predictor", as_index=False).sum()
DF_grouping = DF_i.merge(DF_j, how="left", on="predictor")
DF_grouping.columns = ["predictor", "sample_count", "target_count"]
DF_grouping["sample_rate"] = (
DF_grouping["sample_count"] / DF_grouping["sample_count"].sum()
)
DF_grouping["target_rate"] = (
DF_grouping["target_count"] / DF_grouping["sample_count"]
)
return DF_grouping
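# Tiny illustration of the aggregation above on made-up data: each distinct
# predictor value gets its observation count, event count, sample share and
# event rate. Wrapped in a function so nothing runs on import.
def _example_grouping():
    import pandas as pd
    DF_data_i = pd.DataFrame(
        {"predictor": [1, 1, 2, 2, 2], "target": [0, 1, 0, 0, 1]}
    )
    return _grouping(DF_data_i)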
def _monotonic_borders(DF_grouping, p, min_sample_rate=0.05, min_count=3):
"""
    Vanilla version of the function, kept just in case
    Determine the optimal group borders (monotonic trend)
    DF_grouping - data aggregated by predictor values
    DF_grouping[['predictor', 'sample_count', 'target_count', 'sample_rate', 'target_rate']]
    min_sample_rate - minimum group size (share of the sample)
    min_count - minimum number of observations of each class in a group
"""
k01, k11 = (1, 1) if p[0] > 0 else (0, -1)
L_borders = []
    min_ind = 0  # minimum index. Initial conditions
    while min_ind < DF_grouping.shape[0]:  # loop over new groups
        pd_gr_i = (
            k01  # mean event rate in the group. Initial conditions (depend on the overall trend)
        )
        for j in range(min_ind, max(DF_grouping.index) + 1):  # loop over the group's end border
            DF_j = DF_grouping.loc[min_ind:j]
            sample_rate_i = DF_j["sample_rate"].sum()  # sample share
            sample_count_i = DF_j["sample_count"].sum()  # number of observations
            target_count_i = DF_j["target_count"].sum()  # number of events
            non_target_count_i = sample_count_i - target_count_i  # number of non-events
            target_rate_i = target_count_i / sample_count_i
if (
(sample_rate_i < min_sample_rate)
or (target_count_i < min_count)
or (non_target_count_i < min_count)
):
                continue  # skip borders that do not satisfy the constraints
            if target_rate_i * k11 < pd_gr_i * k11:  # check whether the border is optimal
min_ind_i = j + 1
pd_gr_i = target_rate_i
score_j = DF_grouping.loc[j, "predictor"]
min_ind = min_ind_i
if (
len(L_borders) > 0 and score_j == L_borders[-1]
        ):  # exit the loop if no optimal border was found
break
L_borders.append(score_j)
    # Check the last added group
    DF_j = DF_grouping.loc[DF_grouping["predictor"] > L_borders[-1]]
    sample_rate_i = DF_j["sample_rate"].sum()  # sample share
    sample_count_i = DF_j["sample_count"].sum()  # number of observations
    target_count_i = DF_j["target_count"].sum()  # number of events
    non_target_count_i = sample_count_i - target_count_i  # number of non-events
if (
(sample_rate_i < min_sample_rate)
or (target_count_i < min_count)
or (non_target_count_i < min_count)
):
        L_borders.remove(L_borders[-1])  # drop the last border
return L_borders
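# Small sketch of the trend step that feeds this function: the sign of the
# slope fitted over (predictor, target) decides whether target_rate has to
# increase or decrease across the groups (illustrative data only).
def _example_trend_direction():
    import numpy as np
    predictor = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    target = np.array([0.0, 0.0, 1.0, 1.0, 1.0])
    slope = np.polyfit(predictor, target, deg=1)[0]
    k01, k11 = (1, 1) if slope > 0 else (0, -1)
    return slope, k01, k11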
def _statistic(DF_groups):
"""
    Vanilla version of the function, kept just in case
    Compute the per-group statistics
    DF_groups[['sample_count', 'target_count', 'groups']] - per-group data table
"""
nothing = 10 ** -6
DF_statistic = (
DF_groups[["sample_count", "target_count", "groups"]]
.groupby("groups", as_index=False, sort=False)
.sum()
)
DF_statistic_min = (
DF_groups[["predictor", "groups"]]
.groupby("groups", as_index=False, sort=False)
.min()
)
DF_statistic_max = (
DF_groups[["predictor", "groups"]]
.groupby("groups", as_index=False, sort=False)
.max()
)
DF_statistic["min"] = DF_statistic_min["predictor"]
DF_statistic["max"] = DF_statistic_max["predictor"]
DF_statistic["sample_rate"] = (
DF_statistic["sample_count"] / DF_statistic["sample_count"].sum()
)
DF_statistic["target_rate"] = (
DF_statistic["target_count"] / DF_statistic["sample_count"]
)
    # Compute WoE and IV
samples_num = DF_statistic["sample_count"].sum()
events = DF_statistic["target_count"].sum()
non_events = samples_num - events
DF_statistic["non_events_i"] = (
DF_statistic["sample_count"] - DF_statistic["target_count"]
)
DF_statistic["event_rate_i"] = DF_statistic["target_count"] / (events + nothing)
DF_statistic["non_event_rate_i"] = DF_statistic["non_events_i"] / (
non_events + nothing
)
DF_statistic["WOE"] = [
math.log(
DF_statistic["non_event_rate_i"][i]
/ (DF_statistic["event_rate_i"][i] + nothing)
+ nothing
)
for i in DF_statistic.index
]
DF_statistic["IV"] = DF_statistic["WOE"] * (
DF_statistic["non_event_rate_i"] - DF_statistic["event_rate_i"]
)
DF_statistic = DF_statistic.merge(
DF_groups[["type", "groups"]].drop_duplicates(), how="left", on="groups"
)
return DF_statistic
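# Self-contained sketch of the WoE/IV arithmetic used above (the group counts
# are made up): WOE = ln(non_event_share / event_share) per group, and IV sums
# the WOE-weighted share differences over the groups.
def _example_woe_iv():
    import numpy as np
    import pandas as pd
    groups = pd.DataFrame(
        {"sample_count": [400, 350, 250], "target_count": [10, 35, 80]},
        index=["low", "mid", "high"],
    )
    events = groups["target_count"].sum()
    non_events = groups["sample_count"].sum() - events
    event_share = groups["target_count"] / events
    non_event_share = (groups["sample_count"] - groups["target_count"]) / non_events
    groups["WOE"] = np.log(non_event_share / event_share)
    groups["IV"] = groups["WOE"] * (non_event_share - event_share)
    return groups, groups["IV"].sum()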
def _group_plot(DF_result, L_cols=["sample_rate", "target_rate", "WOE"]):
"""
    Vanilla version of the function, kept just in case
    Plot the predictor grouping
    DF_result - data table
    L_cols - list of column names
L_cols = ['sample_rate', 'target_rate', 'WOE']
"""
[sample_rate, target_rate, WOE] = L_cols
fig, ax_pd = plt.subplots(figsize=(8, 5))
x2 = [DF_result[sample_rate][:i].sum() for i in range(DF_result.shape[0])] + [
1
    ]  # cumulative sample share
    x = [np.mean(x2[i : i + 2]) for i in range(len(x2) - 1)]  # midpoint of each group
    woe = list(DF_result[WOE])
    height = list(DF_result[target_rate])  # event rate within the group
    width = list(DF_result[sample_rate])  # sample share per group
ax_pd.bar(
x=x,
height=height,
width=width,
color=[0, 122 / 255, 123 / 255],
label="Группировка",
alpha=0.7,
)
ax_woe = ax_pd.twinx()
ax_woe.plot(
x, woe, lw=2, color=[37 / 255, 40 / 255, 43 / 255], label="woe", marker="o"
)
ax_woe.plot(
[0, 1], [0, 0], lw=1, color=[37 / 255, 40 / 255, 43 / 255], linestyle="--"
)
plt.xlim([0, 1])
plt.xticks(x2, [round(i, 2) for i in x2], fontsize=12)
ax_pd.grid(True)
ax_pd.set_xlabel("Доля выборки", fontsize=16)
ax_pd.set_ylabel("pd", fontsize=16)
ax_woe.set_ylabel("woe", fontsize=16)
    # compute the axis limits and the grid step
max_woe = max([int(abs(i)) + 1 for i in woe])
max_pd = max([int(i * 10) + 1 for i in height]) / 10
ax_pd.set_ylim([0, max_pd])
ax_woe.set_ylim([-max_woe, max_woe])
ax_pd.set_yticks([round(i, 2) for i in np.linspace(0, max_pd, 11)])
ax_woe.set_yticks([round(i, 2) for i in np.linspace(-max_woe, max_woe, 11)])
plt.title("Группировка предиктора", fontsize=18)
ax_pd.legend(loc=[0.2, -0.25], fontsize=14)
ax_woe.legend(loc=[0.6, -0.25], fontsize=14)
    # categorical groups
n_cat = DF_result.loc[DF_result["type"] == "cat"].shape[0]
if n_cat > 0:
ax_pd.bar(
x=x[-n_cat:],
height=height[-n_cat:],
width=width[-n_cat:],
color="m",
label="Категориальные",
)
ax_pd.legend(loc=[0.15, -0.33], fontsize=14)
plt.show()
# %% ExecuteTime={"start_time": "2020-03-25T11:06:21.844897Z", "end_time": "2020-03-25T11:06:21.855955Z"}
def _woeTransformer(
x, y, cat_values=[], min_sample_rate=0.05, min_count=3, monotonic=True, plot=True
):
"""
    Vanilla version of the function, kept just in case
    woeTransformer - finds the optimal group borders under the given constraints
    x - array of numeric predictor values
    y - array of class labels (0, 1)
    cat_values - categorical values (placeholders and incomparable values - needed for a monotonic trend!)
    min_sample_rate - minimum group size (share of the sample)
    min_count - minimum number of observations of each class in a group
    monotonic - monotonic trend
"""
    # Prepare the input data
DF_data_i = pd.DataFrame()
DF_data_i["predictor"] = x
DF_data_i["target"] = y
    # Aggregate the data by predictor values
DF_data_gr = _grouping(DF_data_i)
    # Check the categorical groups (possible additional categories)
    # 1) possible additional categories
DF_i1 = DF_data_gr.loc[DF_data_gr["sample_rate"] > min_sample_rate].loc[
~DF_data_gr["predictor"].isin(cat_values)
]
DF_i2 = DF_data_gr.loc[~DF_data_gr["predictor"].isin(cat_values)]
L = []
for i in DF_i2["predictor"]:
try:
L.append(np.inf < i)
except Exception:
L.append(True)
DF_i2 = DF_i2.loc[
|
pd.Series(L, index=DF_i2.index)
|
pandas.Series
|
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ts_charting.figure as figure
from ts_charting.figure import process_series
class Testprocess_data(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_already_aligned(self):
plot_index = pd.date_range(start="2000", freq="D", periods=100)
series = pd.Series(range(100), index=plot_index)
plot_series = process_series(series, plot_index)
tm.assert_almost_equal(series, plot_series)
|
tm.assert_almost_equal(plot_series.index, plot_index)
|
pandas.util.testing.assert_almost_equal
|
"""Fetch sample readings from sensorpush API, summarize out-of-limit periods
and upload those to StatusDB.
Seems to be one reading per minute given by the API, so nr of samples can be
seen as the number of minutes fetched, for example 1440 samples for 24h.
"""
import requests
import argparse
import yaml
import os
import pytz
import datetime
import numpy as np
import pandas as pd
import logging
from couchdb import Server
logger = logging.getLogger(__name__)  # module-level logger; `logger` is used below but was never defined
class SensorPushConnection(object):
def __init__(self, email, password, verbose):
self.email = email
self.password = password
self._authorized = False
self.base_url = "https://api.sensorpush.com/api/v1"
self.access_token = None
self.verbose = verbose
def _authorize(self):
url_ending = "oauth/authorize"
url = "/".join([self.base_url, url_ending])
body_data = {"email": self.email, "password": self.password}
resp = requests.post(url, json=body_data)
assert resp.status_code == 200
authorization_value = resp.json().get("authorization")
body_data = {"authorization": "{}".format(authorization_value)}
url_ending = "oauth/accesstoken"
url = "/".join(x.strip("/") for x in [self.base_url, url_ending] if x)
resp = requests.post(url, json=body_data)
assert resp.status_code == 200
self.access_token = resp.json().get("accesstoken")
self._authorized = True
def _make_request(self, url_ending, body_data):
if not self._authorized:
self._authorize()
url = "/".join(x.strip("/") for x in [self.base_url, url_ending] if x)
auth_headers = {"Authorization": self.access_token}
attempt = 1
max_attempts = 3
while attempt <= max_attempts:
try:
resp = requests.post(url, json=body_data, headers=auth_headers)
if self.verbose:
logging.info(f"Request sent: {vars(resp.request)}")
logging.info(f"Status code: {resp.status_code}")
assert resp.status_code == 200
                attempt = max_attempts  # request succeeded; leave the retry loop after this pass
except AssertionError:
logger.error(
f"Error fetching sensorpush data: {resp.text}, attempt {attempt} of {max_attempts}"
)
                if attempt >= max_attempts:
resp.raise_for_status()
attempt += 1
return resp
def get_samples(self, nr_samples, startTime=None, stopTime=None):
url = "/samples"
body_data = {
"measures": ["temperature"],
}
if nr_samples:
body_data["limit"] = nr_samples
if startTime:
body_data["startTime"] = startTime
if stopTime:
body_data["stopTime"] = stopTime
r = self._make_request(url, body_data)
return r.json()
def get_sensors(self):
url = "/devices/sensors"
body_data = {}
r = self._make_request(url, body_data)
return r.json()
class SensorDocument(object):
def __init__(
self,
original_samples,
sensor_name,
start_time,
nr_samples_requested,
limit_lower,
limit_upper,
):
self.original_samples = original_samples
self.sensor_name = sensor_name
self.start_time = start_time.strftime("%Y-%m-%dT%H:%M:%S")
self.nr_samples_requested = nr_samples_requested
self.limit_lower = limit_lower
self.limit_upper = limit_upper
self.intervals_lower = []
self.intervals_lower_extended = []
self.intervals_higher = []
self.intervals_higher_extended = []
# Save all samples around areas outside of limits, otherwise save only hourly averages
self.saved_samples = {}
def format_for_statusdb(self):
return_d = vars(self)
del return_d["original_samples"]
for interval_type in [
"intervals_lower",
"intervals_lower_extended",
"intervals_higher",
"intervals_higher_extended",
]:
return_d[interval_type] = self._interval_list_to_str(
return_d[interval_type]
)
# For convenience with the javascript plotting library, save it as a list of lists
return_d["saved_samples"] = [
[k, v] for k, v in sorted(return_d["saved_samples"].items())
]
return return_d
def _interval_list_to_str(self, input_list):
return [
(sp.strftime("%Y-%m-%dT%H:%M:%S"), ep.strftime("%Y-%m-%dT%H:%M:%S"))
for sp, ep in input_list
]
def _samples_from_intervals(self, intervals):
for interval_lower, interval_upper in intervals:
conv_lower = interval_lower.to_pydatetime()
conv_upper = interval_upper.to_pydatetime()
samples_dict = self.original_samples[conv_lower:conv_upper].to_dict()
self.saved_samples.update(
{
(k.strftime("%Y-%m-%dT%H:%M:%S"), round(v, 3))
for k, v in samples_dict.items()
}
)
def add_samples_from_intervals_lower(self):
self._samples_from_intervals(self.intervals_lower_extended)
def add_samples_from_intervals_higher(self):
self._samples_from_intervals(self.intervals_higher_extended)
def summarize_intervals(self, sample_series, limit_type):
"""Identify start- and endpoints of each out-of-limit intervals."""
# Find all time points that are more than 2 minutes apart
# closer than that and they will be considered the same interval
gaps = np.abs(np.diff(sample_series.index)) > np.timedelta64(2, "m")
# Translate into positions
gap_positions = np.where(gaps)[0] + 1
interval_points = []
extended_intervals = []
for interval in np.split(sample_series, gap_positions):
lower = interval.index[0]
upper = interval.index[-1]
interval_points.append((lower, upper))
logger.warning(
f"Interval with temperature too {limit_type} detected for {self.sensor_name} between: {lower} - {upper}"
)
# Extended interval with 1 hour in each direction
extend_lower = lower - np.timedelta64(1, "h")
extend_upper = upper + np.timedelta64(1, "h")
extended_intervals.append((extend_lower, extend_upper))
return interval_points, extended_intervals
def time_in_any_extended_interval(self, time_point):
for interval_lower, interval_upper in self.intervals_lower_extended:
if interval_lower < time_point < interval_upper:
return True
for interval_lower, interval_upper in self.intervals_higher_extended:
if interval_lower < time_point < interval_upper:
return True
return False
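# Stand-alone sketch (not part of the original script) of the interval-splitting
# idea used in SensorDocument.summarize_intervals: readings more than 2 minutes
# apart start a new out-of-limit interval. The data below is synthetic.
def _example_split_intervals():
    import numpy as np
    import pandas as pd
    idx = pd.to_datetime(
        [
            "2023-01-01 10:00", "2023-01-01 10:01", "2023-01-01 10:02",
            "2023-01-01 11:30", "2023-01-01 11:31",
        ]
    )
    readings = pd.Series([9.1, 9.3, 9.0, 9.5, 9.4], index=idx)
    gaps = np.abs(np.diff(readings.index)) > np.timedelta64(2, "m")
    pieces = np.split(readings, np.where(gaps)[0] + 1)
    return [(piece.index[0], piece.index[-1]) for piece in pieces]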
def sensor_limits(sensor_info):
limit_upper = None
limit_lower = None
temp_alerts = sensor_info["alerts"].get("temperature", {})
if temp_alerts.get("enabled"):
if "max" in temp_alerts:
limit_upper = to_celsius(temp_alerts["max"])
if "min" in temp_alerts:
limit_lower = to_celsius(temp_alerts["min"])
return limit_lower, limit_upper
def to_celsius(temp):
return ((temp - 32) * 5) / 9
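# Quick illustrative check of the conversion above (not part of the original
# script): 32 F maps to 0 C and 212 F maps to 100 C.
def _example_to_celsius():
    return to_celsius(32), to_celsius(212)  # (0.0, 100.0)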
def samples_to_df(samples_json):
data_d = {}
for sensor_id, samples in samples_json["sensors"].items():
sensor_d = {}
logging.info(f"Found {len(samples)} samples for sensor {sensor_id}")
for sample in samples:
time_point = datetime.datetime.strptime(
sample["observed"], "%Y-%m-%dT%H:%M:%S.%fZ"
)
# Make datetime aware of timezone
time_point = time_point.replace(tzinfo=datetime.timezone.utc)
# Transform to local timezone
time_point = time_point.astimezone()
sensor_d[time_point] = to_celsius(sample["temperature"])
data_d[sensor_id] = sensor_d
logging.info(f"Data_d has {len(data_d.keys())} nr of keys")
df =
|
pd.DataFrame.from_dict(data_d)
|
pandas.DataFrame.from_dict
|
"""
Metrics which don't quite deserve their own file.
"""
from typing import Optional, Sequence, Union
import pandas as pd
from pandas.api.types import is_categorical
from natsort import natsorted
import numpy as np
def confusion_matrix(
orig: Union[pd.Series, np.ndarray, Sequence],
new: Union[pd.Series, np.ndarray, Sequence],
data: Optional[pd.DataFrame] = None,
*,
normalize: bool = True,
) -> pd.DataFrame:
"""\
Given an original and new set of labels, create a labelled confusion matrix.
Parameters `orig` and `new` can either be entries in data or categorical arrays
of the same size.
Params
------
orig
Original labels.
new
New labels.
data
Optional dataframe to fill entries from.
normalize
Should the confusion matrix be normalized?
Examples
--------
.. plot::
import scanpy as sc; import seaborn as sns
pbmc = sc.datasets.pbmc68k_reduced()
cmtx = sc.metrics.confusion_matrix("bulk_labels", "louvain", pbmc.obs)
sns.heatmap(cmtx)
"""
from sklearn.metrics import confusion_matrix as _confusion_matrix
if data is not None:
if isinstance(orig, str):
orig = data[orig]
if isinstance(new, str):
new = data[new]
# Coercing so I don't have to deal with it later
orig, new = pd.Series(orig), pd.Series(new)
assert len(orig) == len(new)
unique_labels = pd.unique(np.concatenate((orig.values, new.values)))
# Compute
mtx = _confusion_matrix(orig, new, labels=unique_labels)
if normalize:
sums = mtx.sum(axis=1)[:, np.newaxis]
mtx = np.divide(mtx, sums, where=sums != 0)
# Label
orig_name = "Original labels" if orig.name is None else orig.name
new_name = "New Labels" if new.name is None else new.name
df = pd.DataFrame(
mtx,
index=pd.Index(unique_labels, name=orig_name),
columns=pd.Index(unique_labels, name=new_name),
)
# Filter
if is_categorical(orig):
orig_idx = pd.Series(orig).cat.categories
else:
orig_idx = natsorted(pd.unique(orig))
if
|
is_categorical(new)
|
pandas.api.types.is_categorical
|
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
import datetime
from .dataset import Dataset
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, PublicationEmbargoWarning, ReindexMapError, InvalidParameterError
class UcecConf(Dataset):
def __init__(self, version="latest", no_internet=False):
"""Load all of the dataframes as values in the self._data dict variable, with names as keys, and format them properly.
Parameters:
        version (str, optional): The version number to load, or the string "latest" to just load the latest build. Default is "latest".
no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False.
"""
# Set some needed variables, and pass them to the parent Dataset class __init__ function
# This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
valid_versions = ["1.0", "1.1", "1.2", "2.0", "2.0.1"]
data_files = {
"1.0": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_Direct_SRM_tumor_v1.0.cct.gz", #SRM not to be included in 1.0
#"UCEC_confirmatory_IMAC_SRM_tumor_v1.0.cct.gz",
"UCEC_confirmatory_meta_table_v1.0.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_nglycoform-site_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_RNAseq_isoform_FPKM_removed_circRNA_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_WES_cnv_gistic_thresholded_tumor_v1.0.cct.gz",
"UCEC_confirmatory_WES_cnv_log2_ratio_tumor_v1.0.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.0.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.0.maf.gz",
#"UCEC_confirmatory_WGS_SV_tumor_v1.0.txt.gz" #structural_variation - not to be included in 1.0
],
"1.1": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_meta_table_v1.1.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.1.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.1.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_cnv_gistic_thresholded_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_cnv_log2_ratio_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.1.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.1.maf.gz",
],
"1.2": [
"UCEC_confirmatory_meta_table_v1.2.xlsx",
"UCEC_confirmatory_SRM_Direct_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.2.txt.gz",
# "UCEC_confirmatory_RNAseq_isoform_FPKM_removed_circRNA_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v1.2.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v1.2.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.2.maf.gz",
# "UCEC_confirmatory_WGS_SV_tumor_v1.2.txt.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.2.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.2.cct.gz",
# "UCEC_confirmatory_nglycoform-site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
],
"2.0": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_meta_table_v2.0.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v2.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v2.0.cct.gz",
# "UCEC_confirmatory_WES_somatic_mutation_category_level_V1.2.txt.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v2.0.maf.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v2.0.cct.gz",
# "UCEC_confirmatory_WGS_SV_tumor_v2.0.txt.gz",
],
"2.0.1": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_meta_table_v2.0.1.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v2.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v2.0.maf.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v2.0.cct.gz",
],
}
# Call the parent class __init__ function
super().__init__(cancer_type="ucecconf", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet)
# Load the data into dataframes in the self._data dict
loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file. We'll use this to identify files for parsing in the if/elif statements below
if file_name in ["UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["acetylproteomics_gene"] = df
elif file_name in ["UCEC_confirmatory_acetyl_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.reset_index()
df[['Name','Database_ID','Site']] = df.idx.str.split("@", expand=True)
df['Site'] = df['Site'].str.rsplit('-',1,expand=True)[1]
df = df.set_index(["Name", "Site", "Database_ID"])
df = df.drop(columns=["idx"])
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["acetylproteomics"] = df
elif file_name in ["UCEC_confirmatory_meta_table_v1.0.xlsx",
"UCEC_confirmatory_meta_table_v1.1.xlsx",
"UCEC_confirmatory_meta_table_v1.2.xlsx",
"UCEC_confirmatory_meta_table_v2.0.xlsx",
"UCEC_confirmatory_meta_table_v2.0.1.xlsx"]:
df = pd.read_excel(file_path)
df.insert(6, "Proteomics_Tumor_Normal", df["Group"])
df.loc[df['Group'] == 'Enriched_Normal', 'Idx'] = df['Idx'] + '.N'
df.loc[df['Group'] == 'Adjacent_normal', 'Idx'] = df['Idx'].str[:-2] + '.N'
df = df.set_index("Idx")
df.loc[df['Group'] != 'Tumor', 'Group'] = 'Normal'
df = df.rename({'Group': 'Sample_Tumor_Normal'}, axis=1)
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["clinical"] = df
elif file_name in ["UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.0.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.1.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.2.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0, na_values=' NA')
df = df.transpose()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["methylation"] = df
elif file_name in ["UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["miRNA"] = df
elif file_name in ["UCEC_confirmatory_phospho_gene_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["phosphoproteomics_gene"] = df
elif file_name in ["UCEC_confirmatory_phospho_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.reset_index()
df[['Name','Database_ID','Site']] = df.idx.str.split("@", expand=True)
df['Site'] = df['Site'].str.rsplit('-',1,expand=True)[1]
df = df.set_index(["Name", "Site", "Database_ID"])
df = df.drop(columns=["idx"])
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["phosphoproteomics"] = df
elif file_name in ["UCEC_confirmatory_proteomics_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["proteomics"] = df
elif file_name in ["UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["circular_RNA"] = df
elif file_name in ["UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.1.txt.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.2.txt.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v2.0.txt.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.reset_index()
df = df.set_index("Sample")
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["gene_fusion"] = df
elif file_name in ["UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["transcriptomics"] = df
# Targeted proteomics is the direct and PRISM SRM data
elif file_name in ["UCEC_confirmatory_SRM_Direct_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v2.0.cct.gz",]:
df_direct = pd.read_csv(file_path, sep='\t')
df_direct[['Name','Peptide']] = df_direct['idx'].str.rsplit("-", 1, expand=True)
df_direct = df_direct.set_index(["Name", "Peptide"])
df_direct = df_direct.drop(columns=["idx"])
df_direct = df_direct.transpose()
df_direct = df_direct.sort_index()
df_direct.index.name = "Patient_ID"
# Merge if we have both
if "targeted_proteomics" in self._data:
df_prism = self._data["targeted_proteomics"]
df_combined = pd.concat([df_direct, df_prism])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["targeted_proteomics"] = df_combined
else:
self._data["targeted_proteomics"] = df_direct
elif file_name in ["UCEC_confirmatory_SRM_PRISM_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v2.0.cct.gz",]:
df_prism =
|
pd.read_csv(file_path, sep='\t')
|
pandas.read_csv
|
#!/usr/bin/env python
# coding: utf-8
# # UCI
# # Drug Review Dataset
# In[ ]:
import pandas as pd
# In[2]:
data_train = pd.read_csv('.....\\drugsCom_raw\\drugsComTrain_raw.tsv',delimiter='\t')
data_test = pd.read_csv('......\\drugsCom_raw\\drugsComTest_raw.tsv' ,delimiter='\t')
# In[ ]:
# In[3]:
df = pd.concat([data_train,data_test]) # combine the two dataFrames into one for a bigger data size and ease of preprocessing
# In[4]:
data_train.shape
# In[5]:
data_test.shape
# In[6]:
df.head()
# In[7]:
df.columns = ['Id','drugName','condition','review','rating','date','usefulCount'] #rename columns
# In[8]:
df.head()
# In[9]:
df['date'] = pd.to_datetime(df['date']) #convert date to datetime even though we are not using the date in this analysis
# In[10]:
df['date'].head() #confirm conversion
# In[11]:
df2 = df[['Id','review','rating']].copy() # create a new dataframe with just review and rating for sentiment analysis
# In[12]:
df.head() #confirm conversion
# In[13]:
df2.head()
# In[14]:
df2.isnull().any().any() # check for null
# In[15]:
df2.info(null_counts=True) #another way to check for null
# In[16]:
df2.info() #check for datatype, also shows null
# In[17]:
df2['Id'].unique() # shows unique Id as array
# In[18]:
df2['Id'].count() #count total number of items in the Id column
# In[19]:
df2['Id'].nunique() #shows unique Id values
# In[20]:
df['review'][1] # access an individual value
# In[21]:
df.review[1] # another method to access an individual value in a Series
# In[22]:
import nltk
nltk.download(['punkt','stopwords'])
# In[23]:
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
# In[24]:
df2['cleanReview'] = df2['review'].apply(lambda x: ' '.join([item for item in x.split() if item not in stopwords])) # remove stopwords from review
# In[26]:
df2['cleanReview'] = df2['review'].apply(lambda x: ' '.join([item for item in x.split() if item not in stopwords])) # remove stopwords from review
# In[56]:
import vaderSentiment
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()
# In[57]:
df2['vaderReviewScore'] = df2['cleanReview'].apply(lambda x: analyzer.polarity_scores(x)['compound'])
# In[59]:
positive_num = len(df2[df2['vaderReviewScore'] >=0.05])
neutral_num = len(df2[(df2['vaderReviewScore'] >-0.05) & (df2['vaderReviewScore']<0.05)])
negative_num = len(df2[df2['vaderReviewScore']<=-0.05])
# In[60]:
positive_num,neutral_num, negative_num
# In[61]:
df2['vaderSentiment']= df2['vaderReviewScore'].map(lambda x:int(2) if x>=0.05 else int(1) if x<=-0.05 else int(0) )
# In[62]:
df2['vaderSentiment'].value_counts()
# In[63]:
Total_vaderSentiment = positive_num + neutral_num + negative_num
Total_vaderSentiment
# In[64]:
df2.loc[df2['vaderReviewScore'] >=0.05,"vaderSentimentLabel"] ="positive"
df2.loc[(df2['vaderReviewScore'] >-0.05) & (df2['vaderReviewScore']<0.05),"vaderSentimentLabel"]= "neutral"
df2.loc[df2['vaderReviewScore']<=-0.05,"vaderSentimentLabel"] = "negative"
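# In[ ]:
# Illustrative helper (not part of the original notebook) spelling out the VADER
# compound-score thresholds used above: >= 0.05 positive, <= -0.05 negative,
# otherwise neutral.
def example_vader_label(score):
    if score >= 0.05:
        return "positive"
    if score <= -0.05:
        return "negative"
    return "neutral"
# example_vader_label(0.6), example_vader_label(-0.3), example_vader_label(0.0)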
# In[65]:
df2.shape
# In[66]:
positive_rating = len(df2[df2['rating'] >=7.0])
neutral_rating = len(df2[(df2['rating'] >=4) & (df2['rating']<7)])
negative_rating = len(df2[df2['rating']<=3])
# In[67]:
positive_rating,neutral_rating,negative_rating
# In[68]:
Total_rating = positive_rating+neutral_rating+negative_rating
Total_rating
# In[69]:
df2['ratingSentiment']= df2['rating'].map(lambda x:int(2) if x>=7 else int(1) if x<=3 else int(0) )
# In[70]:
df2['ratingSentiment'].value_counts()
# In[72]:
df2.loc[df2['rating'] >=7.0,"ratingSentimentLabel"] ="positive"
df2.loc[(df2['rating'] >=4.0) & (df2['rating']<7.0),"ratingSentimentLabel"]= "neutral"
df2.loc[df2['rating']<=3.0,"ratingSentimentLabel"] = "negative"
# In[98]:
df2 = df2[['Id','review','cleanReview','rating','ratingSentiment','ratingSentimentLabel','vaderReviewScore','vaderSentimentLabel','vaderSentiment']]
# # =============================
# In[104]:
data_df=df2.drop(['review','cleanReview'],axis=1)
# In[149]:
data_df.head()
# In[150]:
data_df.info()
# In[145]:
#data_df=df2.drop(['ratingSentimentLabel'],axis=1)
# In[169]:
from sklearn.preprocessing import LabelEncoder
# In[188]:
encoder = LabelEncoder()
data_cat = data_df["review"]
data_cat_encod = encoder.fit_transform(data_cat)
data_cat_encod =
|
pd.DataFrame(data_cat_encod,columns=["review"])
|
pandas.DataFrame
|
from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.computation.check import _NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != "pandas":
pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
def setup_method(self, method):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="python")
|
tm.assert_series_equal(result, self.expected2, check_names=False)
|
pandas._testing.assert_series_equal
|
# Given a folder directory, link and filter all data exported from ImageJ.
import pandas as pd
import trackpy as tp
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
import datetime
def link(path, FPS, MPP, SEARCH_RANGE_MICRONS, MEMORY, STUBS, MIN_VELOCITY, MIN_AREA, MAX_AREA):
"""
Given the path of a csv file output from ImageJ particle analysis, link, filter, and label the trajectories.
Outputs a dataframe for the csv file analyzed.
"""
df =
|
pd.read_csv(path)
|
pandas.read_csv
|
from pandas.core.common import notnull, isnull
import pandas.core.common as common
import numpy as np
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
assert not notnull(np.inf)
assert not notnull(-np.inf)
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert isnull(np.inf)
assert isnull(-np.inf)
def test_any_none():
assert(common._any_none(1, 2, 3, None))
assert(not common._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(common._all_not_none(1, 2, 3, 4))
assert(not common._all_not_none(1, 2, 3, None))
assert(not common._all_not_none(None, None, None, None))
def test_rands():
r = common.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined =
|
common.adjoin(2, *data)
|
pandas.core.common.adjoin
|
###
# LIBRARIES
###
import datetime as dt
import yfinance as yf
import pandas as pd
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
import ssl
# Set the precision to 2 decimal places
pd.options.display.float_format = '{:.2f}'.format
###
# GLOBAL VARS
###
# For email configuration and formatting
TLS_PORT = 465 # no change
EMAIL_PASS = "<PASSWORD>" # change
SENDER_EMAIL = "<EMAIL>" # change
REC_EMAIL = "<EMAIL>" # change
MSG_TITLE = "Market Update - "+str(dt.date.today()) # no change
# Time Intervals
# Depending on the intervals you wish to compare against, change these
# e.g. yesterday close compared to two days ago, one month ago etc...
INTERVALS = [0, 1, 7, 30, 90, 180, 365]
# Tickers to watch, lookup any new ones to add on Yahoo finance
TICKERS = ['^FTSE', 'VOD.L', 'HSBA.L']
###
# Functions
###
# Helper function to help adjust weekends to weekdays
# Should be enhanced for trading holidays but I'm lazy...
def getLastWorkingDate(date):
if date.weekday() == 5:
return date - dt.timedelta(days = 1)
elif date.weekday() == 6:
return date - dt.timedelta(days = 2)
else:
return date
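# Illustrative check of the weekend adjustment above (dates are arbitrary):
# a Saturday and a Sunday both map back to the preceding Friday.
def exampleLastWorkingDate():
    sat = dt.date(2023, 1, 7)  # Saturday
    sun = dt.date(2023, 1, 8)  # Sunday
    return getLastWorkingDate(sat), getLastWorkingDate(sun)  # both 2023-01-06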
# Retrieve the closing prices for the tickers for each of the
# days for the INTERVALS specified in the past
def getCloses(last_working_date):
results = pd.DataFrame()
for i in INTERVALS:
date = last_working_date - dt.timedelta(days = i)
date = getLastWorkingDate(date)
print("Retrieving data for date:", date)
res = yf.download(TICKERS, start=date - dt.timedelta(days=4), end=date + dt.timedelta(days=1))[-1:]
results = results.append(res["Adj Close"])
return results
# Once closing prices are retrieved calculate the difference
# between last close and those prices.
def getDiffs(closing_prices):
diff_results =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
# adapted from scikit-learn's estimator_checks
__author__ = ["mloning", "fkiraly"]
__all__ = ["check_estimator"]
import numbers
import pickle
import types
from copy import deepcopy
from inspect import isclass, signature
import joblib
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from sklearn import clone
from sklearn.utils._testing import set_random_state
from sklearn.utils.estimator_checks import (
check_get_params_invariance as _check_get_params_invariance,
)
from sklearn.utils.estimator_checks import check_set_params as _check_set_params
from sklearn.utils.validation import check_random_state
from sktime.annotation.base import BaseSeriesAnnotator
from sktime.base import BaseEstimator
from sktime.classification.base import BaseClassifier
from sktime.clustering.base.base import BaseClusterer
from sktime.datatypes._panel._check import is_nested_dataframe
from sktime.dists_kernels import BasePairwiseTransformer, BasePairwiseTransformerPanel
from sktime.exceptions import NotFittedError
from sktime.forecasting.base import BaseForecaster
from sktime.regression.base import BaseRegressor
from sktime.tests._config import (
NON_STATE_CHANGING_METHODS,
VALID_ESTIMATOR_BASE_TYPES,
VALID_ESTIMATOR_TAGS,
VALID_ESTIMATOR_TYPES,
VALID_TRANSFORMER_TYPES,
)
from sktime.transformations.base import (
_PanelToPanelTransformer,
_PanelToTabularTransformer,
_SeriesToPrimitivesTransformer,
_SeriesToSeriesTransformer,
)
from sktime.utils._testing.annotation import make_annotation_problem
from sktime.utils._testing.deep_equals import deep_equals
from sktime.utils._testing.forecasting import (
_get_n_columns,
_make_series,
make_forecasting_problem,
)
from sktime.utils._testing.panel import (
_make_panel_X,
make_classification_problem,
make_clustering_problem,
make_regression_problem,
)
def check_estimator(Estimator, exclude=None):
"""Check whether estimator complies with common interface.
Parameters
----------
Estimator : Estimator class
Raises
------
AssertionError
If Estimator does not comply
"""
for check in yield_estimator_checks(exclude=exclude):
check(Estimator)
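# Illustrative usage sketch (not part of the original module): run the full
# check suite on a single estimator class. NaiveForecaster is only an example;
# any sktime estimator class could be passed instead.
def _example_check_estimator():
    from sktime.forecasting.naive import NaiveForecaster
    check_estimator(NaiveForecaster, exclude=[])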
def yield_estimator_checks(exclude=None):
"""Return iterator to yield estimator checks."""
checks = [
check_inheritance,
check_required_params,
check_estimator_tags,
check_has_common_interface,
check_constructor,
check_get_params,
check_set_params,
check_clone,
check_repr,
check_fit_updates_state,
check_fit_returns_self,
check_raises_not_fitted_error,
check_fit_idempotent,
check_fit_does_not_overwrite_hyper_params,
check_methods_do_not_change_state,
check_methods_have_no_side_effects,
check_persistence_via_pickle,
check_multiprocessing_idempotent,
check_valid_estimator_tags,
]
for check in checks:
# check if associated test is not included in the exclusion list
        if exclude is not None and check.__name__ in exclude:
continue
yield check
def check_required_params(Estimator):
"""Check required parameter interface."""
# Check common meta-estimator interface
if hasattr(Estimator, "_required_parameters"):
required_params = Estimator._required_parameters
assert isinstance(required_params, list), (
f"For estimator: {Estimator}, `_required_parameters` must be a "
f"tuple, but found type: {type(required_params)}"
)
assert all([isinstance(param, str) for param in required_params]), (
f"For estimator: {Estimator}, elements of `_required_parameters` "
f"list must be strings"
)
# check if needless parameters are in _required_parameters
init_params = [
param.name for param in signature(Estimator.__init__).parameters.values()
]
in_required_but_not_init = [
param for param in required_params if param not in init_params
]
if len(in_required_but_not_init) > 0:
raise ValueError(
f"Found parameters in `_required_parameters` which "
f"are not in `__init__`: "
f"{in_required_but_not_init}"
)
def check_estimator_tags(Estimator):
assert hasattr(Estimator, "get_class_tags")
all_tags = Estimator.get_class_tags()
assert isinstance(all_tags, dict)
assert all([isinstance(key, str) for key in all_tags.keys()])
if hasattr(Estimator, "_tags"):
tags = Estimator._tags
assert isinstance(tags, dict), f"_tags must be a dict, but found {type(tags)}"
assert len(tags) > 0, "_tags is empty"
assert all(
[tag in VALID_ESTIMATOR_TAGS for tag in tags.keys()]
), "Some tags in _tags are invalid"
# Avoid ambiguous class attributes
ambiguous_attrs = ("tags", "tags_")
for attr in ambiguous_attrs:
assert not hasattr(Estimator, attr), (
f"Please avoid using the {attr} attribute to disambiguate it from "
f"estimator tags."
)
def check_inheritance(Estimator):
# Check that estimator inherits from BaseEstimator
assert issubclass(Estimator, BaseEstimator), (
f"Estimator: {Estimator} " f"is not a sub-class of " f"BaseEstimator."
)
# Usually estimators inherit only from one BaseEstimator type, but in some cases
# they may be predictor and transformer at the same time (e.g. pipelines)
n_base_types = sum(
[issubclass(Estimator, cls) for cls in VALID_ESTIMATOR_BASE_TYPES]
)
assert 2 >= n_base_types >= 1
# If the estimator inherits from more than one base estimator type, we check if
# one of them is a transformer base type
if n_base_types > 1:
assert issubclass(Estimator, VALID_TRANSFORMER_TYPES)
def check_has_common_interface(Estimator):
# Check estimator implements the common interface
# Check class for type of attribute
assert isinstance(Estimator.is_fitted, property)
# Check instance
estimator = _construct_instance(Estimator)
common_attrs = [
"fit",
"check_is_fitted",
"is_fitted", # read-only property
"_is_fitted", # underlying estimator state
"set_params",
"get_params",
]
for attr in common_attrs:
assert hasattr(estimator, attr), (
f"Estimator: {estimator.__class__.__name__} does not implement "
f"attribute: {attr}"
)
assert hasattr(estimator, "predict") or hasattr(estimator, "transform")
if hasattr(estimator, "inverse_transform"):
assert hasattr(estimator, "transform")
if hasattr(estimator, "predict_proba"):
assert hasattr(estimator, "predict")
def check_get_params(Estimator):
# Check get params works correctly
estimator = _construct_instance(Estimator)
params = estimator.get_params()
assert isinstance(params, dict)
_check_get_params_invariance(estimator.__class__.__name__, estimator)
def check_set_params(Estimator):
# Check set_params works correctly
estimator = _construct_instance(Estimator)
params = estimator.get_params()
assert estimator.set_params(**params) is estimator
_check_set_params(estimator.__class__.__name__, estimator)
def check_clone(Estimator):
# Check we can call clone from scikit-learn
estimator = _construct_instance(Estimator)
clone(estimator)
def check_repr(Estimator):
# Check we can call repr
estimator = _construct_instance(Estimator)
repr(estimator)
def check_constructor(Estimator):
# Check that the constructor behaves correctly
estimator = _construct_instance(Estimator)
# Check that init does not construct object of other class than itself
assert isinstance(estimator, Estimator)
# Ensure that each parameter is set in init
init_params = _get_args(type(estimator).__init__)
invalid_attr = set(init_params) - set(vars(estimator)) - {"self"}
assert not invalid_attr, (
"Estimator %s should store all parameters"
" as an attribute during init. Did not find "
"attributes `%s`." % (estimator.__class__.__name__, sorted(invalid_attr))
)
# Ensure that init does nothing but set parameters
# No logic/interaction with other parameters
def param_filter(p):
"""Identify hyper parameters of an estimator."""
return (
p.name != "self" and p.kind != p.VAR_KEYWORD and p.kind != p.VAR_POSITIONAL
)
init_params = [
p for p in signature(estimator.__init__).parameters.values() if param_filter(p)
]
params = estimator.get_params()
# Filter out required parameters with no default value and parameters
# set for running tests
required_params = getattr(estimator, "_required_parameters", tuple())
test_params = Estimator.get_test_params()
if isinstance(test_params, list):
test_params = test_params[0]
test_params = test_params.keys()
init_params = [
param
for param in init_params
if param.name not in required_params and param.name not in test_params
]
for param in init_params:
assert param.default != param.empty, (
"parameter `%s` for %s has no default value and is not "
"included in `_required_parameters`"
% (param.name, estimator.__class__.__name__)
)
if type(param.default) is type:
assert param.default in [np.float64, np.int64]
else:
assert type(param.default) in [
str,
int,
float,
bool,
tuple,
type(None),
np.float64,
types.FunctionType,
joblib.Memory,
]
param_value = params[param.name]
if isinstance(param_value, np.ndarray):
np.testing.assert_array_equal(param_value, param.default)
else:
if bool(isinstance(param_value, numbers.Real) and np.isnan(param_value)):
# Allows to set default parameters to np.nan
assert param_value is param.default, param.name
else:
assert param_value == param.default, param.name
def check_fit_updates_state(Estimator):
# Check that fit updates the is-fitted states
attrs = ["_is_fitted", "is_fitted"]
estimator = _construct_instance(Estimator)
# Check it's not fitted before calling fit
for attr in attrs:
assert not getattr(
estimator, attr
), f"Estimator: {estimator} does not initiate attribute: {attr} to False"
fit_args = _make_args(estimator=estimator, method="fit")
estimator.fit(*fit_args)
# Check states are updated after calling fit
for attr in attrs:
assert getattr(
estimator, attr
), f"Estimator: {estimator} does not update attribute: {attr} during fit"
def check_fit_returns_self(Estimator):
# Check that fit returns self
estimator = _construct_instance(Estimator)
fit_args = _make_args(estimator=estimator, method="fit")
assert (
estimator.fit(*fit_args) is estimator
), f"Estimator: {estimator} does not return self when calling fit"
def check_raises_not_fitted_error(Estimator):
# Check that we raise appropriate error for unfitted estimators
estimator = _construct_instance(Estimator)
# call methods without prior fitting and check that they raise our
# NotFittedError
for method in NON_STATE_CHANGING_METHODS:
if hasattr(estimator, method):
args = _make_args(estimator, method)
with pytest.raises(NotFittedError, match=r"has not been fitted"):
getattr(estimator, method)(*args)
def check_fit_idempotent(Estimator):
# Check that calling fit twice is equivalent to calling it once
estimator = _construct_instance(Estimator)
set_random_state(estimator)
# Fit for the first time
fit_args = _make_args(estimator=estimator, method="fit")
estimator.fit(*fit_args)
results = dict()
args = dict()
for method in NON_STATE_CHANGING_METHODS:
if hasattr(estimator, method):
args[method] = _make_args(estimator, method)
results[method] = getattr(estimator, method)(*args[method])
# Fit again
set_random_state(estimator)
estimator.fit(*fit_args)
for method in NON_STATE_CHANGING_METHODS:
if hasattr(estimator, method):
new_result = getattr(estimator, method)(*args[method])
_assert_array_almost_equal(
results[method],
new_result,
# err_msg=f"Idempotency check failed for method {method}",
)
def check_fit_does_not_overwrite_hyper_params(Estimator):
# Check that we do not overwrite hyper-parameters in fit
estimator = _construct_instance(Estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
fit_args = _make_args(estimator=estimator, method="fit")
estimator.fit(*fit_args)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert joblib.hash(new_value) == joblib.hash(original_value), (
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (estimator.__class__.__name__, param_name, original_value, new_value)
)
def check_methods_do_not_change_state(Estimator):
# Check that methods that are not supposed to change attributes of the
# estimators do not change anything (including hyper-parameters and
# fitted parameters)
estimator = _construct_instance(Estimator)
set_random_state(estimator)
fit_args = _make_args(estimator=estimator, method="fit")
estimator.fit(*fit_args)
dict_before = estimator.__dict__.copy()
for method in NON_STATE_CHANGING_METHODS:
if hasattr(estimator, method):
args = _make_args(estimator=estimator, method=method)
getattr(estimator, method)(*args)
if method == "transform" and Estimator.get_class_tag("fit-in-transform"):
# Some transformations fit during transform, as they apply
# some transformation to each series passed to transform,
                # so transform will actually change the state of these estimators.
continue
assert (
estimator.__dict__ == dict_before
), f"Estimator: {estimator} changes __dict__ during {method}"
def check_methods_have_no_side_effects(Estimator):
# Check that calling methods has no side effects on args
if not isclass(Estimator):
Estimator = type(Estimator)
estimator = _construct_instance(Estimator)
set_random_state(estimator)
# Fit for the first time
fit_args = _make_args(estimator=estimator, method="fit")
old_fit_args = deepcopy(fit_args)
estimator.fit(*fit_args)
assert deep_equals(
old_fit_args, fit_args
), f"Estimator: {estimator} has side effects on arguments of fit"
for method in NON_STATE_CHANGING_METHODS:
if hasattr(estimator, method):
new_args = _make_args(estimator=estimator, method=method)
old_args = deepcopy(new_args)
getattr(estimator, method)(*new_args)
assert deep_equals(
old_args, new_args
), f"Estimator: {estimator} has side effects on arguments of {method}"
def check_persistence_via_pickle(Estimator):
# Check that we can pickle all estimators
estimator = _construct_instance(Estimator)
set_random_state(estimator)
fit_args = _make_args(estimator=estimator, method="fit")
estimator.fit(*fit_args)
# Generate results before pickling
results = dict()
args = dict()
for method in NON_STATE_CHANGING_METHODS:
if hasattr(estimator, method):
args[method] = _make_args(estimator=estimator, method=method)
results[method] = getattr(estimator, method)(*args[method])
# Pickle and unpickle
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
# Compare against results after pickling
for method in results:
unpickled_result = getattr(unpickled_estimator, method)(*args[method])
_assert_array_almost_equal(
results[method],
unpickled_result,
decimal=6,
err_msg="Results are not the same after pickling",
)
def check_multiprocessing_idempotent(Estimator):
# Check that running an estimator on a single process is no different to running
# it on multiple processes. We also check that we can set n_jobs=-1 to make use
# of all CPUs. The test is not really necessary though, as we rely on joblib for
# parallelization and can trust that it works as expected.
estimator = _construct_instance(Estimator)
params = estimator.get_params()
if "n_jobs" in params:
results = dict()
args = dict()
# run on a single process
estimator = _construct_instance(Estimator)
estimator.set_params(n_jobs=1)
set_random_state(estimator)
args["fit"] = _make_args(estimator=estimator, method="fit")
estimator.fit(*args["fit"])
# compute and store results
for method in NON_STATE_CHANGING_METHODS:
if hasattr(estimator, method):
args[method] = _make_args(estimator=estimator, method=method)
results[method] = getattr(estimator, method)(*args[method])
# run on multiple processes, reusing the same input arguments
estimator = _construct_instance(Estimator)
estimator.set_params(n_jobs=-1)
set_random_state(estimator)
estimator.fit(*args["fit"])
# compute and compare results
for method in results:
if hasattr(estimator, method):
result = getattr(estimator, method)(*args[method])
_assert_array_equal(
results[method],
result,
err_msg="Results are not equal for n_jobs=1 and n_jobs=-1",
)
def check_valid_estimator_tags(Estimator):
# check if Estimator tags are in VALID_ESTIMATOR_TAGS
for tag in Estimator.get_class_tags().keys():
assert tag in VALID_ESTIMATOR_TAGS
def _get_err_msg(estimator):
return (
f"Invalid estimator type: {type(estimator)}. Valid estimator types are: "
f"{VALID_ESTIMATOR_TYPES}"
)
def _construct_instance(Estimator):
"""Construct Estimator instance if possible."""
# return the instance of the class with default parameters
return Estimator.create_test_instance()
def _make_args(estimator, method, **kwargs):
"""Generate testing arguments for estimator methods."""
if method == "fit":
return _make_fit_args(estimator, **kwargs)
if method == "update":
raise NotImplementedError()
elif method in ("predict", "predict_proba", "decision_function"):
return _make_predict_args(estimator, **kwargs)
elif method == "transform":
return _make_transform_args(estimator, **kwargs)
elif method == "inverse_transform":
return _make_inverse_transform_args(estimator, **kwargs)
else:
raise ValueError(f"Method: {method} not supported")
def _make_fit_args(estimator, **kwargs):
if isinstance(estimator, BaseForecaster):
# we need to handle the TransformedTargetForecaster separately
if isinstance(estimator, _SeriesToSeriesTransformer):
y = _make_series(**kwargs)
else:
# create matching n_columns input, if n_columns not passed
# e.g., to give bivariate y to strictly multivariate forecaster
if "n_columns" not in kwargs.keys():
n_columns = _get_n_columns(
estimator.get_tag(tag_name="scitype:y", raise_error=False)
)[0]
y = make_forecasting_problem(n_columns=n_columns, **kwargs)
else:
y = make_forecasting_problem(**kwargs)
fh = 1
X = None
return y, X, fh
elif isinstance(estimator, BaseSeriesAnnotator):
X = make_annotation_problem(**kwargs)
return (X,)
elif isinstance(estimator, BaseClassifier):
return make_classification_problem(**kwargs)
elif isinstance(estimator, BaseRegressor):
return make_regression_problem(**kwargs)
elif isinstance(
estimator, (_SeriesToPrimitivesTransformer, _SeriesToSeriesTransformer)
):
X = _make_series(**kwargs)
return (X,)
elif isinstance(estimator, (_PanelToTabularTransformer, _PanelToPanelTransformer)):
return make_classification_problem(**kwargs)
elif isinstance(estimator, BaseClusterer):
return (make_clustering_problem(**kwargs),)
elif isinstance(estimator, BasePairwiseTransformer):
return None, None
elif isinstance(estimator, BasePairwiseTransformerPanel):
return None, None
else:
raise ValueError(_get_err_msg(estimator))
def _make_predict_args(estimator, **kwargs):
if isinstance(estimator, BaseForecaster):
fh = 1
return (fh,)
elif isinstance(estimator, (BaseClassifier, BaseRegressor)):
X = _make_panel_X(**kwargs)
return (X,)
elif isinstance(estimator, BaseSeriesAnnotator):
X = make_annotation_problem(n_timepoints=10, **kwargs)
return (X,)
elif isinstance(estimator, BaseClusterer):
X = _make_panel_X(**kwargs)
return (X,)
else:
raise ValueError(_get_err_msg(estimator))
def _make_transform_args(estimator, **kwargs):
if isinstance(
estimator, (_SeriesToPrimitivesTransformer, _SeriesToSeriesTransformer)
):
X = _make_series(**kwargs)
return (X,)
elif isinstance(
estimator,
(
_PanelToTabularTransformer,
_PanelToPanelTransformer,
),
):
X = _make_panel_X(**kwargs)
return (X,)
elif isinstance(estimator, BasePairwiseTransformer):
d = {"col1": [1, 2], "col2": [3, 4]}
return pd.DataFrame(d), pd.DataFrame(d)
elif isinstance(estimator, BasePairwiseTransformerPanel):
d =
completion: pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
api: pandas.DataFrame
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, auc, confusion_matrix, precision_score, recall_score, f1_score
from sklearn.metrics import average_precision_score, precision_recall_curve
from ._woe_binning import woe_binning, woe_binning_2, woe_binning_3
class Metrics:
def __init__(self, df, actual, prediction):
self.df = df
self.target = actual
self.actual = df[actual]
self.prediction = df[prediction]
self.gains = self.calculate_gains()
self.ks = self.ks()
self.gini = self.gini()
self.tn, self.fp, self.fn, self.tp, self.precision, self.recall, self.f1_score = self.precision_recall_f1_score()
def calculate_gains(self):
"""Returns a pandas dataframe with gains along with KS and Gini calculated"""
self.df['scaled_score'] = (self.df['positive_probability']*1000000).round(0)
gains = self.df.groupby('scaled_score')[self.target].agg(['count','sum'])
gains.columns = ['total','responders']
gains.reset_index(inplace=True)
        # sort_values returns a new frame, so reassign to make the descending order take effect
        gains = gains.sort_values(by='scaled_score', ascending=False)
gains['non_responders'] = gains['total'] - gains['responders']
gains['cum_resp'] = gains['responders'].cumsum()
gains['cum_non_resp'] = gains['non_responders'].cumsum()
gains['total_resp'] = gains['responders'].sum()
gains['total_non_resp'] = gains['non_responders'].sum()
gains['perc_resp'] = (gains['responders']/gains['total_resp'])*100
gains['perc_non_resp'] = (gains['non_responders']/gains['total_non_resp'])*100
gains['perc_cum_resp'] = gains['perc_resp'].cumsum()
gains['perc_cum_non_resp'] = gains['perc_non_resp'].cumsum()
gains['k_s'] = gains['perc_cum_resp'] - gains['perc_cum_non_resp']
return gains
def get_threshold(self):
"""Returns a pandas dataframe with y_pred based on threshold from roc_curve."""
fpr, tpr, threshold = roc_curve(self.actual, self.prediction)
threshold_cutoff_df = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'threshold': threshold})
threshold_cutoff_df['distance'] = ((threshold_cutoff_df['fpr']-0)**2+(threshold_cutoff_df['tpr']-1)**2)**0.5
threshold_cutoff_df['distance_diff'] = abs(threshold_cutoff_df['distance'].diff(periods=1))
        # fall back to the point closest to (0, 1) in case no local maximum is found below
        threshold_cutoff = threshold_cutoff_df.loc[threshold_cutoff_df['distance'].idxmin(), 'threshold']
        for index, rows in threshold_cutoff_df.iterrows():
if index != 0 and index != threshold_cutoff_df.shape[0]-1:
curr_val = threshold_cutoff_df.loc[index, 'distance_diff']
prev_val = threshold_cutoff_df.loc[index-1, 'distance_diff']
next_val = threshold_cutoff_df.loc[index+1, 'distance_diff']
if curr_val>prev_val and curr_val>next_val:
threshold_cutoff = threshold_cutoff_df.loc[index, 'threshold']
break
return threshold_cutoff
def gini(self):
fpr, tpr, threshold = roc_curve(self.actual, self.prediction)
auroc = auc(fpr, tpr)
gini = 2*auroc -1
return gini
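    # Worked example: an AUROC of 0.75 corresponds to Gini = 2 * 0.75 - 1 = 0.5,
    # while a random model (AUROC = 0.5) gives Gini = 0.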
    def ks(self):
        # self.gains already holds the gains DataFrame computed in __init__
        gains = self.gains
return gains['k_s'].max()
def precision_recall_f1_score(self):
threshold_cutoff = self.get_threshold()
self.y_pred = np.where(self.prediction>=threshold_cutoff,1,0)
self.df['y_pred'] = self.y_pred
tn, fp, fn, tp = confusion_matrix(self.actual, self.y_pred).ravel()
precision = precision_score(self.actual, self.y_pred)
recall = recall_score(self.actual, self.y_pred)
f1 = f1_score(self.actual, self.y_pred)
return tn, fp, fn, tp, precision, recall, f1
def to_dict(self):
return {'ks': self.ks, 'gini': self.gini, 'tn': self.tn, 'tp': self.tp, 'fn': self.fn, 'fp': self.fp, 'precision': self.precision, 'recall': self.recall, 'f1_score': self.f1_score}
def standard_metrics(df, target_col, prediction_col):
"""Returns a dict with all metrics - Gini, KS, Precision, Recall, F1 Score, True Negative, True Positive, False Positive, False Negative."""
metrics = Metrics(df, target_col, prediction_col)
return metrics.to_dict()
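# Usage sketch (illustrative only; assumes the scored dataframe carries a
# 'positive_probability' column, which calculate_gains expects):
#   import numpy as np
#   import pandas as pd
#   rng = np.random.default_rng(0)
#   scored = pd.DataFrame({
#       'target': rng.integers(0, 2, size=1000),
#       'positive_probability': rng.random(1000),
#   })
#   standard_metrics(scored, 'target', 'positive_probability')
#   # -> {'ks': ..., 'gini': ..., 'tn': ..., 'tp': ..., 'fn': ..., 'fp': ...,
#   #     'precision': ..., 'recall': ..., 'f1_score': ...}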
def quick_psi(dev, val):
"""Calculate PSI from 2 arrays - dev and val"""
return sum([(a-b)*np.log(a/b) for (a,b) in zip(dev,val)])
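# Worked example (illustrative), using bucket shares from dev and val:
#   dev_dist = [0.10, 0.20, 0.30, 0.40]
#   val_dist = [0.15, 0.25, 0.25, 0.35]
#   quick_psi(dev_dist, val_dist)  # ~= 0.047, usually read as "no significant shift" (< 0.1)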
def psi(dev, val, target='positive_probability', n_bins=10):
"""
    Returns a pandas dataframe with a psi column (Population Stability Index) after binning scores into n_bins quantile buckets (deciles by default).
    Scores are computed as round(500 - 30 * log2(100 * (p/(1-p))), 0), where p is the predicted probability.
    dev and val must be passed together so that the bins created on the dev dataframe are also applied to val.
"""
dev['score'] = dev[target].apply(lambda x: round(500-30*np.log2(100*(x/(1-x))), 0))
val['score'] = val[target].apply(lambda x: round(500-30*np.log2(100*(x/(1-x))), 0))
_, bins = pd.qcut(dev.score, n_bins, retbins=True, precision=0)
bins = [int(i) if abs(i)!=np.inf else i for i in bins]
dev['bins'] =
completion: pd.cut(dev.score, bins)
api: pandas.cut