prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handles as the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
|
tm.assert_frame_equal(actual, df1)
|
pandas.util.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
|
tm.assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
# Implementation of Feed Forward Neural Network Classifer using gradient descent
from __future__ import print_function, division
import numpy as np
import pandas as pd
import dl_nn
import dl_nn_mini
import dl_nn_mini_mo
import time
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import matplotlib.pyplot as plt
c0_mean = [2, 2]
c0_cov = [[1, 0], [0, 1]]
c0 = np.random.multivariate_normal(c0_mean, c0_cov, 5000)
c0_df = pd.DataFrame(c0)
c0_df.columns = ['x', 'y']
c0_df['group'] = 1.0
c0_df['t0'] = 0.0
c0_df['t1'] = 1.0
c0_df['t2'] = 0.0
c1_mean = [-2, -2]
c1_cov = [[1, 0], [0, 1]]
c1 = np.random.multivariate_normal(c1_mean, c1_cov, 5000)
c1_df = pd.DataFrame(c1)
c1_df.columns = ['x', 'y']
c1_df['group'] = 1.0
c1_df['t0'] = 0.0
c1_df['t1'] = 1.0
c1_df['t2'] = 0.0
c2_mean = [2, -2]
c2_cov = [[1, 0], [0, 1]]
c2 = np.random.multivariate_normal(c2_mean, c2_cov, 5000)
c2_df = pd.DataFrame(c2)
c2_df.columns = ['x', 'y']
c2_df['group'] = 0.0
c2_df['t0'] = 1.0
c2_df['t1'] = 0.0
c2_df['t2'] = 0.0
c3_mean = [-2, 2]
c3_cov = [[1, 0], [0, 1]]
c3 = np.random.multivariate_normal(c3_mean, c3_cov, 5000)
c3_df = pd.DataFrame(c3)
c3_df.columns = ['x', 'y']
c3_df['group'] = 2.0
c3_df['t0'] = 0.0
c3_df['t1'] = 0.0
c3_df['t2'] = 1.0
dat =
|
pd.concat([c0_df, c1_df, c2_df, c3_df], ignore_index=True)
|
pandas.concat
|
import numpy as np
import pandas as pd
from pandas import DataFrame as df
from pandas import Series as sr
def rndlist(n=4):
x,y=[],[]
l=list(range(n))
for i in range(n):
x.append(np.random.permutation(l))
y.append(np.random.permutation(l))
f1=df(x)
f2=
|
df(y)
|
pandas.DataFrame
|
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only input all files (.bam and .fa) output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message),
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
# input a window of w CGs and output a list of proportions with starting genomic location and genomic distance across
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(3,16)),np.repeat(1.5,6)),[3.2,0.8]),np.repeat(2,3),np.repeat(1.5,2))
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),np.repeat(1.5,0.75))
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out, opt
else:
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
return window
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['G'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['C'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
#for i in range(0,meth.shape[1]-w+1,1):
#if i>w-2 and i<2*w:
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
return sample, coverage, cov_context, 'CG'
print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
#samfile.close()
def CHHgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
start=datetime.datetime.now()
MU=np.zeros((2,w))
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
#if enough_reads(window,w,complete=True):
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
return sample, coverage, cov_context, 'CHH'
print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
def CHGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
coverage = cov_context = 0
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
MU=np.zeros((2,w))
start=datetime.datetime.now()
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)=='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)=='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # G
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2r = pd.DataFrame(data=dr)
#df2.head()
tempr=tempr.append(df2r, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
#temp.head()
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','G','N'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','C','T'],np.nan)
methbin = aggreR # backup
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#total += w
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','A','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
return sample, coverage, cov_context, 'CHG'
print("Done CHG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
def split_bam(samplenames,Folder):
# get bam size
spbam_list = []
bamfile = samplenames + '.bam'
statinfo_out = os.stat(Folder+bamfile)
bamsize = statinfo_out.st_size
samfile = pysam.Samfile(Folder+bamfile, "rb")
fileout_base = os.path.splitext(bamfile)[0] # filename
ext = '.bam'
x = 0
fileout = Folder+fileout_base+"_" + str(x)+ext # filename_x.bam
print("fileout",fileout)
header = samfile.header
outfile = pysam.Samfile(fileout, "wb", header = header)
sum_Outfile_Size=0
for reads in samfile.fetch():
outfile.write(reads)
statinfo_out = os.stat(fileout)
outfile_Size = statinfo_out.st_size
if(outfile_Size >=337374182 and sum_Outfile_Size <= bamsize):
sum_Outfile_Size = sum_Outfile_Size + outfile_Size
x = x + 1
spbam_list.append(fileout_base + "_" + str(x)+ext)
outfile.close()
pysam.index(fileout)
fileout = Folder+fileout_base + "_" + str(x)+ext
print("fileout",fileout)
outfile = pysam.Samfile(fileout, "wb",header = header)
outfile.close()
pysam.index(fileout)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--windowsize",type=int, default=4 ,help='number of CGs')
parser.add_argument("-c", "--cores",type=int, default=4, help='number of cores')
parser.add_argument("-m", "--MeH",type=int, default=2, help='Methylation heterogeneity score 1:Abundance 2:PW 3:Phylogeny')
parser.add_argument("-d", "--dist",type=int, default=1, help='Distance between methylation patterns 1:Hamming 2:WDK')
parser.add_argument("--CG", default=False, action='store_true', help='Include genomic context CG')
parser.add_argument("--CHG", default=False, action='store_true', help='Include genomic context CHG')
parser.add_argument("--CHH", default=False, action='store_true', help='Include genomic context CHH')
parser.add_argument("--opt", default=False, action='store_true', help='Outputs compositions of methylation patterns')
parser.add_argument('--mlv', default=False, action='store_true', help='Outputs methylation levels')
parser.add_argument('--imp', default=True, action='store_false', help='Implement BSImp (impute if valid)')
args = parser.parse_args()
import sys
import time
import os
import pandas as pd
import multiprocessing
from joblib import Parallel, delayed
#num_cores = multiprocessing.cpu_count()
if __name__ == "__main__":
open_log('MeHscreening.log')
logm("Call genome screening.")
#start = time.time()
Folder = 'MeHdata/'
files = os.listdir(Folder)
bam_list = []
# all samples' bam files
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension == '.fa':
fa = filename
if file_extension == '.bam':
bam_list.append(filename)
#if 'cores' in args:
# num_cores = args.cores
#else:
# num_cores = 4
Parallel(n_jobs=args.cores)(delayed(split_bam)(bamfile,Folder=Folder) for bamfile in bam_list)
spbam_list = []
tempfiles = os.listdir(Folder)
for file in tempfiles:
filename, file_extension = os.path.splitext(file)
if file_extension=='.bam' and filename not in bam_list:
spbam_list.append(filename)
#print(spbam_list)
topp =
|
pd.DataFrame(columns=['sample','coverage','context_coverage','context'])
|
pandas.DataFrame
|
# Libraries import
import numpy as np
import pandas as pd
import os
from sklearn.ensemble import RandomForestClassifier
# Input data files are available in the "../input/" directory.
for dirname, _, filenames in os.walk('./input'):
for filename in filenames:
print(os.path.join(dirname, filename))
train_data =
|
pd.read_csv("./input/train.csv")
|
pandas.read_csv
|
from contextlib import contextmanager
from unittest.mock import patch
from zipfile import ZipFile
from pandas import DataFrame, read_csv
from pandas.util.testing import assert_frame_equal
from pytest import raises, fixture, warns, mark
from IPython import get_ipython
from data_vault import Vault, parse_arguments, VaultMagics
from data_vault.frames import frame_manager
@contextmanager
def file_from_storage(archive_path, file_path, pwd: str = None, mode='r'):
if pwd:
pwd = pwd.encode()
with ZipFile(archive_path) as archive:
yield archive.open(
file_path,
mode=mode,
pwd=pwd
)
ipython = get_ipython()
EXAMPLE_DATA_FRAME = DataFrame([{'a': 1, 'b': 1}, {'a': 1, 'b': 2}])
def patch_ipython_globals(dummy_globals):
return patch.object(frame_manager, 'get_ipython_globals', return_value=dummy_globals)
@fixture
def mock_key(monkeypatch):
monkeypatch.setenv('KEY', 'a_strong_password')
def test_open_vault_message():
with raises(Exception, match='Please setup the storage with %open_vault first'):
ipython.magic('vault del x')
def test_vault_security_alert(tmpdir):
# should warn if not encryption key provided
with warns(UserWarning, match='Encryption variable not set - no encryption will be used..*'):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip')
# should not warn if secure explicitly toggled off
with warns(None) as record:
ipython.magic(f'open_vault --path {tmpdir}/archive.zip --secure False')
assert not record.list
# should not warn if encryption key provided
with warns(None) as record:
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e SOME_KEY')
assert not record.list
def test_usage_help(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
with raises(ValueError, match='No command matched. Did you mean:\n\t - store .*?'):
ipython.magic('vault store x')
def test_variable_not_defined(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
with patch_ipython_globals(locals()):
with raises(ValueError, match=".*variable 'x' is not defined in the global namespace.*"):
ipython.magic('vault store x')
def test_function_not_defined(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
with raises(NameError, match="function 'pipe_delimited' is not defined in the global namespace"):
ipython.magic('vault store x in my_frames with pipe_delimited')
def test_store(tmpdir):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip --secure False')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
ipython.magic('vault store x in my_frames')
with file_from_storage(f'{tmpdir}/archive.zip', 'my_frames/x') as f:
data =
|
read_csv(f, sep='\t', index_col=0)
|
pandas.read_csv
|
import pyspark
from pyspark.sql import SQLContext
import pandas as pd
import csv
import os
def load_states():
# read US states
f = open('states.txt', 'r')
states = set()
for line in f.readlines():
l = line.strip('\n')
if l != '':
states.add(l)
return states
def validate2(states, bt):
#sqlContext = SQLContext(sc)
for state in states:
if not os.path.exists("US/" + state):
continue
"""
Train
"""
train_prefix = "US/" + state + '/' + bt + "/train/" + state + "_train_"
business_train_fname = train_prefix + 'yelp_academic_dataset_business.csv'
business_train_fname2 = train_prefix + 'yelp_academic_dataset_business2.csv'
review_train_fname = train_prefix + 'yelp_academic_dataset_review.csv'
checkins_train_fname = train_prefix + 'yelp_academic_dataset_checkin.csv'
tip_train_fname = train_prefix + 'yelp_academic_dataset_tip.csv'
user_train_fname = train_prefix + 'yelp_academic_dataset_user.csv'
df_business_train = pd.read_csv(business_train_fname)
df_review_train = pd.read_csv(review_train_fname)
df_checkins_train = pd.read_csv(checkins_train_fname)
df_tip_train =
|
pd.read_csv(tip_train_fname)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
from pyomo import environ
from pyomo.environ import *
def optimization_MIP(model,
x, ## decision variables (already attached to model)
model_master, ## master table that specifies learned functions for constraints (and parameters)
data, ## dataframe holding all data to be used for convex hull
max_violation=None, ## parameter for RF model allowable violation proportion (between 0-1)
tr=True, ## bool variable for the use of trust region constraints
clustering_model=None): ## trained clustering algorithm using the entire data (only active if tr = True)
def logistic_x(proba):
if proba == 0:
proba = 0.00001
if proba == 1:
proba = 0.99999
return - np.log(1 / proba - 1)
def constraints_linear(model, outcome, task, coefficients, lb=None, ub=None, weight_objective=0, SCM=None, features=None):
'''
Embed a trained linear predictive model for 'outcome' into the master 'model'.
'Coefficients' is a model file generated by the constraint_extrapolation_skEN() function.
'lb/ub' specify the lower/upper bounds if 'outcome' is to be incorporated as a constraint.
'weight_objective' specifies the weight to use if incorporating 'outcome' as a term in the objective.
'''
# Row-level information: row = single constraint (multiple rows can correspond to single leaf)
intercept = coefficients['intercept'][0]
coeff = coefficients.drop(['intercept'], axis=1, inplace=False).loc[0, :]
model.add_component('LR'+outcome, Constraint(expr=model.y[outcome] == sum(model.x[i] * coeff.loc[i] for i in pd.DataFrame(coeff).index) + intercept))
if weight_objective != 0:
model.OBJ.set_value(expr=model.OBJ.expr + weight_objective * model.y[outcome])
elif not pd.isna(SCM):
model.add_component('scm_' + outcome, Constraint(expr=model.y[outcome] == SCM + model.x[outcome]))
else:
if not pd.isna(ub):
if task == 'binary':
ub = logistic_x(proba=ub)
model.add_component('ub_' + outcome, Constraint(expr=model.y[outcome] <= ub))
if not pd.isna(lb):
if task == 'binary':
lb = logistic_x(proba=lb)
model.add_component('lb_' + outcome, Constraint(expr=model.y[outcome] >= lb))
def constraints_svm(model, outcome, task, coefficients, lb=None, ub=None, weight_objective=0, SCM=None, features=None):
'''
Embed a trained SVM predictive model for 'outcome' into the master 'model'.
'Coefficients' is a model file generated by the constraint_extrapolation_skSVM() function.
'lb/ub' specify the lower/upper bounds if 'outcome' is to be incorporated as a constraint.
'weight_objective' specifies the weight to use if incorporating 'outcome' as a term in the objective.
'''
# Row-level information: row = single constraint (multiple rows can correspond to single leaf)
intercept = coefficients['intercept'][0]
coeff = coefficients.drop(['intercept'], axis=1, inplace=False).loc[0, :]
# Set y to decision function
model.add_component('SVM'+outcome, Constraint(expr=model.y[outcome] == sum(model.x[i] * coeff.loc[i] for i in features) + intercept))
# Set y to binary: 1 if expr >= 0, else 0
# model.add_component('SVM_lb'+outcome, Constraint(expr=model.y[outcome] >= 1/M*(sum(model.x[i] * coeff.loc[i] for i in features) + intercept)))
if weight_objective != 0:
model.OBJ.set_value(expr=model.OBJ.expr + weight_objective * model.y[outcome])
elif not pd.isna(SCM):
model.add_component('scm_' + outcome, Constraint(expr=model.y[outcome] == SCM + model.x[outcome]))
else:
if task == "continuous":
if not pd.isna(ub):
model.add_component('ub_' + outcome, Constraint(expr=model.y[outcome] <= ub))
if not pd.isna(lb):
model.add_component('lb_' + outcome, Constraint(expr=model.y[outcome] >= lb))
elif task == "binary":
model.add_component('lb_' + outcome, Constraint(expr=model.y[outcome] >= 0))
def constraints_tree(model, outcome, tree_table, lb=None, ub=None, M=1e5, weight_objective=0, SCM=None, features=None):
'''
Embed a trained decision tree predictive model for 'outcome' into the master 'model'.
'tree_table' is a model file generated by the constraint_extrapolation_skTree() function, where each row encodes a single constraint (multiple rows can correspond to single leaf)
'lb/ub' specify the lower/upper bounds if 'outcome' is to be incorporated as a constraint.
'weight_objective' specifies the weight to use if incorporating 'outcome' as a term in the objective.
'M' is an upper bound on the value at any node.
'''
leaf_values = tree_table.loc[:, ['ID', 'prediction']].drop_duplicates().set_index('ID')
# Row-level information:
intercept = tree_table['threshold']
coeff = tree_table.drop(['ID', 'threshold', 'prediction'], axis=1, inplace=False).reset_index(drop=True)
l_ids = tree_table['ID']
n_constr = coeff.shape[0]
L = np.unique(tree_table['ID'])
def constraintsTree_1(model, j):
return sum(model.x[i]*coeff.loc[j, i] for i in features) <= intercept.iloc[j] + M*(1-model.l[(outcome,str(l_ids.iloc[j]))])
def constraintsTree_2(model):
return sum(model.l[(outcome, str(i))] for i in L) == 1
def constraintTree(model):
return model.y[outcome] == sum(leaf_values.loc[i, 'prediction'] * model.l[(outcome, str(i))] for i in L)
model.add_component(outcome+'_1', Constraint(range(n_constr), rule=constraintsTree_1))
model.add_component('DT'+outcome, Constraint(rule=constraintTree))
model.add_component(outcome+'_2', Constraint(rule=constraintsTree_2))
if weight_objective != 0:
model.OBJ.set_value(expr=model.OBJ.expr + weight_objective * model.y[outcome])
elif not pd.isna(SCM):
model.add_component('scm_' + outcome, Constraint(expr=model.y[outcome] == SCM + model.x[outcome]))
else:
if not pd.isna(ub):
model.add_component('ub_'+outcome, Constraint(expr=model.y[outcome] <= ub))
if not pd.isna(lb):
model.add_component('lb_' + outcome, Constraint(expr=model.y[outcome] >= lb))
def constraints_rf(model, outcome, forest_table, ub=None, lb=None, max_violation=None, weight_objective=0, SCM=None, features=None):
'''
Embed a trained random forest predictive model for 'outcome' into the master 'model'.
'forest_table' is a model file generated by the constraint_extrapolation_skRF() function, where each row encodes a single constraint (multiple rows can correspond to single leaf)
'lb/ub' specify the lower/upper bounds if 'outcome' is to be incorporated as a constraint.
'weight_objective' specifies the weight to use if incorporating 'outcome' as a term in the objective.
'max_violation' specifies the allowable violation proportion for a constraint (e.g. 0.2 -> 20% of trees can violate the chosen lb/ub)
'''
forest_table['Tree_id'] = [outcome + '_' + str(i) for i in forest_table['Tree_id']]
T = np.unique(forest_table['Tree_id'])
## For each tree in the forest, add tree to model and define outcome y
for i, t in enumerate(T):
tree_table = forest_table.loc[forest_table['Tree_id'] == t, :].drop('Tree_id', axis=1)
# don't set LB, UB, or objective for individual trees
constraints_tree(model, t, tree_table, lb=None, ub=None, weight_objective=0, SCM=None, features=features)
## Compute average (as y[outcome]), either for avg. constraint or objective
model.add_component('RF'+outcome, Constraint(rule=model.y[outcome] == 1 / len(T) * quicksum(model.y[j] for j in T)))
if weight_objective != 0:
model.OBJ.set_value(expr=model.OBJ.expr + weight_objective * model.y[outcome])
elif not pd.isna(SCM):
model.add_component('scm_' + outcome, Constraint(expr=model.y[outcome] == SCM + model.x[outcome]))
else:
if pd.isna(max_violation):
# Constrain average values
if not pd.isna(ub):
model.add_component('ub_' + outcome, Constraint(expr=model.y[outcome] <= ub))
if not pd.isna(lb):
model.add_component('lb_' + outcome, Constraint(expr=model.y[outcome] >= lb))
else:
# Constrain proportion of trees (1 - max_violation)
if not pd.isna(ub):
def constraint_upperBoundViol(model, j):
return 1 / 100 * (model.y[j] - ub) <= model.y_viol[(outcome, str(j))]
model.add_component('upperBoundViol'+outcome, Constraint(T, rule=constraint_upperBoundViol))
if not pd.isna(lb):
def constraint_lowerBoundViol(model, j):
return 1 / 100 * (lb - model.y[j]) <= model.y_viol[(outcome, str(j))]
model.add_component('lowerBoundViol' + outcome, Constraint(T, rule=constraint_lowerBoundViol))
model.add_component('constraintViol'+outcome, Constraint(rule=1 / len(T) * sum(model.y_viol[(outcome, str(j))] for j in T) <= max_violation))
def constraints_gbm(model, outcome, task, gbm_table, ub=None, lb=None, weight_objective=0, SCM=None, features=None):
'''
Embed a trained gradient-boosting machine model for 'outcome' into the master 'model'.
'gbm_table' is a model file generated by the constraint_extrapolation_skGBM() function, where each row encodes a single constraint (multiple rows can correspond to single leaf)
'lb/ub' specify the lower/upper bounds if 'outcome' is to be incorporated as a constraint.
'weight_objective' specifies the weight to use if incorporating 'outcome' as a term in the objective.
'''
gbm_table['Tree_id'] = [outcome + '_' + str(i) for i in gbm_table['Tree_id']]
T = np.unique(gbm_table['Tree_id'])
## For each tree in the forest, add tree to model and define outcome y
for i, t in enumerate(T):
tree_table = gbm_table.loc[gbm_table['Tree_id'] == t, :].drop(
['Tree_id', 'initial_prediction', 'learning_rate'], axis=1, inplace=False)
# don't set LB, UB, or objective for individual trees
constraints_tree(model, t, tree_table, lb=None, ub=None, weight_objective=0, SCM=None, features=features)
# ## Compute average (as y[outcome]), either for avg. constraint or objective
def constraint_gbm(model):
return model.y[outcome] == np.unique(gbm_table['initial_prediction']).item() + np.unique(gbm_table['learning_rate']).item() * quicksum(model.y[j] for j in T)
model.add_component('GBM'+outcome, Constraint(rule=constraint_gbm))
if task == 'binary':
lb = logistic_x(proba=lb) if lb != None else None
ub = logistic_x(proba=ub) if ub != None else None
if weight_objective != 0:
model.OBJ.set_value(expr=model.OBJ.expr + weight_objective * model.y[outcome])
elif not pd.isna(SCM):
model.add_component('scm_' + outcome, Constraint(expr=model.y[outcome] == SCM + model.x[outcome]))
else:
if not pd.isna(ub):
if task == 'binary':
ub = logistic_x(proba=ub)
model.add_component('ub_' + outcome, Constraint(expr=model.y[outcome] <= ub))
if not pd.isna(lb):
if task == 'binary':
lb = logistic_x(proba=lb)
model.add_component('lb_' + outcome, Constraint(expr=model.y[outcome] >= lb))
def constraints_mlp(model, data, outcome, task, weights, lb=None, ub=None, weight_objective=0, SCM=None, features=None, M_l=-1e5, M_u=1e5):
'''
Embed a trained multi-layer perceptron model for 'outcome' into the master 'model'.
'weights' is a model file generated by the constraint_extrapolation_skMLP() function, where each row encodes the coefficients for a single node/layer pair
'lb/ub' specify the lower/upper bounds if 'outcome' is to be incorporated as a constraint.
'weight_objective' specifies the weight to use if incorporating 'outcome' as a term in the objective.
'M_l' and 'M_u' are lower/upper bounds on the value at any node/layer pair.
'''
# Recursively generate constraints linking nodes between layers, starting from input
nodes_input = range(len(features))
# v_input = [x[f'col{i}'] for i in nodes_input]
v_input = [x[i] for i in features]
max_layer = max(weights['layer'])
for l in range(max_layer + 1):
df_layer = weights.query('layer == %d' % l)
max_nodes = [k for k in df_layer.columns if 'node_' in k]
# coeffs_layer = np.array(df_layer.iloc[:, range(len(max_nodes))].dropna(axis=1))
coeffs_layer = np.array(df_layer.loc[:, max_nodes].dropna(axis=1))
intercepts_layer = np.array(df_layer['intercept'])
nodes = df_layer['node']
if l == max_layer:
node = nodes.iloc[0] # only one node in last layer
model.add_component('MLP'+outcome, Constraint(rule=model.y[outcome] == sum(v_input[i] * coeffs_layer[node, i] for i in nodes_input) + intercepts_layer[
node]))
else:
# Save v_pos for input to next layer
v_pos_list = []
for node in nodes:
## Initialize variables
v_pos_list.append(model.v[(outcome, l, node)])
model.add_component('constraint_1_' + str(l) + '_'+str(node)+outcome,
Constraint(rule=model.v[(outcome, l, node)] >= sum(v_input[i] * coeffs_layer[node, i] for i in nodes_input) + intercepts_layer[node]))
model.add_component('constraint_2_' + str(l)+'_' + str(node) + outcome,
Constraint(rule=model.v[(outcome, l, node)] <= M_u * (model.v_ind[(outcome, l, node)])))
model.add_component('constraint_3_' + str(l)+'_' + str(node) + outcome,
Constraint(rule=model.v[(outcome, l, node)] <= sum(v_input[i] * coeffs_layer[node, i] for i in nodes_input) + intercepts_layer[node] - M_l * (1 - model.v_ind[(outcome, l, node)])))
## Prepare nodes_input for next layer
nodes_input = nodes
v_input = v_pos_list
if weight_objective != 0:
model.OBJ.set_value(expr=model.OBJ.expr + weight_objective * model.y[outcome])
elif not
|
pd.isna(SCM)
|
pandas.isna
|
import numpy as np
import pandas as pd
import lightgbm as lgb
from ..search.base import BoosterSearchBase
class LgbGridSearch(BoosterSearchBase):
def __init__(self, param_grid, cv, metric, maximize):
super().__init__(param_grid, cv, metric, maximize)
def tr_transform(self, tr_x, tr_y):
return lgb.Dataset(tr_x, tr_y)
def te_transform(self, te_x):
return te_x
def eval_cv(self, cv_result, n_folds):
cv_result = pd.DataFrame(cv_result.copy())
cv_result.columns = [col.replace('stdv', 'std') for col in cv_result]
return self._eval_cv(cv_result, n_folds)
def compute_booster_cv(self, params, tr_data, *args, **kwargs):
cv_models = {}
cv_result = lgb.cv(
params,
tr_data,
folds=self.folds,
callbacks=[save_cv_models(cv_models)],
*args, **kwargs
)
return cv_result, cv_models['cv_packs']
def extract_model_meta_features(self, comb_id, cv_packs, te_x):
tr_meta_ftr = np.zeros(self.n_train_rows)
te_meta_ftr = np.zeros(te_x.shape[0])
for lgb_model, fold in zip(cv_packs.boosters, self.folds):
val_idx = fold[1]
assert len(lgb_model.valid_sets) == 1
assert len(val_idx) == lgb_model.valid_sets[0].num_data()
tr_meta_ftr[val_idx] = lgb_model._Booster__inner_predict(data_idx=1)
te_meta_ftr += lgb_model.predict(te_x)
te_meta_ftr = te_meta_ftr / len(self.folds)
model_name = 'lgb_%d' % comb_id
tr_meta_ftr = pd.Series(tr_meta_ftr, name=model_name)
te_meta_ftr =
|
pd.Series(te_meta_ftr, name=model_name)
|
pandas.Series
|
import datetime
import string
from collections import namedtuple
from distutils.version import LooseVersion
from random import choices
from typing import Optional, Type
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.tests.extension.base import (
BaseArithmeticOpsTests,
BaseBooleanReduceTests,
BaseCastingTests,
BaseComparisonOpsTests,
BaseConstructorsTests,
BaseDtypeTests,
BaseGetitemTests,
BaseGroupbyTests,
BaseInterfaceTests,
BaseMethodsTests,
BaseMissingTests,
BaseNoReduceTests,
BaseNumericReduceTests,
BaseParsingTests,
BasePrintingTests,
BaseReshapingTests,
BaseSetitemTests,
)
from fletcher import FletcherBaseDtype
if LooseVersion(pd.__version__) >= "0.25.0":
# imports of pytest fixtures needed for derived unittest classes
from pandas.tests.extension.conftest import ( # noqa: F401
as_array, # noqa: F401
use_numpy, # noqa: F401
groupby_apply_op, # noqa: F401
as_frame, # noqa: F401
as_series, # noqa: F401
)
PANDAS_GE_1_1_0 = LooseVersion(pd.__version__) >= "1.1.0"
FletcherTestType = namedtuple(
"FletcherTestType",
[
"dtype",
"data",
"data_missing",
"data_for_grouping",
"data_for_sorting",
"data_missing_for_sorting",
"data_repeated",
],
)
def is_arithmetic_type(arrow_dtype: pa.DataType) -> bool:
"""Check whether this is a type that support arithmetics."""
return (
pa.types.is_integer(arrow_dtype)
or pa.types.is_floating(arrow_dtype)
or pa.types.is_decimal(arrow_dtype)
)
skip_non_artithmetic_type = pytest.mark.skip_by_type_filter(
[lambda x: not is_arithmetic_type(x)]
)
xfail_list_scalar_constuctor_not_implemented = pytest.mark.xfail_by_type_filter(
[pa.types.is_list], "constructor from scalars is not implemented for lists"
)
xfail_list_equals_not_implemented = pytest.mark.xfail_by_type_filter(
[pa.types.is_list], "== is not implemented for lists"
)
xfail_list_setitem_not_implemented = pytest.mark.xfail_by_type_filter(
[pa.types.is_list], "__setitem__ is not implemented for lists"
)
xfail_missing_list_dict_encode = pytest.mark.xfail_by_type_filter(
[pa.types.is_list],
"ArrowNotImplementedError: dictionary-encode not implemented for list<item: string>",
)
xfail_bool_too_few_uniques = pytest.mark.xfail_by_type_filter(
[pa.types.is_boolean], "Test requires at least 3 unique values"
)
test_types = [
FletcherTestType(
pa.string(),
["🙈", "Ö", "Č", "a", "B"] * 20,
[None, "A"],
["B", "B", None, None, "A", "A", "B", "C"],
["B", "C", "A"],
["B", None, "A"],
lambda: choices(list(string.ascii_letters), k=10),
),
FletcherTestType(
pa.bool_(),
[True, False, True, True, False] * 20,
[None, False],
[True, True, None, None, False, False, True, False],
[True, False, False],
[True, None, False],
lambda: choices([True, False], k=10),
),
FletcherTestType(
pa.int8(),
# Use small values here so that np.prod stays in int32
[2, 1, 1, 2, 1] * 20,
[None, 1],
[2, 2, None, None, -100, -100, 2, 100],
[2, 100, -10],
[2, None, -10],
lambda: choices(list(range(100)), k=10),
),
FletcherTestType(
pa.int16(),
# Use small values here so that np.prod stays in int32
[2, 1, 3, 2, 1] * 20,
[None, 1],
[2, 2, None, None, -100, -100, 2, 100],
[2, 100, -10],
[2, None, -10],
lambda: choices(list(range(100)), k=10),
),
FletcherTestType(
pa.int32(),
# Use small values here so that np.prod stays in int32
[2, 1, 3, 2, 1] * 20,
[None, 1],
[2, 2, None, None, -100, -100, 2, 100],
[2, 100, -10],
[2, None, -10],
lambda: choices(list(range(100)), k=10),
),
FletcherTestType(
pa.int64(),
# Use small values here so that np.prod stays in int64
[2, 1, 3, 2, 1] * 20,
[None, 1],
[2, 2, None, None, -100, -100, 2, 100],
[2, 100, -10],
[2, None, -10],
lambda: choices(list(range(100)), k=10),
),
FletcherTestType(
pa.float64(),
[2, 1.0, 1.0, 5.5, 6.6] * 20,
[None, 1.1],
[2.5, 2.5, None, None, -100.1, -100.1, 2.5, 100.1],
[2.5, 100.99, -10.1],
[2.5, None, -10.1],
lambda: choices([2.5, 1.0, -1.0, 0, 66.6], k=10),
),
# Most of the tests fail as assert_extension_array_equal casts to numpy object
# arrays and on them equality is not defined.
pytest.param(
FletcherTestType(
pa.list_(pa.string()),
[["B", "C"], ["A"], [None], ["A", "A"], []] * 20,
[None, ["A"]],
[["B"], ["B"], None, None, ["A"], ["A"], ["B"], ["C"]],
[["B"], ["C"], ["A"]],
[["B"], None, ["A"]],
lambda: choices([["B", "C"], ["A"], [None], ["A", "A"]], k=10),
)
),
FletcherTestType(
pa.date64(),
[
datetime.date(2015, 1, 1),
datetime.date(2010, 12, 31),
datetime.date(1970, 1, 1),
datetime.date(1900, 3, 31),
datetime.date(1999, 12, 31),
]
* 20,
[None, datetime.date(2015, 1, 1)],
[
datetime.date(2015, 2, 2),
datetime.date(2015, 2, 2),
None,
None,
datetime.date(2015, 1, 1),
datetime.date(2015, 1, 1),
datetime.date(2015, 2, 2),
datetime.date(2015, 3, 3),
],
[
datetime.date(2015, 2, 2),
datetime.date(2015, 3, 3),
datetime.date(2015, 1, 1),
],
[datetime.date(2015, 2, 2), None, datetime.date(2015, 1, 1)],
lambda: choices(list(pd.date_range("2010-1-1", "2011-1-1").date), k=10),
),
]
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series."""
return request.param
@pytest.fixture(params=test_types)
def fletcher_type(request):
return request.param
@pytest.fixture(autouse=True)
def skip_by_type_filter(request, fletcher_type):
if request.node.get_closest_marker("skip_by_type_filter"):
for marker in request.node.iter_markers("skip_by_type_filter"):
for func in marker.args[0]:
if func(fletcher_type.dtype):
pytest.skip(f"skipped for type: {fletcher_type}")
@pytest.fixture(autouse=True)
def xfail_by_type_filter(request, fletcher_type):
if request.node.get_closest_marker("xfail_by_type_filter"):
for marker in request.node.iter_markers("xfail_by_type_filter"):
for func in marker.args[0]:
if func(fletcher_type.dtype):
pytest.xfail(f"XFAIL for type: {fletcher_type}")
@pytest.fixture
def dtype(fletcher_type, fletcher_dtype):
return fletcher_dtype(fletcher_type.dtype)
@pytest.fixture
def data(fletcher_type, fletcher_array):
return fletcher_array(fletcher_type.data, dtype=fletcher_type.dtype)
@pytest.fixture
def data_for_twos(dtype, fletcher_type, fletcher_array):
if dtype._is_numeric:
return fletcher_array([2] * 100, dtype=fletcher_type.dtype)
else:
return None
@pytest.fixture
def data_missing(fletcher_type, fletcher_array):
return fletcher_array(fletcher_type.data_missing, dtype=fletcher_type.dtype)
@pytest.fixture
def data_repeated(fletcher_type, fletcher_array):
"""Return different versions of data for count times."""
pass # noqa
def gen(count):
for _ in range(count):
yield fletcher_array(
fletcher_type.data_repeated(), dtype=fletcher_type.dtype
)
yield gen
@pytest.fixture
def data_for_grouping(fletcher_type, fletcher_array):
"""Fixture with data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
return fletcher_array(fletcher_type.data_for_grouping, dtype=fletcher_type.dtype)
@pytest.fixture
def data_for_sorting(fletcher_type, fletcher_array):
"""Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
return fletcher_array(fletcher_type.data_for_sorting, dtype=fletcher_type.dtype)
@pytest.fixture
def data_missing_for_sorting(fletcher_type, fletcher_array):
"""Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
return fletcher_array(
fletcher_type.data_missing_for_sorting, dtype=fletcher_type.dtype
)
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Return a simple fixture for festing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
class TestBaseCasting(BaseCastingTests):
pass
class TestBaseConstructors(BaseConstructorsTests):
def test_from_dtype(self, data):
if pa.types.is_string(data.dtype.arrow_dtype):
pytest.xfail(
"String construction is failing as Pandas wants to pass the FletcherChunkedDtype to NumPy"
)
BaseConstructorsTests.test_from_dtype(self, data)
@xfail_list_scalar_constuctor_not_implemented
def test_series_constructor_scalar_with_index(self, data, dtype):
if PANDAS_GE_1_1_0:
BaseConstructorsTests.test_series_constructor_scalar_with_index(
self, data, dtype
)
class TestBaseDtype(BaseDtypeTests):
pass
class TestBaseGetitemTests(BaseGetitemTests):
def test_loc_iloc_frame_single_dtype(self, data):
if pa.types.is_string(data.dtype.arrow_dtype):
pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/27673"
)
else:
BaseGetitemTests.test_loc_iloc_frame_single_dtype(self, data)
class TestBaseGroupbyTests(BaseGroupbyTests):
@xfail_bool_too_few_uniques
@xfail_missing_list_dict_encode
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
BaseGroupbyTests.test_groupby_extension_agg(self, as_index, data_for_grouping)
@xfail_bool_too_few_uniques
@xfail_missing_list_dict_encode
def test_groupby_extension_no_sort(self, data_for_grouping):
BaseGroupbyTests.test_groupby_extension_no_sort(self, data_for_grouping)
@xfail_missing_list_dict_encode
def test_groupby_extension_transform(self, data_for_grouping):
if pa.types.is_boolean(data_for_grouping.dtype.arrow_dtype):
valid = data_for_grouping[~data_for_grouping.isna()]
df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
result = df.groupby("B").A.transform(len)
# Expected grouping is different as we only have two non-null values
expected = pd.Series([3, 3, 3, 3, 3, 3], name="A")
self.assert_series_equal(result, expected)
else:
BaseGroupbyTests.test_groupby_extension_transform(self, data_for_grouping)
@xfail_missing_list_dict_encode
def test_groupby_extension_apply(
self, data_for_grouping, groupby_apply_op # noqa: F811
):
BaseGroupbyTests.test_groupby_extension_apply(
self, data_for_grouping, groupby_apply_op
)
class TestBaseInterfaceTests(BaseInterfaceTests):
@pytest.mark.xfail(
reason="view or self[:] returns a shallow copy in-place edits are not backpropagated"
)
def test_view(self, data):
BaseInterfaceTests.test_view(self, data)
def test_array_interface(self, data):
if pa.types.is_list(data.dtype.arrow_dtype):
pytest.xfail("Not sure whether this test really holds for list")
else:
BaseInterfaceTests.test_array_interface(self, data)
@xfail_list_setitem_not_implemented
def test_copy(self, data):
BaseInterfaceTests.test_array_interface(self, data)
class TestBaseMethodsTests(BaseMethodsTests):
# https://github.com/pandas-dev/pandas/issues/22843
@pytest.mark.skip(reason="Incorrect expected")
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna, dtype):
pass
@xfail_list_equals_not_implemented
@pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
def test_equals(self, data, na_value, as_series, box): # noqa: F811
if PANDAS_GE_1_1_0:
BaseMethodsTests.test_equals(self, data, na_value, as_series, box)
@xfail_missing_list_dict_encode
def test_value_counts_with_normalize(self, data):
if PANDAS_GE_1_1_0:
BaseMethodsTests.test_value_counts_with_normalize(self, data)
def test_combine_le(self, data_repeated):
# GH 20825
# Test that combine works when doing a <= (le) comparison
# Fletcher returns 'fletcher_chunked[bool]' instead of np.bool as dtype
orig_data1, orig_data2 = data_repeated(2)
if pa.types.is_list(orig_data1.dtype.arrow_dtype):
return pytest.skip("__le__ not implemented for list scalars with None")
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 <= x2)
expected = pd.Series(
orig_data1._from_sequence(
[a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))]
)
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 <= x2)
expected = pd.Series(
orig_data1._from_sequence([a <= val for a in list(orig_data1)])
)
self.assert_series_equal(result, expected)
def test_combine_add(self, data_repeated, dtype):
if dtype.name in [
"fletcher_chunked[date64[ms]]",
"fletcher_continuous[date64[ms]]",
]:
pytest.skip(
"unsupported operand type(s) for +: 'datetime.date' and 'datetime.date"
)
else:
|
BaseMethodsTests.test_combine_add(self, data_repeated)
|
pandas.tests.extension.base.BaseMethodsTests.test_combine_add
|
#!/usr/bin/env python
# coding: utf-8
# "With whom do users initiate?" Mlogit Modeling
# ===
#
# This version is the script version.
import os
import re
import pandas as pd
import numpy as np
from collections import Counter, defaultdict
import sqlite3
from tqdm import tqdm
import random
import pickle
from datetime import datetime
import bisect
import matplotlib.pyplot as plt
import matplotlib.dates as md
import matplotlib
import pylab as pl
from IPython.core.display import display, HTML
import networkx as nx
import sys
# if set to True, will generate in the test data range
# Otherwise, will generate in the train data range
# This is definited by model_start_timestamp below
should_generate_test_data = False
working_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/author_initiations"
assert os.path.exists(working_dir)
start_date = datetime.fromisoformat('2005-01-01')
start_timestamp = int(start_date.timestamp() * 1000)
end_date = datetime.fromisoformat('2016-06-01')
end_timestamp = int(end_date.timestamp() * 1000)
subset_start_date = datetime.fromisoformat('2014-01-01')
subset_start_timestamp = int(subset_start_date.timestamp() * 1000)
##### Data reading
# load the list of valid users
data_selection_working_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/data_selection"
valid_user_ids = set()
with open(os.path.join(data_selection_working_dir, "valid_user_ids.txt"), 'r') as infile:
for line in infile:
user_id = line.strip()
if user_id == "":
continue
else:
valid_user_ids.add(int(user_id))
# load the list of valid sites
data_selection_working_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/data_selection"
valid_site_ids = set()
with open(os.path.join(data_selection_working_dir, "valid_site_ids.txt"), 'r') as infile:
for line in infile:
site_id = line.strip()
if site_id == "":
continue
else:
valid_site_ids.add(int(site_id))
# read the journal metadata with author type info added
s = datetime.now()
author_type_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/author_type"
journal_metadata_filepath = os.path.join(author_type_dir, "journal_metadata_with_author_type.df")
journal_df =
|
pd.read_feather(journal_metadata_filepath)
|
pandas.read_feather
|
# Copyright 2021 <NAME>, spideynolove @ GitHub
# See LICENSE for details.
__author__ = '<NAME> @spideynolove in GitHub'
__version__ = '0.0.1'
# mimic pro code
# from .technical import technical_indicators, moving_averages, pivot_points
import investpy as iv
import os
import numpy as np
import pandas as pd
import datetime
import re
from settings import *
from functools import reduce
from pprint import pprint
'''
# --------- investpy market folder path
equity_path = 'investpy/equitiesdata/'
crypto_path = 'investpy/cryptodata/'
'''
# today = datetime.date.today().strftime("%d/%m/%Y")
today = '19/08/2021'
def convert_date(date):
return date.strftime("%d/%m/%Y")
def calculate_stats(source=combine_path, periods=13,
quotes='cor_bond', interval='Daily'):
df = pd.read_csv(source+f'{quotes}_{interval}.csv')
df = df.iloc[-periods-1:]
df['Mean'] = df.iloc[:, 1:5].mean(axis=1)
df['Std'] = df.iloc[:, 1:5].std(axis=1)
df['Skew'] = df.iloc[:, 1:5].skew(axis=1)
df['Kurt'] = df.iloc[:, 1:5].kurtosis(axis=1)
# error if not have Close columns
df['Change%'] = df['Close'].pct_change()*100
df['Mchange%'] = df['Mean'].pct_change()*100
# consider drop or not
df.drop(columns=['Open', 'High', 'Low'], inplace=True)
df.set_index('Date', inplace=True)
df = df[-periods:]
# print(quotes)
# print(df)
df.to_csv(analysis_path + f'{quotes}_{periods}_{interval}_stats.csv')
def calculate_one_stats():
pass
def correlation_one(source=combine_path, periods=13,
quotes='cor_bond', interval='Daily'):
# read data
df = pd.read_csv(source+f'{quotes}_{interval}.csv')
df = df.iloc[-periods-1:]
df = df.corr()
# print(quotes, periods, interval)
# print(df)
# print()
# print(df.corr()) # method='kendall' / 'spearman'
df.to_csv(analysis_path + f'{quotes}_{periods}_{interval}_corr.csv')
def residuals_formula():
pass
def correlation_two(periods=4, interval='Daily',
dicts={'currenciesdata': 'XAUUSD',
'rates-bondsdata': 'U.S. 10Y'}):
sources = list(dicts.keys())
quotes = list(dicts.values())
df = pd.read_csv(
f'investpy/{sources[0]}/{quotes[0]}_{interval}.csv')
df = df.iloc[-periods-1:]
df.reset_index(inplace=True)
df1 = pd.read_csv(
f'investpy/{sources[1]}/{quotes[1]}_{interval}.csv')
df1 = df1.iloc[-periods-1:]
df1.reset_index(inplace=True)
df_ = list(df.corrwith(df1))
df1_ = list(df.corrwith(df1, axis=1))
return df_[-len(df_)+1:], df1_[-len(df1_)+1:]
def combine_params(filename, params, interval):
check_data(combine_path, f'{filename}_{interval}.csv')
main_df = pd.DataFrame()
for ticker, info in params.items():
if '/' in ticker:
print(f'{ticker} have special /')
ticker = replace_specchar(ticker, '/', '')
df = pd.read_csv(f'investpy/{info[0]}data/{ticker}_{interval}.csv')
df.set_index('Date', inplace=True)
df.rename(columns={'Close': ticker}, inplace=True)
df = df.filter([ticker])
main_df = df if main_df.empty else main_df.join(df, how='outer')
# fillna or dropna
main_df.to_csv(combine_path + f'{filename}_{interval}.csv')
# dump same thing in an list
def dump_things(filename, things, intervals):
for thing, info in things.items():
market, country, infunc = info
for interval in intervals:
print(interval)
infunc(thing, interval, country)
for interval in intervals:
combine_params(filename, things, interval)
def make_market(params, isReload=True):
intervals = ['Daily', 'Weekly', 'Monthly']
# filename, data, info, outfunc = params
filename, data, info = params
thing_pairs = dict(zip(data, info))
if isReload:
dump_things(filename, thing_pairs, intervals)
# else:
# outfunc(combine_path + f'{filename}_{interval}.csv')
# # read data
def norm_data():
# numpy processing
pass
def append_preparing(path):
df = pd.read_csv(path)
cur_date = df[-1:]['Date'].tolist()[0]
df = df[:-1]
df['Date'] = pd.to_datetime(df['Date'])
df.set_index('Date', inplace=True)
df.to_csv(path)
if cur_date == datetime.date.today().strftime('%Y-%m-%d'):
return None
else:
dayRe = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)')
mo = dayRe.search(cur_date)
starttime = mo.group(3) + "/" + mo.group(2) + "/" + mo.group(1)
return starttime
def check_data(folder_part, filename):
# if os.path.exists(folder_part):
# # print(f'{folder_part} already exist!')
# if os.path.exists(folder_part+filename):
# print(f'{folder_part+filename} already exist!')
# return False
# else:
# return True
# else:
# os.makedirs(folder_part)
# return False
return False
def replace_specchar(obj, char, newchar):
tmp = obj
if char in obj:
tmp = obj.replace(char, newchar)
return tmp
# Fred part --------------------------------
def get_economic_fred(currency, item):
# economic_path = f'fred/{currency}/'
economic_path = f'quandl/{currency}/'
# check_data(economic_path)
df = fred.get_series(item, observation_start=starttime,
observation_end=today)
df.to_csv(economic_path + f'{item}.csv')
pass
# quandl part --------------------------------
def get_economic_quandl(currency, field, item):
economic_path = f'quandl/{currency}/'
check_data(economic_path, f'{item}.csv')
df = quandl.get(f'{field}/{item}',
start_date=starttime, end_date=today)
# print(df.tail())
df.to_csv(economic_path + f'{item}.csv')
def get_quandl_data(market, field, currency, item):
# need optimize folder name
quandl_part = f'quandl/{market}data/{currency}/'
# create folder
check_data(quandl_part, f'{item}.csv')
# request data then save to file, start_date, end_date
df = quandl.get(f'{field}/{item}',
start_date=starttime, end_date=today)
print(df.tail())
# df.to_csv(quandl_part + f'{item}.csv')
# return quandl_part + f'{item}.csv'
# --------------------- indices ----------------------------------
def get_index(index, interval, country):
index_ = replace_specchar(index, '/', '')
path = f'investpy/indicesdata/{index_}_{interval}.csv'
if not check_data('investpy/indicesdata/', f'{index_}_{interval}.csv'):
df = iv.indices.get_index_historical_data(
index=index, country=country, from_date=starttime,
to_date=today, order='ascending', interval=interval)
df.to_csv(path)
# else:
# new_start = append_preparing(path)
# print(new_start, today)
# if new_start is not None:
# df = iv.indices.get_index_historical_data(
# index=index, country=country, from_date=new_start,
# to_date=today, order='ascending', interval=interval)
# df.to_csv(path, mode='a', header=False)
def get_indices(isReload=True):
data = ['US Dollar Index', 'PHLX Euro',
'PHLX Australian Dollar', 'PHLX Canadian Dollar',
'PHLX Swiss Franc', 'PHLX British Pound',
'PHLX Yen', 'PHLX New Zealand Dollar']
info = [[markets[0], 'united states', get_index]]*len(data)
# params = ['currencyindex', data, info, analysis_index]
params = ['currencyindex', data, info]
# make_market(params, isReload)
# ------------------- bonds -----------------------------
def get_bond(bond, interval, country):
path = f'investpy/rates-bondsdata/{bond}_{interval}.csv'
if not check_data('investpy/rates-bondsdata/', f'{bond}_{interval}.csv'):
df = iv.bonds.get_bond_historical_data(
bond=bond, from_date=starttime, to_date=today,
order='ascending', interval=interval)
df.to_csv(path)
# else:
# new_start = append_preparing(path)
# if new_start is not None:
# df = iv.bonds.get_bond_historical_data(
# bond=bond, from_date=new_start, to_date=today,
# order='ascending', interval=interval)
# df.to_csv(path, mode='a', header=False)
def get_bonds(isReload=True):
data = ['U.S. 10Y', 'Canada 10Y',
'Japan 10Y', 'Switzerland 10Y',
'Australia 10Y', 'New Zealand 10Y',
'Germany 10Y', 'U.K. 10Y']
info = [[markets[3], 'united states', get_bond]]*len(data)
# params = ['cor_bond', data, info, analysis_bond]
params = ['cor_bond', data, info]
# make_market(params, isReload)
def get_bond_spread(periods=6, name='cor_bond',
interval='Monthly', base='U.S. 10Y'):
# https://pypi.org/project/nelson-siegel-svensson/0.1.0/
# https://pypi.org/project/yield-curve-dynamics/
# read data
df = pd.read_csv(combine_path + f'{name}_{interval}.csv')
# get a range value
df = df.iloc[-periods-1:]
# move base to the first columns
first_column = df.pop(base)
df.insert(1, base, first_column)
# list(df)[1:] mean quotes list
df.dropna(subset=list(df)[1:], how='any', inplace=True)
# calculate spread by subtract to the base
df.iloc[:, 1:9] = df.iloc[:, 1:9].sub(df[base], axis=0).pct_change()*100
# drop zero base column
df.drop(base, axis=1, inplace=True)
# remove first empty row
df = df[-len(df)+1:]
# set Date as index
df.set_index('Date', inplace=True)
# write to file
df.to_csv(analysis_path + f'{base}_spread_{periods}_{interval}.csv')
# ----------- get_currency_cross_historical_data ---------
def get_forex(quote, interval, country):
quote_ = replace_specchar(quote, '/', '')
path = f'investpy/currenciesdata/{quote_}_{interval}.csv'
if not check_data('investpy/currenciesdata/', f'{quote_}_{interval}.csv'):
# check latest data
print("check_data True")
df = iv.currency_crosses.get_currency_cross_historical_data(
currency_cross=quote, from_date=starttime, to_date=today,
order='ascending', interval=interval)
df = df.iloc[:, :-1]
df.to_csv(path)
else:
print("check_data False")
new_start = append_preparing(path)
print(new_start, today)
if new_start is not None:
df = iv.currency_crosses.get_currency_cross_historical_data(
currency_cross=quote, from_date=new_start, to_date=today,
order='ascending', interval=interval)
df = df.iloc[:, :-1]
df.to_csv(path, mode='a', header=False)
def get_goldpairs(isReload=True):
data = ['XAU/USD', 'XAU/EUR', 'XAU/GBP', 'XAU/CAD',
'XAU/CHF', 'XAU/JPY', 'XAU/AUD', 'XAU/NZD']
info = [[markets[1], 'united states', get_forex]]*len(data)
# params = ['xaupair', data, info, analysis_currency]
params = ['xaupair', data, info]
# make_market(params, isReload)
def get_silverpairs(isReload=True):
data = ['XAG/USD', 'XAG/EUR', 'XAG/GBP',
'XAG/CAD', 'XAG/CHF', 'XAG/AUD']
info = [[markets[1], 'united states', get_forex]]*len(data)
# params = ['xagpair', data, info, analysis_currency]
params = ['xagpair', data, info]
# make_market(params, isReload)
# ----------------------------IMPORTANT- commondity---
# ------------- get_commodity_historical_data ---------------
def get_commodities(commodity, interval, country):
path = f'investpy/commoditiesdata/{commodity}_{interval}.csv'
compath = 'investpy/commoditiesdata/'
if not check_data(compath, f'{commodity}_{interval}.csv'):
df = iv.commodities.get_commodity_historical_data(
commodity=commodity, from_date=starttime, to_date=today,
order='ascending', interval=interval)
df.to_csv(path)
else:
new_start = append_preparing(path)
if new_start is not None:
df = iv.commodities.get_commodity_historical_data(
commodity=commodity, from_date=new_start, to_date=today,
order='ascending', interval=interval)
df.to_csv(path, mode='a', header=False)
def get_grains(isReload=True):
# https://www.investing.com/commodities/grains
data = ['Rough Rice', 'US Soybean Oil',
'US Soybean Meal', 'US Soybeans',
'US Wheat', 'US Corn', 'Oats', 'London Wheat']
info = [[markets[2], 'united states', get_commodities]]*len(data)
# params = ['grain', data, info, analysis_commodity]
params = ['grain', data, info]
# make_market(params, isReload)
def get_softs(isReload=True):
# https://www.investing.com/commodities/softs
data = ['US Coffee C', 'US Cotton #2',
'US Sugar #11', 'Orange Juice',
'US Cocoa', 'Lumber', 'London Cocoa',
'London Coffee', 'London Sugar']
info = [[markets[2], 'united states', get_commodities]]*len(data)
# params = ['soft', data, info, analysis_commodity]
params = ['soft', data, info]
# make_market(params, isReload)
# shortcut: filename + dataset
def get_meats(isReload=True):
# https://www.investing.com/commodities/meats
data = ['Live Cattle', 'Lean Hogs', 'Feeder Cattle']
info = [[markets[2], 'united states', get_commodities]]*len(data)
# params = ['meat', data, info, analysis_commodity]
params = ['meat', data, info]
# make_market(params, isReload)
def get_metals(isReload=True):
# https://www.investing.com/commodities/metals
data = ['Gold', 'Silver', 'Copper', 'Palladium', 'Platinum',
'Aluminum', 'Zinc', 'Lead', 'Nickel', 'Tin']
info = [[markets[2], 'united states', get_commodities]]*len(data)
# params = ['metal', data, info, analysis_commodity]
params = ['metal', data, info]
# make_market(params, isReload)
def get_energies(isReload=True):
# https://www.investing.com/commodities/energy
data = ['Brent Oil', 'Crude Oil WTI',
'London Gas Oil', 'Natural Gas',
'Heating Oil', 'Carbon Emissions',
'Gasoline RBOB']
info = [[markets[2], 'united states', get_commodities]]*len(data)
# params = ['energy', data, info, analysis_commodity]
params = ['energy', data, info]
# make_market(params, isReload)
# ----------------Commondity index-------------------------
# https://www.investing.com/indices/thomson-reuters---jefferies-crb
def get_crb(isReload=True):
intervals = ['Daily', 'Weekly', 'Monthly']
for interval in intervals:
get_index('TR/CC CRB', interval, 'world')
# ---------------- ETF -------------------------
def etf_percent():
pass
def get_etf(etf, interval, country):
path = f'investpy/etfsdata/{etf}_{interval}.csv'
if not check_data('investpy/etfsdata/', f'{etf}_{interval}.csv'):
df = iv.etfs.get_etf_historical_data(
etf=etf, country=country, from_date=starttime,
to_date=today, order='ascending', interval=interval)
df.to_csv(path)
# else:
# new_start = append_preparing(path)
# if new_start is not None:
# df = iv.etfs.get_etf_historical_data(
# etf=etf, country=country, from_date=new_start,
# to_date=today,
# order='ascending', interval=interval)
# df.to_csv(path, mode='a', header=False)
# pass
def get_bondetfs(isReload=True):
data = ['iShares Core US Aggregate Bond',
'Vanguard Total Bond Market',
'Vanguard Intermediate-Term Corporate Bond',
'Vanguard Total International Bond',
'Vanguard Short-Term Corporate Bond']
info = [[markets[5], 'united states', get_etf]]*len(data)
# params = ['bondetfs', data, info, analysis_etf]
params = ['bondetfs', data, info]
# make_market(params, isReload)
pass
def get_stocketfs(isReload=True):
data = ['SPDR S&P 500', 'ishares S&P 500',
'Vanguard Total Stock Market',
'Vanguard S&P 500',
'Invesco QQQ Trust Series 1']
info = [[markets[5], 'united states', get_etf]]*len(data)
# params = ['stocketfs', data, info, analysis_etf]
params = ['stocketfs', data, info]
# make_market(params, isReload)
pass
def get_goldetfs(isReload=True):
data = ['SPDR Gold Shares', 'iShares Gold',
'SPDR Gold MiniShares',
'ETFS Physical Swiss Gold Shares',
'GraniteShares Gold Trust']
info = [[markets[5], 'united states', get_etf]]*len(data)
# params = ['goldetfs', data, info, analysis_etf]
params = ['goldetfs', data, info]
# make_market(params, isReload)
def get_silveretfs(isReload=True):
# 'United States Copper' - not use
data = ['iShares Silver', 'ETFS Physical Silver Shares',
'ProShares Ultra Silver']
info = [[markets[5], 'united states', get_etf]]*len(data)
# params = ['silveretfs', data, info, analysis_etf]
params = ['silveretfs', data, info]
# make_market(params, isReload)
# ----------------Correlation-------------------------
# -------------------------------------
# AUD vs NZD (correlation)
def get_aunz(isReload=True):
data = ['PHLX Australian Dollar', 'PHLX New Zealand Dollar',
'Australia 10Y', 'New Zealand 10Y']
info = [[markets[0], 'united states', get_index]] * \
2 + [[markets[3], 'united states', get_bond]]*2
# params = ['cor_aunz', data, info, analysis_intermarket]
params = ['cor_aunz', data, info]
make_market(params, isReload)
# -------------------------------------
# USD vs CAD (correlation)
def get_usca(isReload=True):
data = ['US Dollar Index', 'PHLX Canadian Dollar',
'U.S. 10Y', 'Canada 10Y']
info = [[markets[0], 'united states', get_index]] * \
2 + [[markets[3], 'united states', get_bond]]*2
# params = ['cor_usca', data, info, analysis_intermarket]
params = ['cor_usca', data, info]
make_market(params, isReload)
# -------------------------------------
# JPY vs CHF (correlation)
def get_jpsw(isReload=True):
data = ['PHLX Yen', 'PHLX Swiss Franc', 'Japan 10Y', 'Switzerland 10Y']
info = [[markets[0], 'united states', get_index]] * \
2 + [[markets[3], 'united states', get_bond]]*2
# params = ['cor_jpsw', data, info, analysis_intermarket]
params = ['cor_jpsw', data, info]
make_market(params, isReload)
# -------------------------------------
# GBP vs EUR (correlation)
def get_ukeu(isReload=True):
data = ['PHLX British Pound', 'PHLX Euro', 'U.K. 10Y', 'Germany 10Y']
info = [[markets[0], 'united states', get_index]] * \
2 + [[markets[3], 'united states', get_bond]]*2
# params = ['cor_ukeu', data, info, analysis_intermarket]
params = ['cor_ukeu', data, info]
make_market(params, isReload)
# ----------------------------------
# https://www.investing.com/economic-calendar/
def get_economic_calendar():
'''
countries = ['united states', 'united kingdom', 'australia', 'canada',
'switzerland', 'germany', 'japan', 'new zealand', 'china']
importances = ['high', 'medium']
today = date.today()
# get entire month (month have??? day)
week_ago = (today + datetime.timedelta(days=6))
# print(today, week_ago)
df = iv.economic_calendar(time_zone='GMT +7:00', time_filter='time_only',
countries=countries, importances=importances,
categories=None, from_date=convert_date(today),
to_date=convert_date(week_ago))
df.to_csv('investpy/calendar/economic_calendar.csv')
'''
df = pd.read_csv('investpy/calendar/economic_calendar.csv', index_col=0)
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
print(df)
pass
def csv_finder(foldername):
paths = []
for root, dirs, files in os.walk(foldername):
for file in files:
if file.endswith(".csv"):
paths.append(os.path.abspath(os.path.join(root, file)))
return paths
def read_data_vol():
non_vols = []
vols = []
paths = csv_finder("investpy")
for count, item in enumerate(paths, 1):
df =
|
pd.read_csv(item)
|
pandas.read_csv
|
from __future__ import print_function, absolute_import, division
import pandas as pd
import numpy as np
import argparse
import json
import math
import re
import os
import sys
import csv
import socket # -- ip checks
import seaborn as sns
import matplotlib.pyplot as plt
from jinja2 import Environment, PackageLoader
# --- functions ---
def get_config(config):
""" convert json config file into a python dict """
with open(config, 'r') as f:
config_dict = json.load(f)[0]
return config_dict
# -- load data --
def get_dataframe(config):
""" load csv into python dataframe """
df = pd.read_csv(config['input_file'], low_memory=False)
return df
# --
def get_overview(config, df):
""" return details of the dataframe and any issues found """
overview_msg = {}
df = df.copy()
column_cnt = len(df.columns)
try:
df['EVENT_TIMESTAMP'] = pd.to_datetime(df[config['required_features']['EVENT_TIMESTAMP']], infer_datetime_format=True)
date_range = df['EVENT_TIMESTAMP'].min().strftime('%Y-%m-%d') + ' to ' + df['EVENT_TIMESTAMP'].max().strftime('%Y-%m-%d')
day_cnt = (df['EVENT_TIMESTAMP'].max() - df['EVENT_TIMESTAMP'].min()).days
except:
overview_msg[config['required_features']['EVENT_TIMESTAMP']] = " Unable to convert" + config['required_features']['EVENT_TIMESTAMP'] + " to timestamp"
date_range = ""
day_cnt = 0
record_cnt = df.shape[0]
memory_size = df.memory_usage(index=True).sum()
record_size = round(float(memory_size) / record_cnt,2)
n_dupe = record_cnt - len(df.drop_duplicates())
if record_cnt <= 10000:
overview_msg["Record count"] = "A minimum of 10,000 rows are required to train the model, your dataset contains " + str(record_cnt)
overview_stats = {
"Record count" : "{:,}".format(record_cnt) ,
"Column count" : "{:,}".format(column_cnt),
"Duplicate count" : "{:,}".format(n_dupe),
"Memory size" : "{:.2f}".format(memory_size/1024**2) + " MB",
"Record size" : "{:,}".format(record_size) + " bytes",
"Date range" : date_range,
"Day count" : "{:,}".format(day_cnt) + " days",
"overview_msg" : overview_msg,
"overview_cnt" : len(overview_msg)
}
return df, overview_stats
def set_feature(row, config):
""" sets the feature type of each variable in the file, identifies features with issues
as well as the required features. this is the first pass of rules
"""
rulehit = 0
feature = ""
message = ""
required_features = config['required_features']
# -- assign numeric --
if ((row._dtype in ['float64', 'int64']) and (row['nunique'] > 1)):
feature = "numeric"
message = "(" + "{:,}".format(row['nunique']) + ") unique"
# -- assign categorical --
if ((row._dtype == 'object') and ( row.nunique_pct <= 0.75)):
feature = "categorical"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
# -- assign categorical to numerics --
if ((row._dtype in ['float64', 'int64']) and ( row['nunique'] <= 1024 )):
feature = "categorical"
message = "(" + "{:,}".format(row['nunique']) + ") unique"
# -- assign binary --
if (row['nunique'] == 2 ):
feature = "categorical"
message = "(" + "{:}".format(row['nunique']) + ") binary"
# -- single value --
if (row['nunique'] == 1):
rulehit = 1
feature = "exclude"
message = "(" + "{:}".format(row['nunique']) + ") single value"
# -- null pct --
if (row.null_pct >= 0.50 and (rulehit == 0)):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.null_pct*100) + "%) missing "
# -- categorical w. high % unique
if ((row._dtype == 'object') and ( row.nunique_pct >= 0.75)) and (rulehit == 0):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
# -- numeric w. extreeme % unique
if ((row._dtype in ['float64', 'int64']) and ( row.nunique_pct >= 0.95)) and (rulehit == 0):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
if ('EMAIL_ADDRESS' in required_features) and (row._column == required_features['EMAIL_ADDRESS']):
feature = "EMAIL_ADDRESS"
if ('IP_ADDRESS' in required_features) and (row._column == required_features['IP_ADDRESS']):
feature = "IP_ADDRESS"
if row._column == required_features['EVENT_TIMESTAMP']:
feature = "EVENT_TIMESTAMP"
if row._column == required_features['EVENT_LABEL']:
feature = "EVENT_LABEL"
return feature, message
def get_label(config, df):
""" returns stats on the label and performs intial label checks """
message = {}
label = config['required_features']['EVENT_LABEL']
label_summary = df[label].value_counts()
rowcnt = df.shape[0]
label_dict = {
"label_field" : label,
"label_values" : df[label].unique(),
"label_dtype" : label_summary.dtype,
"fraud_rate" : "{:.2f}".format((label_summary.min()/label_summary.sum())*100),
"fraud_label": str(label_summary.idxmin()),
"fraud_count": label_summary.min(),
"legit_rate" : "{:.2f}".format((label_summary.max()/label_summary.sum())*100),
"legit_count": label_summary.max(),
"legit_label": str(label_summary.idxmax()),
"null_count" : "{:,}".format(df[label].isnull().sum(axis = 0)),
"null_rate" : "{:.2f}".format(df[label].isnull().sum(axis = 0)/rowcnt),
}
"""
label checks
"""
if label_dict['fraud_count'] <= 500:
message['fraud_count'] = "Fraud count " + str(label_dict['fraud_count']) + " is less than 500\n"
if df[label].isnull().sum(axis = 0)/rowcnt >= 0.01:
message['label_nulls'] = "Your LABEL column contains " + label_dict["null_count"] +" a significant number of null values"
label_dict['warnings'] = len(message)
return label_dict, message
def get_partition(config, df):
""" evaluates your dataset partitions and checks the distribution of fraud lables """
df = df.copy()
row_count = df.shape[0]
required_features = config['required_features']
message = {}
stats ={}
try:
df['_event_timestamp'] =
|
pd.to_datetime(df[required_features['EVENT_TIMESTAMP']])
|
pandas.to_datetime
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.5
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_checklist_scenariobased_step01 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step01&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-1).
# +
import numpy as np
import pandas as pd
from scipy import interpolate
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from arpym.pricing import bsm_function, bootstrap_nelson_siegel, \
implvol_delta2m_moneyness
from arpym.tools import aggregate_rating_migrations, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-parameters)
# +
# set current time t_now
t_now = np.datetime64('2012-08-31')
# set start date for data selection
t_first = np.datetime64('2009-11-02')
# set initial portfolio construction date t_init
t_init = np.datetime64('2012-08-30')
# stocks - must include GE and JPM
stock_names = ['GE', 'JPM', 'A', 'AA', 'AAPL'] # stocks considered
# make sure stock names includes GE and JPM
stock_names = ['GE', 'JPM'] + [stock
for stock in stock_names
if stock not in ['GE', 'JPM']]
print('Stocks considered:', stock_names)
# options on S&P 500
k_strk = 1407 # strike value of options on S&P 500 (US dollars)
tend_option = np.datetime64('2013-08-26') # options expiry date
y = 0.01 # level for yield curve (assumed flat and constant)
l_ = 9 # number of points on the m-moneyness grid
# corporate bonds
# expiry date of the GE coupon bond to extract
tend_ge = np.datetime64('2013-09-16')
# expiry date of the JPM coupon bond to extract
tend_jpm = np.datetime64('2014-01-15')
# starting ratings following the table:
# "AAA" (0), "AA" (1), "A" (2), "BBB" (3), "BB" (4), "B" (5),
# "CCC" (6), "D" (7)
ratings_tnow = np.array([5, # initial credit rating for GE (corresponding to B)
3]) # initial credit rating for JPM (corresponding to BBB)
# start of period for aggregate credit risk drivers
tfirst_credit = np.datetime64('1995-01-01')
# end of period for aggregate credit risk drivers
tlast_credit = np.datetime64('2004-12-31')
# index of risk driver to plot
d_plot = 1
# -
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step00): Import data
# +
# upload data
# stocks
stocks_path = '../../../databases/global-databases/equities/db_stocks_SP500/'
db_stocks = pd.read_csv(stocks_path + 'db_stocks_sp.csv', skiprows=[0],
index_col=0)
db_stocks.index = pd.to_datetime(db_stocks.index)
# implied volatility of option on S&P 500 index
path = '../../../databases/global-databases/derivatives/db_implvol_optionSPX/'
db_impliedvol = pd.read_csv(path + 'data.csv',
index_col=['date'], parse_dates=['date'])
implvol_param = pd.read_csv(path + 'params.csv', index_col=False)
# corporate bonds: GE and JPM
jpm_path = \
'../../../databases/global-databases/fixed-income/db_corporatebonds/JPM/'
db_jpm = pd.read_csv(jpm_path + 'data.csv',
index_col=['date'], parse_dates=['date'])
jpm_param = pd.read_csv(jpm_path + 'params.csv',
index_col=['expiry_date'], parse_dates=['expiry_date'])
jpm_param['link'] = ['dprice_'+str(i) for i in range(1, jpm_param.shape[0]+1)]
ge_path = '../../../databases/global-databases/fixed-income/db_corporatebonds/GE/'
db_ge = pd.read_csv(ge_path + 'data.csv',
index_col=['date'], parse_dates=['date'])
ge_param = pd.read_csv(ge_path + 'params.csv',
index_col=['expiry_date'], parse_dates=['expiry_date'])
ge_param['link'] = ['dprice_'+str(i) for i in range(1, ge_param.shape[0]+1)]
# ratings
rating_path = '../../../databases/global-databases/credit/db_ratings/'
db_ratings = pd.read_csv(rating_path+'data.csv', parse_dates=['date'])
# ratings_param represents all possible ratings i.e. AAA, AA, etc.
ratings_param = pd.read_csv(rating_path+'params.csv', index_col=0)
ratings_param = np.array(ratings_param.index)
c_ = len(ratings_param)-1
# define the date range of interest
dates = db_stocks.index[(db_stocks.index >= t_first) &
(db_stocks.index <= t_now)]
dates = np.intersect1d(dates, db_impliedvol.index)
dates = dates.astype('datetime64[D]')
# the corporate bonds time series is shorter; select the bonds dates
ind_dates_bonds = np.where((db_ge.index >= dates[0]) &
(db_ge.index <= t_now))
dates_bonds = np.intersect1d(db_ge.index[ind_dates_bonds], db_jpm.index)
dates_bonds = dates_bonds.astype('datetime64[D]')
# length of the time series
t_ = len(dates)
t_bonds = len(dates_bonds)
# initialize temporary databases
db_risk_drivers = {}
v_tnow = {}
v_tinit = {}
risk_drivers_names = {}
v_tnow_names = {}
# implied volatility parametrized by time to expiry and delta-moneyness
tau_implvol = np.array(implvol_param.time2expiry)
tau_implvol = tau_implvol[~np.isnan(tau_implvol)]
delta_moneyness = np.array(implvol_param.delta)
implvol_delta_moneyness_2d = \
db_impliedvol.loc[(db_impliedvol.index.isin(dates)),
(db_impliedvol.columns != 'underlying')]
k_ = len(tau_implvol)
# unpack flattened database (from 2d to 3d)
implvol_delta_moneyness_3d = np.zeros((t_, k_, len(delta_moneyness)))
for k in range(k_):
implvol_delta_moneyness_3d[:, k, :] = \
np.r_[np.array(implvol_delta_moneyness_2d.iloc[:, k::k_])]
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step01): Stocks
# +
n_stocks = len(stock_names) # number of stocks
d_stocks = n_stocks # one risk driver for each stock
for d in range(d_stocks):
# calculate time series of stock risk drivers
db_risk_drivers[d] = np.log(np.array(db_stocks.loc[dates, stock_names[d]]))
risk_drivers_names[d] = 'stock '+stock_names[d]+'_log_value'
# stock value
v_tnow[d] = db_stocks.loc[t_now, stock_names[d]]
v_tinit[d] = db_stocks.loc[t_init, stock_names[d]]
v_tnow_names[d] = 'stock '+stock_names[d]
# number of risk drivers, to be updated at every insertion
d_ = d_stocks
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step02): S&P 500 Index
# +
# calculate risk driver of the S&P 500 index
db_risk_drivers[d_] = \
np.log(np.array(db_impliedvol.loc[(db_impliedvol.index.isin(dates)),
'underlying']))
risk_drivers_names[d_] = 'sp_index_log_value'
# value of the S&P 500 index
v_tnow[d_] = db_impliedvol.loc[t_now, 'underlying']
v_tinit[d_] = db_impliedvol.loc[t_init, 'underlying']
v_tnow_names[d_] = 'sp_index'
# update counter
d_ = d_+1
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step03): Call and put options on the S&P 500 Index
# +
# from delta-moneyness to m-moneyness parametrization
implvol_m_moneyness_3d, m_moneyness = \
implvol_delta2m_moneyness(implvol_delta_moneyness_3d, tau_implvol,
delta_moneyness, y*np.ones((t_, k_)),
tau_implvol, l_)
# calculate log implied volatility
log_implvol_m_moneyness_2d = \
np.log(np.reshape(implvol_m_moneyness_3d,
(t_, k_*(l_)), 'F'))
# value of the underlying
s_tnow = v_tnow[d_stocks]
s_tinit = v_tinit[d_stocks]
# time to expiry (in years)
tau_option_tnow = np.busday_count(t_now, tend_option)/252
tau_option_tinit = np.busday_count(t_init, tend_option)/252
# moneyness
moneyness_tnow = np.log(s_tnow/k_strk)/np.sqrt(tau_option_tnow)
moneyness_tinit = np.log(s_tnow/k_strk)/np.sqrt(tau_option_tnow)
# grid points
points = list(zip(*[grid.flatten() for grid in np.meshgrid(*[tau_implvol,
m_moneyness])]))
# known values
values = implvol_m_moneyness_3d[-1, :, :].flatten('F')
# implied volatility (interpolated)
impl_vol_tnow = \
interpolate.LinearNDInterpolator(points, values)(*np.r_[tau_option_tnow,
moneyness_tnow])
impl_vol_tinit = \
interpolate.LinearNDInterpolator(points, values)(*np.r_[tau_option_tinit,
moneyness_tinit])
# compute call option value by means of Black-Scholes-Merton formula
v_call_tnow = bsm_function(s_tnow, y, impl_vol_tnow, moneyness_tnow, tau_option_tnow)
v_call_tinit = bsm_function(s_tinit, y, impl_vol_tinit, moneyness_tinit,
tau_option_tinit)
# compute put option value by means of the put-call parity
v_zcb_tnow = np.exp(-y*tau_option_tnow)
v_put_tnow = v_call_tnow - s_tnow + k_strk*v_zcb_tnow
v_zcb_tinit = np.exp(-y*tau_option_tinit)
v_put_tinit = v_call_tinit - s_tinit + k_strk*v_zcb_tinit
# store data
d_implvol = log_implvol_m_moneyness_2d.shape[1]
for d in np.arange(d_implvol):
db_risk_drivers[d_+d] = log_implvol_m_moneyness_2d[:, d]
risk_drivers_names[d_+d] = 'option_spx_logimplvol_mtau_' + str(d+1)
v_tnow[d_] = v_call_tnow
v_tinit[d_] = v_call_tinit
v_tnow_names[d_] = 'option_spx_call'
v_tnow[d_+1] = v_put_tnow
v_tinit[d_+1] = v_put_tinit
v_tnow_names[d_+1] = 'option_spx_put'
# update counter
d_ = len(db_risk_drivers)
n_ = len(v_tnow)
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step04): Corporate bonds
# +
n_bonds = 2
# GE bond
# extract coupon
coupon_ge = ge_param.loc[tend_ge, 'coupons']/100
# rescaled dirty prices of GE bond
v_bond_ge = db_ge.loc[db_ge.index.isin(dates_bonds)]/100
# computation of Nelson-Siegel parameters for GE bond
theta_ge = np.zeros((t_bonds, 4))
theta_ge = bootstrap_nelson_siegel(v_bond_ge.values, dates_bonds,
np.array(ge_param.coupons/100),
ge_param.index.values.astype('datetime64[D]'))
# risk drivers for bonds are Nelson-Siegel parameters
for d in np.arange(4):
if d == 3:
db_risk_drivers[d_+d] = np.sqrt(theta_ge[:, d])
else:
db_risk_drivers[d_+d] = theta_ge[:, d]
risk_drivers_names[d_+d] = 'ge_bond_nel_sieg_theta_' + str(d+1)
# store dirty price of GE bond
# get column variable name in v_bond_ge that selects bond with correct expiry
ge_link = ge_param.loc[tend_ge, 'link']
v_tnow[n_] = v_bond_ge.loc[t_now, ge_link]
v_tinit[n_] = v_bond_ge.loc[t_init, ge_link]
v_tnow_names[n_] = 'ge_bond'
# update counter
d_ = len(db_risk_drivers)
n_ = len(v_tnow_names)
# JPM bond
# extract coupon
coupon_jpm = jpm_param.loc[tend_jpm, 'coupons']/100
# rescaled dirty prices of JPM bond
v_bond_jpm = db_jpm.loc[db_ge.index.isin(dates_bonds)]/100
# computation of Nelson-Siegel parameters for JPM bond
theta_jpm = np.zeros((t_bonds, 4))
theta_jpm = bootstrap_nelson_siegel(v_bond_jpm.values, dates_bonds,
np.array(jpm_param.coupons/100),
jpm_param.index.values.astype('datetime64[D]'))
# risk drivers for bonds are Nelson-Siegel parameters
for d in np.arange(4):
if d == 3:
db_risk_drivers[d_+d] = np.sqrt(theta_jpm[:, d])
else:
db_risk_drivers[d_+d] = theta_jpm[:, d]
risk_drivers_names[d_+d] = 'jpm_bond_nel_sieg_theta_'+str(d+1)
# store dirty price of JPM bond
# get column variable name in v_bond_ge that selects bond with correct expiry
jpm_link = jpm_param.loc[tend_jpm, 'link']
v_tnow[n_] = v_bond_jpm.loc[t_now, jpm_link]
v_tinit[n_] = v_bond_jpm.loc[t_init, jpm_link]
v_tnow_names[n_] = 'jpm_bond'
# update counter
d_ = len(db_risk_drivers)
n_ = len(v_tnow)
# fill the missing values with nan's
for d in range(d_stocks+1+d_implvol,
d_stocks+1+d_implvol+n_bonds*4):
db_risk_drivers[d] = np.concatenate((np.zeros(t_-t_bonds),
db_risk_drivers[d]))
db_risk_drivers[d][:t_-t_bonds] = np.NAN
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step05): Credit
# +
# extract aggregate credit risk drivers
dates_credit, n_obligors, n_cum_trans, *_ = \
aggregate_rating_migrations(db_ratings, ratings_param, tfirst_credit,
tlast_credit)
# number of obligors in each rating at each t
t_credit = len(dates_credit) # length of the time series
credit_types = {}
credit_series = {}
for c in np.arange(c_+1):
credit_types[c] = 'n_oblig_in_state_'+ratings_param[c]
credit_series[c] = n_obligors[:, c]
d_credit = len(credit_series)
# cumulative number of migrations up to time t for each pair of rating buckets
for i in np.arange(c_+1):
for j in np.arange(c_+1):
if i != j:
credit_types[d_credit] = \
'n_cum_trans_'+ratings_param[i]+'_'+ratings_param[j]
credit_series[d_credit] = n_cum_trans[:, i, j]
d_credit = len(credit_series)
# -
# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step06): Save databases
# +
path = '../../../databases/temporary-databases/'
# market risk drivers
out = pd.DataFrame({risk_drivers_names[d]: db_risk_drivers[d]
for d in range(len(db_risk_drivers))}, index=dates)
out = out[list(risk_drivers_names.values())]
out.index.name = 'dates'
out.to_csv(path+'db_riskdrivers_series.csv')
del out
# aggregate credit risk drivers
out = pd.DataFrame({credit_types[d]: credit_series[d]
for d in range(d_credit)},
index=dates_credit)
out = out[list(credit_types.values())]
out.index.name = 'dates'
out.to_csv(path+'db_riskdrivers_credit.csv')
del out
# values of all instruments at t_now
out = pd.DataFrame({v_tnow_names[n]: pd.Series(v_tnow[n])
for n in range(len(v_tnow))})
out = out[list(v_tnow_names.values())]
out.to_csv(path+'db_v_tnow.csv',
index=False)
del out
# values of all instruments at t_init
out = pd.DataFrame({v_tnow_names[n]: pd.Series(v_tinit[n])
for n in range(len(v_tinit))})
out = out[list(v_tnow_names.values())]
out.to_csv(path+'db_v_tinit.csv',
index=False)
del out
# additional variables needed for subsequent steps
out = {'n_stocks': pd.Series(n_stocks),
'd_implvol': pd.Series(d_implvol),
'n_bonds': pd.Series(n_bonds),
'c_': pd.Series(c_),
'tlast_credit': pd.Series(tlast_credit),
'tend_option': pd.Series(tend_option),
'k_strk': pd.Series(k_strk),
'l_': pd.Series(l_),
'tau_implvol': pd.Series(tau_implvol),
'y': pd.Series(y),
'm_moneyness': pd.Series(m_moneyness),
'tend_ge': pd.Series(tend_ge),
'coupon_ge': pd.Series(coupon_ge),
'tend_jpm': pd.Series(tend_jpm),
'coupon_jpm': pd.Series(coupon_jpm),
'd_': pd.Series(d_),
'd_credit': pd.Series(d_credit),
'ratings_tnow':
|
pd.Series(ratings_tnow)
|
pandas.Series
|
import pandas as pd
import numpy as np
import dateutil
import networkx as nx
ADULT_AGE = 18
def get_hmis_cp():
"""
Pull in relevant CSVs from `../data/`, merge them, clean them, and return a tuple containing the cleaned HMIS data
and the cleaned Connecting Point data.
"""
# get raw dataframes
hmis = get_raw_hmis()
cp = get_raw_cp()
# convert dates
hmis = hmis_convert_dates(hmis)
cp = cp_convert_dates(cp)
# compute client and family ids across the dataframes
(hmis, cp) = get_client_family_ids(hmis, cp)
# get child status
hmis = hmis_child_status(hmis)
cp = cp_child_status(cp)
# generate family characteristics
hmis_generate_family_characteristics(hmis)
cp_generate_family_characteristics(cp)
return (hmis, cp)
###################
# get_raw methods #
###################
def get_raw_hmis():
"""
Pull in relevant CSVs from `../data/`, merge them, and return the raw HMIS dataframe.
"""
program = pd.read_csv('../data/hmis/program with family.csv')
client = pd.read_csv('../data/hmis/client de-identified.csv')
# NOTE we're taking an inner join here because the program csv got pulled after
# the client csv, because we added the family site identifier column to program
program = program.merge(client, on='Subject Unique Identifier', how='inner')
return program
def get_raw_cp():
"""
Pull in relevant CSVs from `../data/`, merge them, and return the raw Connecting Point dataframe.
"""
case = pd.read_csv("../data/connecting_point/case.csv")
case = case.rename(columns={'caseid': 'Caseid'})
client = pd.read_csv("../data/connecting_point/client.csv")
case = case.merge(client, on='Caseid', how='left')
return case
#############################################
# get_client_family_ids and related methods #
#############################################
def get_client_family_ids(hmis, cp):
"""
Given raw HMIS and Connecting Point dataframes, de-duplicate individuals and determine families across time.
See the README for more information about rationale and methodology.
The graph contains IDs from both HMIS and Connecting Point, so each vertex is represented as a tuple `(c, id)`,
where `c` is either `'h'` or `'c'`, to indicate whether the `id` corresponds to a row in HMIS or Connecting Point.
For example, `('h', 1234)` represents the row(s) in HMIS with individual ID `1234`, and `('c',5678)` represents the
row(s) in Connecting Point with individual ID `5678`.
:param hmis: HMIS dataframe.
:type hmis: Pandas.Dataframe.
:param cp: Connecting Point dataframe.
:type cp: Pandas.Dataframe.
"""
hmis = hmis.rename(columns={'Subject Unique Identifier': 'Raw Subject Unique Identifier'})
cp = cp.rename(columns={'Clientid': 'Raw Clientid'})
# create graph of individuals
G_individuals = nx.Graph()
G_individuals.add_nodes_from([('h', v) for v in hmis['Raw Subject Unique Identifier'].values])
G_individuals.add_nodes_from([('c', v) for v in cp['Raw Clientid'].values])
# add edges between same individuals
G_individuals.add_edges_from(group_edges('h', pd.read_csv('../data/hmis/hmis_client_duplicates_link_plus.csv'), ['Set ID'], 'Subject Unique Identifier'))
G_individuals.add_edges_from(group_edges('c', pd.read_csv('../data/connecting_point/cp_client_duplicates_link_plus.csv'), ['Set ID'], 'Clientid'))
G_individuals.add_edges_from(matching_edges())
# copy graph of individuals and add edges between individuals in the same family
G_families = G_individuals.copy()
G_families.add_edges_from(group_edges('h', hmis, ['Family Site Identifier','Program Start Date'], 'Raw Subject Unique Identifier'))
G_families.add_edges_from(group_edges('c', cp, ['Caseid'], 'Raw Clientid'))
# compute connected components and pull out ids for each dataframe for individuals and families
hmis_individuals = [get_ids_from_nodes('h', c) for c in nx.connected_components(G_individuals)]
cp_individuals = [get_ids_from_nodes('c', c) for c in nx.connected_components(G_individuals)]
hmis_families = [get_ids_from_nodes('h', c) for c in nx.connected_components(G_families)]
cp_families = [get_ids_from_nodes('c', c) for c in nx.connected_components(G_families)]
# create dataframes to merge
hmis_individuals = create_dataframe_from_grouped_ids(hmis_individuals, 'Subject Unique Identifier')
hmis_families = create_dataframe_from_grouped_ids(hmis_families, 'Family Identifier')
cp_individuals = create_dataframe_from_grouped_ids(cp_individuals, 'Clientid')
cp_families = create_dataframe_from_grouped_ids(cp_families, 'Familyid')
# merge into hmis and cp dataframes
hmis = hmis.merge(hmis_individuals, left_on='Raw Subject Unique Identifier', right_index=True, how='left')
hmis = hmis.merge(hmis_families, left_on='Raw Subject Unique Identifier', right_index=True, how='left')
cp = cp.merge(cp_individuals, left_on='Raw Clientid', right_index=True, how='left')
cp = cp.merge(cp_families, left_on='Raw Clientid', right_index=True, how='left')
return (hmis, cp)
def group_edges(node_prefix, df, group_ids, individual_id):
"""
Return the edge list from a grouping dataframe, either a Link Plus fuzzy matching or a dataframe, where people are
connected by appearing in the same family or case.
:param node_prefix: prefix for the nodes in the edge list.
:type node_prefix: str.
:param df: dataframe.
:type df: Pandas.Dataframe.
:param group_ids: grouping column names in grouping csv.
:type group_ids: [str].
:param individual_id: individual id column name in grouping csv.
:type individual_id: str.
"""
groups = df[group_ids+[individual_id]].dropna().drop_duplicates().set_index(group_ids)
edges = groups.merge(groups, left_index=True, right_index=True)
return [tuple(map(lambda v: (node_prefix, v), e)) for e in edges.values]
def matching_edges():
"""
Returns the edge list from a Connecting Point to HMIS matching CSV.
"""
matching = pd.read_csv('../data/matching/cp_hmis_match_results.csv').dropna()
return [(('c',v[0]),('h',v[1])) for v in matching[['clientid','Subject Unique Identifier']].values]
def get_ids_from_nodes(node_prefix, nodes):
"""
Take a list of nodes from G and returns a list of the ids of only the nodes with the given prefix.
param node_prefix: prefix for the nodes to keep.
type node_prefix: str.
param nodes: list of nodes from G.
type nodes: [(str, int)].
"""
return map(lambda pair: pair[1], filter(lambda pair: pair[0] == node_prefix, nodes))
def create_dataframe_from_grouped_ids(grouped_ids, col):
"""
Take a list of IDs, grouped by individual or family, and creates a dataframe where each ID in a group has the same
id in `col`.
For example, for the arguments `[[1, 2, 3], [4, 5, 6], [7, 8], [9]]` and `'Family Identifier'`, return a single-column dataframe:
```
Family Identifier
-+-----------------
1 0
2 0
3 0
4 1
5 1
6 1
7 2
8 2
9 3
```
param grouped_ids: a list of lists of ids.
type grouped_ids: [[int]].
param col: the name to give the single column in the dataframe.
type col: str.
"""
return pd.DataFrame({col: pd.Series({id: idx for idx, ids in enumerate(grouped_ids) for id in ids})})
#########################
# convert_dates methods #
#########################
def hmis_convert_dates(hmis):
"""
Given an HMIS dataframe, convert columns with dates to datetime columns.
:param hmis: HMIS dataframe.
:type hmis: Pandas.Dataframe.
"""
hmis['Raw Program Start Date'] = hmis['Program Start Date']
hmis['Program Start Date'] = pd.to_datetime(hmis['Program Start Date'])
hmis['Raw Program End Date'] = hmis['Program End Date']
hmis['Program End Date'] = pd.to_datetime(hmis['Program End Date'])
hmis['Raw DOB'] = hmis['DOB']
hmis['DOB'] = pd.to_datetime(hmis['DOB'])
return hmis
def cp_convert_dates(cp):
"""
Given a Connecting Point dataframe, convert columns with dates to datetime columns.
:param cp: Connecting Point dataframe.
:type cp: Pandas.Dataframe.
"""
cp['Raw servstart'] = cp['servstart']
cp['servstart'] = pd.to_datetime(cp['servstart'])
cp['Raw servend'] = cp['servend']
cp['servend'] = pd.to_datetime(cp['servend'])
cp['Raw LastUpdateDate'] = cp['LastUpdateDate']
cp['LastUpdateDate'] =
|
pd.to_datetime(cp['LastUpdateDate'])
|
pandas.to_datetime
|
"""
Analysis for automatically segmented cells.
Part of the annotations were computed in experiment 0003 and part in experiment 0004.
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# script name to identify this experiment
experiment_id = 'rreb1_tm1b_exp_0005_population_analysis_v8'
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
# post-processing parameters
min_area = 203 / 2 # (pix^2) smaller objects are rejected
# max_area = 44879 * 3 # (pix^2) larger objects are rejected
xres_ref = 0.4538234626730202
yres_ref = 0.4537822752643282
min_area_um2 = min_area * xres_ref * yres_ref
# max_area_um2 = max_area * xres_ref * yres_ref
max_area_um2 = 25e3
# list of NDPI files that were processed
ndpi_files_list = [
'RREB1-TM1B-B6N-IC-1.1a 1132-18 G1 - 2019-02-20 09.56.50.ndpi',
'RREB1-TM1B-B6N-IC-1.1a 1132-18 M1 - 2019-02-20 09.48.06.ndpi',
'RREB1-TM1B-B6N-IC-1.1a 1132-18 P1 - 2019-02-20 09.29.29.ndpi',
'RREB1-TM1B-B6N-IC-1.1a 1132-18 S1 - 2019-02-20 09.21.24.ndpi',
'RREB1-TM1B-B6N-IC-1.1b 1133-18 G1 - 2019-02-20 12.31.18.ndpi',
'RREB1-TM1B-B6N-IC-1.1b 1133-18 M1 - 2019-02-20 12.15.25.ndpi',
'RREB1-TM1B-B6N-IC-1.1b 1133-18 P3 - 2019-02-20 11.51.52.ndpi',
'RREB1-TM1B-B6N-IC-1.1b 1133-18 S1 - 2019-02-20 11.31.44.ndpi',
'RREB1-TM1B-B6N-IC-1.1c 1129-18 G1 - 2019-02-19 14.10.46.ndpi',
'RREB1-TM1B-B6N-IC-1.1c 1129-18 M2 - 2019-02-19 13.58.32.ndpi',
'RREB1-TM1B-B6N-IC-1.1c 1129-18 P1 - 2019-02-19 12.41.11.ndpi',
'RREB1-TM1B-B6N-IC-1.1c 1129-18 S1 - 2019-02-19 12.28.03.ndpi',
'RREB1-TM1B-B6N-IC-1.1e 1134-18 G2 - 2019-02-20 14.43.06.ndpi',
'RREB1-TM1B-B6N-IC-1.1e 1134-18 P1 - 2019-02-20 13.59.56.ndpi',
'RREB1-TM1B-B6N-IC-1.1f 1130-18 G1 - 2019-02-19 15.51.35.ndpi',
'RREB1-TM1B-B6N-IC-1.1f 1130-18 M2 - 2019-02-19 15.38.01.ndpi',
'RREB1-TM1B-B6N-IC-1.1f 1130-18 S1 - 2019-02-19 14.39.24.ndpi',
'RREB1-TM1B-B6N-IC-1.1g 1131-18 G1 - 2019-02-19 17.10.06.ndpi',
'RREB1-TM1B-B6N-IC-1.1g 1131-18 M1 - 2019-02-19 16.53.58.ndpi',
'RREB1-TM1B-B6N-IC-1.1g 1131-18 P1 - 2019-02-19 16.37.30.ndpi',
'RREB1-TM1B-B6N-IC-1.1g 1131-18 S1 - 2019-02-19 16.21.16.ndpi',
'RREB1-TM1B-B6N-IC-1.1h 1135-18 G3 - 2019-02-20 15.46.52.ndpi',
'RREB1-TM1B-B6N-IC-1.1h 1135-18 M1 - 2019-02-20 15.30.26.ndpi',
'RREB1-TM1B-B6N-IC-1.1h 1135-18 P1 - 2019-02-20 15.06.59.ndpi',
'RREB1-TM1B-B6N-IC-1.1h 1135-18 S1 - 2019-02-20 14.56.47.ndpi',
'RREB1-TM1B-B6N-IC-2.1a 1128-18 G1 - 2019-02-19 12.04.29.ndpi',
'RREB1-TM1B-B6N-IC-2.1a 1128-18 M2 - 2019-02-19 11.26.46.ndpi',
'RREB1-TM1B-B6N-IC-2.1a 1128-18 P1 - 2019-02-19 11.01.39.ndpi',
'RREB1-TM1B-B6N-IC-2.1a 1128-18 S1 - 2019-02-19 11.59.16.ndpi',
'RREB1-TM1B-B6N-IC-2.2a 1124-18 G1 - 2019-02-18 10.15.04.ndpi',
'RREB1-TM1B-B6N-IC-2.2a 1124-18 M3 - 2019-02-18 10.12.54.ndpi',
'RREB1-TM1B-B6N-IC-2.2a 1124-18 P2 - 2019-02-18 09.39.46.ndpi',
'RREB1-TM1B-B6N-IC-2.2a 1124-18 S1 - 2019-02-18 09.09.58.ndpi',
'RREB1-TM1B-B6N-IC-2.2b 1125-18 G1 - 2019-02-18 12.35.37.ndpi',
'RREB1-TM1B-B6N-IC-2.2b 1125-18 P1 - 2019-02-18 11.16.21.ndpi',
'RREB1-TM1B-B6N-IC-2.2b 1125-18 S1 - 2019-02-18 11.06.53.ndpi',
'RREB1-TM1B-B6N-IC-2.2d 1137-18 S1 - 2019-02-21 10.59.23.ndpi',
'RREB1-TM1B-B6N-IC-2.2e 1126-18 G1 - 2019-02-18 14.58.55.ndpi',
'RREB1-TM1B-B6N-IC-2.2e 1126-18 M1- 2019-02-18 14.50.13.ndpi',
'RREB1-TM1B-B6N-IC-2.2e 1126-18 P1 - 2019-02-18 14.13.24.ndpi',
'RREB1-TM1B-B6N-IC-2.2e 1126-18 S1 - 2019-02-18 14.05.58.ndpi',
'RREB1-TM1B-B6N-IC-5.1a 0066-19 G1 - 2019-02-21 15.26.24.ndpi',
'RREB1-TM1B-B6N-IC-5.1a 0066-19 M1 - 2019-02-21 15.04.14.ndpi',
'RREB1-TM1B-B6N-IC-5.1a 0066-19 P1 - 2019-02-21 14.39.43.ndpi',
'RREB1-TM1B-B6N-IC-5.1a 0066-19 S1 - 2019-02-21 14.04.12.ndpi',
'RREB1-TM1B-B6N-IC-5.1b 0067-19 P1 - 2019-02-21 16.32.24.ndpi',
'RREB1-TM1B-B6N-IC-5.1b 0067-19 S1 - 2019-02-21 16.00.37.ndpi',
'RREB1-TM1B-B6N-IC-5.1b 67-19 G1 - 2019-02-21 17.29.31.ndpi',
'RREB1-TM1B-B6N-IC-5.1b 67-19 M1 - 2019-02-21 17.04.37.ndpi',
'RREB1-TM1B-B6N-IC-5.1c 68-19 G2 - 2019-02-22 09.43.59.ndpi',
'RREB1-TM1B-B6N-IC- 5.1c 68 -19 M2 - 2019-02-22 09.27.30.ndpi',
'RREB1-TM1B-B6N-IC -5.1c 68 -19 peri3 - 2019-02-22 09.08.26.ndpi',
'RREB1-TM1B-B6N-IC- 5.1c 68 -19 sub2 - 2019-02-22 08.39.12.ndpi',
'RREB1-TM1B-B6N-IC-5.1d 69-19 G2 - 2019-02-22 15.13.08.ndpi',
'RREB1-TM1B-B6N-IC-5.1d 69-19 M1 - 2019-02-22 14.39.12.ndpi',
'RREB1-TM1B-B6N-IC-5.1d 69-19 Peri1 - 2019-02-22 12.00.19.ndpi',
'RREB1-TM1B-B6N-IC-5.1d 69-19 sub1 - 2019-02-22 11.44.13.ndpi',
'RREB1-TM1B-B6N-IC-5.1e 70-19 G3 - 2019-02-25 10.34.30.ndpi',
'RREB1-TM1B-B6N-IC-5.1e 70-19 M1 - 2019-02-25 09.53.00.ndpi',
'RREB1-TM1B-B6N-IC-5.1e 70-19 P2 - 2019-02-25 09.27.06.ndpi',
'RREB1-TM1B-B6N-IC-5.1e 70-19 S1 - 2019-02-25 08.51.26.ndpi',
'RREB1-TM1B-B6N-IC-7.1a 71-19 G1 - 2019-02-25 12.27.06.ndpi',
'RREB1-TM1B-B6N-IC-7.1a 71-19 P1 - 2019-02-25 11.31.30.ndpi',
'RREB1-TM1B-B6N-IC-7.1a 71-19 S1 - 2019-02-25 11.03.59.ndpi'
]
########################################################################################################################
## Compute cell populations from automatically segmented images in two depots: SQWAT and GWAT:
## Cell area histograms
## HD quantiles of cell areas
## The results are saved, so in later sections, it's possible to just read them for further analysis.
## GENERATES DATA USED IN FOLLOWING SECTIONS
########################################################################################################################
import matplotlib.pyplot as plt
import cytometer.data
import shapely
import scipy.stats as stats
import openslide
import numpy as np
import scipy.stats
import sklearn.neighbors, sklearn.model_selection
import pandas as pd
# from mlxtend.evaluate import permutation_test
# from statsmodels.stats.multitest import multipletests
# import math
import PIL
import warnings
# directories
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Rreb1_tm1b/annotations')
histology_dir = os.path.join(home, 'scan_srv2_cox/Liz Bentley/Grace/RREB1 Feb19')
dataframe_dir = os.path.join(home, 'GoogleDrive/Research/20200826_Rreb1_Grace')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20200826_Rreb1_Grace/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/rreb1')
# we are not using cell overlap in this study, thus, we use the 'auto' method (watershed)
method = 'auto'
DEBUG = False
SAVEFIG = False
# 0.05, 0.1 , 0.15, 0.2, ..., 0.9 , 0.95
quantiles = np.linspace(0, 1, 21)
# bins for the cell population histograms
area_bin_edges = np.linspace(min_area_um2, max_area_um2, 201)
area_bin_centers = (area_bin_edges[0:-1] + area_bin_edges[1:]) / 2.0
# data file with extra info for the dataframe (quantiles and histograms bins)
dataframe_areas_extra_filename = os.path.join(dataframe_dir, 'rreb1_tm1b_exp_0005_dataframe_areas_extra.npz')
# if the file doesn't exist, save it
if not os.path.isfile(dataframe_areas_extra_filename):
np.savez(dataframe_areas_extra_filename, quantiles=quantiles, area_bin_edges=area_bin_edges,
area_bin_centers=area_bin_centers)
# dataframe with histograms and smoothed histograms of cell populations in each slide
dataframe_areas_filename = os.path.join(dataframe_dir, 'rreb1_tm1b_exp_0005_dataframe_areas_' + method + '.pkl')
if os.path.isfile(dataframe_areas_filename):
# load dataframe with cell population quantiles and histograms
df_all = pd.read_pickle(dataframe_areas_filename)
else:
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'rreb1_tm1b_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# keep only the last part of the ID in 'Animal Identifier', so that it can be found in the filename
# RREB1-TM1B-B6N-IC/5.1c -> 5.1c
metainfo['Animal Identifier'] = [x.replace('RREB1-TM1B-B6N-IC/', '') for x in metainfo['Animal Identifier']]
# rename columns to make them easier to use in statsmodels
metainfo = metainfo.rename(
columns={'Weight (g)': 'Weight', 'Gonadal_AT (g)': 'Gonadal', 'Mesenteric_AT (g)': 'Mesenteric',
'PAT+RPAT (g)': 'PAT', 'Brown_AT (g)': 'Brown', 'SAT (g)': 'SAT'})
# make sure that in the boxplots Rreb1-tm1b:WT comes before Rreb1-tm1b:Het, and female before male
metainfo['Sex'] = metainfo['Sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['Genotype'] = metainfo['Genotype'].astype(
pd.api.types.CategoricalDtype(categories=['Rreb1-tm1b:WT', 'Rreb1-tm1b:Het'], ordered=True))
# mean mouse body weight (female and male)
mean_bw_f = metainfo[metainfo['Sex'] == 'f']['Weight'].mean()
mean_bw_m = metainfo[metainfo['Sex'] == 'm']['Weight'].mean()
# dataframe to keep all results, one row per annotations file
df_all = pd.DataFrame()
# get filenames of the annotations. Note that they come from two experiments, 0003 or 0004, so we have to check from
# which one
json_annotation_files = []
for i_file, file in enumerate(ndpi_files_list):
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': '
+ os.path.basename(file))
json_file = os.path.join(annotations_dir, file.replace('.ndpi', '_exp_0003_auto_aggregated.json'))
if os.path.isfile(json_file):
print('\tExperiment 0003')
json_annotation_files.append(json_file)
else:
json_file = os.path.join(annotations_dir, file.replace('.ndpi', '_exp_0004_auto_aggregated.json'))
if os.path.isfile(json_file):
print('\tExperiment 0004')
json_annotation_files.append(json_file)
else:
warnings.warn('Annotations file not found')
# process annotations files
for i_file, (ndpi_file, json_file) in enumerate(zip(ndpi_files_list, json_annotation_files)):
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': '
+ os.path.basename(json_file))
if not os.path.isfile(json_file):
warnings.warn('Missing annotations file')
continue
# open full resolution histology slide
im = openslide.OpenSlide(os.path.join(histology_dir, os.path.basename(ndpi_file)))
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = float(im.properties['openslide.mpp-x']) # um/pixel
yres = float(im.properties['openslide.mpp-y']) # um/pixel
# figure out what depot these cells belong to
aux = ndpi_file.replace('RREB1-TM1B-B6N-IC', '')
is_gonadal = any([s in aux for s in ['G1', 'G2', 'G3']])
is_perineal = any([s in aux for s in ['P1', 'P2', 'P3', 'peri', 'Peri']])
is_subcutaneous = any([s in aux for s in ['S1', 'S2', 'S3', 'sub', 'Sub']])
is_mesenteric = any([s in aux for s in ['M1', 'M2', 'M3']])
n_matches = np.count_nonzero([is_gonadal, is_perineal, is_subcutaneous, is_mesenteric])
if n_matches != 1:
raise ValueError(['Filename matches in metainfo table: ' + str(n_matches)])
if is_gonadal:
depot_label = 'Gonadal'
elif is_perineal:
depot_label = 'PAT' # perineal + retroperineal
elif is_subcutaneous:
depot_label = 'SAT'
elif is_mesenteric:
depot_label = 'Mesenteric'
aux = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_file),
tags_to_keep=['Animal Identifier', 'Sex', 'Genotype',
'Weight', 'Gonadal', 'PAT',
'SAT', 'Mesenteric'],
id_tag='Animal Identifier')
df = aux.drop(labels=['Gonadal', 'PAT', 'SAT', 'Mesenteric'], axis='columns')
df['depot'] = depot_label
df['depot_weight'] = aux[depot_label]
# load contours and their confidence measure from annotation file
cells, props = cytometer.data.aida_get_contours(json_file, layer_name='White adipocyte.*', return_props=True)
# compute areas of the cells (um^2)
areas = np.array([shapely.geometry.Polygon(cell).area for cell in cells]) * xres * yres # um^2
# smooth out histogram
kde = sklearn.neighbors.KernelDensity(bandwidth=100, kernel='gaussian').fit(areas.reshape(-1, 1))
log_dens = kde.score_samples(area_bin_centers.reshape(-1, 1))
pdf = np.exp(log_dens)
# compute mode
df['area_smoothed_mode'] = area_bin_centers[np.argmax(pdf)]
# compute areas at population quantiles
areas_at_quantiles = stats.mstats.hdquantiles(areas, prob=quantiles, axis=0)
df['area_at_quantiles'] = [areas_at_quantiles]
# compute histograms with area binning
histo, _ = np.histogram(areas, bins=area_bin_edges, density=True)
df['histo'] = [list(histo)]
# smoothed histogram
df['smoothed_histo'] = [list(pdf)]
if DEBUG:
plt.clf()
plt.plot(1e-3 * area_bin_centers, df['histo'].to_numpy()[0], label='Areas')
plt.plot(1e-3 * area_bin_centers, df['smoothed_histo'].to_numpy()[0], label='Kernel')
plt.plot([df['area_smoothed_mode'] * 1e-3, df['area_smoothed_mode'] * 1e-3],
[0, np.array(df['smoothed_histo'].to_numpy()[0]).max()], 'k', label='Mode')
plt.legend()
plt.xlabel('Area ($10^3 \cdot \mu m^2$)', fontsize=14)
# add results to total dataframe
df_all = pd.concat([df_all, df], ignore_index=True)
# save dataframe with data from both depots for current method (auto or corrected)
df_all.to_pickle(dataframe_areas_filename)
########################################################################################################################
## Import packages and auxiliary functions common to all analysis sections
## Load metainfo and cell population data
## USED IN PAPER
########################################################################################################################
from toolz import interleave
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.api as sm
from statsmodels.stats.multitest import multipletests
import seaborn as sns
import cytometer.stats
# directories
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Rreb1_tm1b/annotations')
histology_dir = os.path.join(home, 'scan_srv2_cox/Liz Bentley/Grace/RREB1 Feb19')
dataframe_dir = os.path.join(home, 'GoogleDrive/Research/20200826_Rreb1_Grace')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20200826_Rreb1_Grace/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/rreb1')
DEBUG = False
method = 'auto'
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'rreb1_tm1b_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# keep only the last part of the ID in 'Animal Identifier', so that it can be found in the filename
# RREB1-TM1B-B6N-IC/5.1c -> 5.1c
metainfo['Animal Identifier'] = [x.replace('RREB1-TM1B-B6N-IC/', '') for x in metainfo['Animal Identifier']]
# rename columns to make them easier to use in statsmodels
metainfo = metainfo.rename(columns={'Weight (g)':'Weight', 'Gonadal_AT (g)':'Gonadal', 'Mesenteric_AT (g)':'Mesenteric',
'PAT+RPAT (g)':'PAT', 'Brown_AT (g)':'Brown', 'SAT (g)':'SAT'})
# make sure that in the boxplots Rreb1-tm1b:WT comes before Rreb1-tm1b:Het, and female before male
metainfo['Sex'] = metainfo['Sex'].astype(
|
pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True)
|
pandas.api.types.CategoricalDtype
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pandas
from modin.data_management.utils import length_fn_pandas, width_fn_pandas
from modin.engines.base.frame.partition import BaseFramePartition
from distributed.client import get_client
from distributed import Future
from distributed.utils import get_ip
import cloudpickle as pkl
from dask.distributed import wait
def apply_list_of_funcs(funcs, df):
for func, kwargs in funcs:
if isinstance(func, bytes):
func = pkl.loads(func)
df = func(df, **kwargs)
return df, get_ip()
class PandasOnDaskFramePartition(BaseFramePartition):
"""This abstract class holds the data and metadata for a single partition.
The methods required for implementing this abstract class are listed in
the section immediately following this.
The API exposed by the children of this object is used in
`BaseFrameManager`.
Note: These objects are treated as immutable by `BaseFrameManager`
subclasses. There is no logic for updating inplace.
"""
def __init__(self, future, length=None, width=None, ip=None, call_queue=None):
self.future = future
if call_queue is None:
call_queue = []
self.call_queue = call_queue
self._length_cache = length
self._width_cache = width
self._ip_cache = ip
def get(self):
"""Flushes the call_queue and returns the data.
Note: Since this object is a simple wrapper, just return the data.
Returns
-------
The object that was `put`.
"""
self.drain_call_queue()
# blocking operation
if isinstance(self.future, pandas.DataFrame):
return self.future
return self.future.result()
def apply(self, func, **kwargs):
"""Apply some callable function to the data in this partition.
Note: It is up to the implementation how kwargs are handled. They are
an important part of many implementations. As of right now, they
are not serialized.
Parameters
----------
func : callable
The lambda to apply (may already be correctly formatted)
Returns:
A new `PandasOnDaskFramePartition` containing the object that has had `func`
applied to it.
"""
func = pkl.dumps(func)
call_queue = self.call_queue + [[func, kwargs]]
future = get_client().submit(
apply_list_of_funcs, call_queue, self.future, pure=False
)
futures = [get_client().submit(lambda l: l[i], future) for i in range(2)]
return PandasOnDaskFramePartition(futures[0], ip=futures[1])
def add_to_apply_calls(self, func, **kwargs):
"""Add the function to the apply function call stack.
Note: This function will be executed when apply is called. It will be executed
in the order inserted; apply's func operates the last and return
Parameters
----------
func : callable
The function to apply.
Returns
-------
A new `PandasOnDaskFramePartition` with the function added to the call queue.
"""
return PandasOnDaskFramePartition(
self.future, call_queue=self.call_queue + [[pkl.dumps(func), kwargs]]
)
def drain_call_queue(self):
"""Execute all functionality stored in the call queue."""
if len(self.call_queue) == 0:
return
new_partition = self.apply(lambda x: x)
self.future = new_partition.future
self._ip_cache = new_partition._ip_cache
self.call_queue = []
def wait(self):
self.drain_call_queue()
wait(self.future)
def mask(self, row_indices, col_indices):
"""Lazily create a mask that extracts the indices provided.
Parameters
----------
row_indices: The indices for the rows to extract.
col_indices: The indices for the columns to extract.
Returns
-------
A `PandasOnDaskFramePartition` object.
"""
new_obj = self.add_to_apply_calls(
lambda df:
|
pandas.DataFrame(df.iloc[row_indices, col_indices])
|
pandas.DataFrame
|
from __future__ import print_function
import pandas as pd
import os
import logging
import argparse
'''
This file reads in data related E. coli levels
in Chicago beaches. It is based on the files
analysis.R and split_sheets.R, and is written
such that the dataframe loaded here will match
the R dataframe code exactly.
'''
# This is an adaptation of previous read_data.py so that it runs on Python3
# Some variable names changed. Notably, Client.ID is now Beach
# Added day of week and month variables
# Also adds columns to dataframe:
# YesterdayEcoli : prior days reading
# DayBeforeYesterdayEcoli : two days prior reading
# actual_elevated : where Escherichia_coli >=235
# predicted_elevated : where Drek_Prediction >=235
#
# TODO: verbose
# TODO: use multi-level index on date/beach ?
# TODO: standardize on inplace=True or not inplace
# TODO: how much consistency do we want between python columns
# and the R columns?
# TODO: create better docstrings
# TODO: remove print statements and the import
# TODO: loyola/leone the same?
# TODO: repeats on 2015-06-16 ?
# and some of 2012?
# Just check for these everywhere, why is it happening?
def split_sheets(file_name, year, verbose=False):
'''
Reads in all sheets of an excel workbook, concatenating
all of the information into a single dataframe.
The excel files were unfortunately structured such that
each day had its own sheet.
'''
xls = pd.ExcelFile(file_name)
dfs = []
standardized_col_names = [
'Date', 'Laboratory_ID', 'Beach', 'Reading1',
'Reading2', 'Escherichia_coli', 'Units', 'Sample_Collection_Time'
]
for i, sheet_name in enumerate(xls.sheet_names):
if not xls.book.sheet_by_name(sheet_name).nrows:
# Older versions of ExcelFile.parse threw an error if the sheet
# was empty, explicitly check for this condition.
logging.debug('sheet "{0}" from {1} is empty'.format(sheet_name,
year))
continue
df = xls.parse(sheet_name)
if i == 0 and len(df.columns) > 30:
# This is the master/summary sheet
logging.debug('ignoring sheet "{0}" from {1}'.format(sheet_name,
year))
continue
if df.index.dtype == 'object':
# If the first column does not have a label, then the excel
# parsing engine will helpfully use the first column as
# the index. This is *usually* helpful, but there are two
# days when the first column is missing the typical label
# of 'Laboratory ID'. In this case, peel that index off
# and set its name.
msg = '1st column in sheet "{0}" from {1} is missing title'.format(
sheet_name, year)
logging.debug(msg)
df.reset_index(inplace=True)
df.columns = ['Laboratory ID'] + df.columns.tolist()[1:]
# Insert name of sheet as first column, the sheet name is the date
df.insert(0, u'Date', sheet_name)
for c in df.columns.tolist():
if 'Reading' in c:
# There are about 10 days that have >2 readings for some reason
if int(c[8:]) > 2:
logging.info('sheet "{0}" from {1} has >2 readings'.format(
sheet_name, year)
)
df.drop(c, 1, inplace=True)
# Only take the first 8 columns, some sheets erroneously have >8 cols
df = df.ix[:,0:8]
# Standardize the column names
df.columns = standardized_col_names
dfs.append(df)
df = pd.concat(dfs)
df.insert(0, u'Year', str(year))
logging.info('Removing data with missing Client ID')
df.dropna(subset=['Beach'], inplace=True)
return df
def read_holiday_data(file_name, verbose=False):
df = pd.read_csv(file_name)
df['Date'] = pd.to_datetime(df['Date'])
return df
def read_water_sensor_data(verbose=False):
'''
Downloads and reads water sensor data from the Chicago data
portal. Downsamples the readings into the min, mean, and max
for each day and for each sensor. Each day only has one row,
with many columns (one column each per sensor per reading per
type of down-sampling process)
'''
url = 'https://data.cityofchicago.org/api/views/qmqz-2xku/rows.csv?accessType=DOWNLOAD'
water_sensors = pd.read_csv(url)
url = 'https://data.cityofchicago.org/api/views/g3ip-u8rb/rows.csv?accessType=DOWNLOAD'
sensor_locations = pd.read_csv(url)
df = pd.merge(water_sensors, sensor_locations,
left_on='Beach Name', right_on='Sensor Name')
df.drop(['Sensor Type', 'Location'], 1, inplace=True)
# TODO: map sensor to beach ???
df['Beach Name'] = df['Beach Name'].apply(lambda x: x[0:-6])
df['Measurement Timestamp'] = pd.to_datetime(df['Measurement Timestamp'])
df['Date'] = pd.DatetimeIndex(df['Measurement Timestamp']).normalize()
df.drop(['Battery Life', 'Measurement Timestamp', 'Measurement Timestamp Label',
'Measurement ID', 'Sensor Name'], axis=1, inplace=True)
df_mins = df.groupby(['Beach Name', 'Date'], as_index=False).min()
df_means = df.groupby(['Beach Name', 'Date'], as_index=False).mean()
df_maxes = df.groupby(['Beach Name', 'Date'], as_index=False).max()
df_mins.drop(['Latitude','Longitude'],1,inplace=True)
df_means.drop(['Latitude','Longitude'],1,inplace=True)
df_maxes.drop(['Latitude','Longitude'],1,inplace=True)
cols = df_mins.columns.tolist()
def rename_columns(cols, aggregation_type):
cols = list(map(lambda x: x.replace(' ', '_'), cols))
for i in range(2,7):
cols[i] = cols[i] + '_' + aggregation_type
return cols
df_mins.columns = rename_columns(cols, 'Min')
df_means.columns = rename_columns(cols, 'Mean')
df_maxes.columns = rename_columns(cols, 'Max')
df = pd.merge(df_mins, df_means, on=['Beach_Name', 'Date'])
df = pd.merge(df, df_maxes, on=['Beach_Name', 'Date'])
df = df.pivot(index='Date', columns='Beach_Name')
df.columns = ['.'.join(col[::-1]).strip() for col in df.columns.values]
df.reset_index(inplace=True)
df.columns = ['Full_date'] + list( map(lambda x: x.replace(' ', '_'), df.columns.tolist()[1:]))
c = df.columns.tolist()
c[c.index('Full_date')] = 'Date'
df.columns = c
return df
def read_weather_station_data(verbose=False):
'''
Downloads and reads weather sensor data from the Chicago data
portal. Downsamples the readings into the min, mean, and max
for each day and for each sensor. Each day only has one row,
with many columns (one column each per sensor per reading per
type of down-sampling process)
'''
url = 'https://data.cityofchicago.org/api/views/k7hf-8y75/rows.csv?accessType=DOWNLOAD'
weather_sensors = pd.read_csv(url)
url = 'https://data.cityofchicago.org/api/views/g3ip-u8rb/rows.csv?accessType=DOWNLOAD'
sensor_locations = pd.read_csv(url)
weather_sensors.columns = map(lambda x: x.replace(' ', '_'),
weather_sensors.columns.tolist())
sensor_locations.columns = map(lambda x: x.replace(' ', '_'),
sensor_locations.columns.tolist())
sensor_locations.columns = ['Station_Name'] + sensor_locations.columns.tolist()[1:]
df = pd.merge(weather_sensors, sensor_locations, on='Station_Name')
df['Beach'] = df['Station_Name']
df['Date'] =
|
pd.DatetimeIndex(df['Measurement_Timestamp'])
|
pandas.DatetimeIndex
|
import pandas as pd
import matplotlib.pyplot as plt
from PyQt5.QtCore import *
from libs.figure.figure_QDialog import fig_Dialog
import os
import numpy as np
class save_DynamicResult_(QThread):
def __init__(self, over_tracked, parameter, save_path, parent=None):
super(save_DynamicResult_, self).__init__()
self.overtracked = over_tracked
self.particle = {"binding":[], "debinding":[]}
self.parameter = parameter
self.save_path = save_path
self.binding = []
self.debinding = []
self.Method = parameter[0]
self.SubImg_T = parameter[1]
def save(self):
all = self.overtracked
for i in range(len(all)):
if self.Method == 0:
start_frame = all[i][1][0]
over_frame = all[i][-1][0]
if all[i][-1][2] == "debinding":
over_index = self.search_debinding(all[i])
over_frame = all[i][over_index][0]
if self.Method == 0 and all[i][-1][2] == "binding" and all[i][-1][0] % self.SubImg_T == 0:
# TODO:这里需要修改!!!
pass # 如果减第一帧,该轨迹的最后一帧是500的整数倍,那就认为该粒子还存在
self.binding.append(start_frame)
self.debinding.append(over_frame)
else:
if len(all[i]) == 2:
# 如果这一类只有一个,可能为binding也可能为debinding,那就添加进去
if all[i][-1][2] != "debinding":
self.particle[all[i][-1][2]].append(all[i][-1][0])
pass
# 下面是类别中大于2个的,标准为binding开始,debinding结束,不标准的则是binding开始,binding结束,
start_frame = all[i][1][0]
over_frame = all[i][-1][0]
over_index = -1
if all[i][-1][2] == "debinding":
over_index = self.search_debinding(all[i])
over_frame = all[i][over_index][0]
self.particle["binding"].append(start_frame)
self.particle["debinding"].append(over_frame)
# if all[i][-1][2] == "debinding":
# over_index = self.search_debinding(all[i])
# over_frame = all[i][over_index][0]
# if all[i][-1][2] == "binding" and all[i][over_index][2] == "debinding":
# self.particle["binding"].append(start_frame)
# self.particle["debinding"].append(over_frame)
# elif all[i][-1][2] == "binding" and all[i][over_index][2] == "binding":
# self.particle["binding"].append(start_frame)
# elif all[i][-1][2] == "debinding" and all[i][over_index][2] == "debinding":
# self.particle["debinding"].append(over_frame)
if self.Method == 1:
self.binding = self.particle["binding"]
self.debinding = self.particle["debinding"]
print(self.binding)
binding = self.sort_(self.binding)
debinding = self.sort_(self.debinding)
binding_Data = pd.DataFrame(binding, columns=["Frame", "New Binding"])
binding_Data = binding_Data.set_index("Frame", drop=True)
debinding_Data = pd.DataFrame(debinding, columns=["Frame", "New Debinding"])
debinding_Data = debinding_Data.set_index("Frame", drop=True)
df = pd.concat([binding_Data, debinding_Data], axis=1)
print(df)
max_index = df.index[-1]
index = [i for i in range(1, max_index + 1)]
data = np.zeros([max_index, 2])
for i in df.index:
data[i - 1, :] = df.loc[i, :]
new = pd.DataFrame(data, index=index, columns=["New Binding", "New Debinding"])
new = new.fillna(0)
have_binding = [[1, 0]]
have_debinding = [[1, 0]]
b_, deb_ = 0, 0
for i in range(1, len(new)):
b_ += new.iloc[i]["New Binding"]
deb_ += new.iloc[i]["New Debinding"]
have_binding.append([i + 1, b_])
have_debinding.append([i + 1, deb_])
have_binding_Data =
|
pd.DataFrame(have_binding, columns=["Frame", "have Binding"])
|
pandas.DataFrame
|
__version__ = 0.1
__author__ = '<NAME>'
__doc__ = """
Utility function for json.
"""
import numpy as np
import pandas as pd
import json
class MyJson(object):
def __init__(self):
pass
def parse_json_col(self, df,json_col):
"""Explode the json column and attach to original dataframe.
Parameters
-----------
df: pandas.DataFrame
input dataframe
json_col: string
Column name of dataframe which contains json objects.
Example:
--------
import numpy as np
import pandas as pd
pd.options.display.max_colwidth=999
from bp.ds_json import MyJson
df = pd.DataFrame({'id': [0],
'payload': [\"""{"analytics": {"device": "Desktop",
"email_open_rate_pct": 14.0},
"industry": "Construction",
"time_in_product_mins": 62.45}\"""]
})
mj = MyJson()
ans = mj.parse_json_col(df,'payload')
"""
# give increasing index to combine later
df = df.reset_index()
df_json = df[json_col].apply(json.loads).apply(pd.json_normalize)
df_json = pd.concat(df_json.to_numpy())
df_json.index = range(len(df_json))
df_no_json = df.drop(json_col,axis=1)
cols = df_no_json.columns.tolist() + df_json.columns.tolist()
df_combined =
|
pd.concat([df_no_json, df_json], axis=1, ignore_index=False)
|
pandas.concat
|
import os
import nltk
import pandas as pd
from nltk.stem.lancaster import LancasterStemmer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split as tts
from sklearn.preprocessing import LabelEncoder as LE
from sklearn.svm import SVC
from vectorizers.factory import get_vectoriser
class FaqEngine:
def __init__(self, faqslist, type='tfidf'):
self.faqslist = faqslist
self.stemmer = LancasterStemmer()
self.le = LE()
self.classifier = None
self.build_model(type)
def cleanup(self, sentence):
word_tok = nltk.word_tokenize(sentence)
stemmed_words = [self.stemmer.stem(w) for w in word_tok]
return ' '.join(stemmed_words)
def build_model(self, type):
self.vectorizer = get_vectoriser(type) # TfidfVectorizer(min_df=1, stop_words='english')
dataframeslist = [
|
pd.read_csv(csvfile)
|
pandas.read_csv
|
#!/usr/bin python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
from os import getenv
from functools import lru_cache
from hashlib import blake2s
from io import BytesIO
from datetime import datetime
from json import loads
import logging
# 3rd party:
from sqlalchemy.dialects.postgresql import insert, dialect as postgres
from sqlalchemy.exc import ProgrammingError
from pandas import read_feather, to_datetime, DataFrame, read_sql
from numpy import NaN, ndarray, array_split
from azure.core.exceptions import ResourceNotFoundError
# Internal:
try:
from __app__.storage import StorageClient
from __app__.db_tables.covid19 import (
Session, MainData, ReleaseReference,
AreaReference, MetricReference, DB_INSERT_MAX_ROWS
)
except ImportError:
from storage import StorageClient
from db_tables.covid19 import (
Session, MainData, ReleaseReference,
AreaReference, MetricReference, DB_INSERT_MAX_ROWS
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'main',
'generate_row_hash',
'to_sql',
'deploy_preprocessed',
'get_partition_id',
'get_release',
'create_partition',
'deploy_preprocessed_long',
'trim_sides'
]
RECORD_KEY = getenv("RECORD_KEY").encode()
def trim_sides(data):
for metric in data.metric.dropna().unique():
dm = (
data
.loc[data.metric == metric, :]
.sort_values(["date"], ascending=True)
)
if not dm.payload.dropna().size:
continue
try:
cumsum = dm.payload.abs().cumsum()
first_nonzero = cumsum.loc[cumsum > 0].index[0]
except (TypeError, IndexError):
first_nonzero = dm.payload.first_valid_index()
try:
dm.loc[:first_nonzero + 1] = NaN
except KeyError:
continue
if not dm.payload.dropna().size:
continue
last_valid = dm.payload.last_valid_index()
try:
dm.loc[last_valid - 1:, :] = NaN
data.loc[dm.index] = dm
except KeyError:
continue
return data.dropna(how="all", axis=0)
def get_area_data():
session = Session()
try:
results = read_sql(
f"""\
SELECT c.id AS "area_id", c.area_type, c.area_code
FROM covid19.area_reference AS c
""",
con=session.connection(),
index_col=["area_type", "area_code"]
)
except Exception as err:
session.rollback()
raise err
finally:
session.close()
return results
nested = {
"newAdmissionsByAge",
"cumAdmissionsByAge",
"femaleCases",
"maleCases",
"femaleNegatives",
"maleNegatives",
"newCasesBySpecimenDateAgeDemographics",
"newDeaths28DaysByDeathDateAgeDemographics"
}
id_vars = [
'area_type',
'area_code',
'partition_id',
'date',
'release_id',
'hash'
]
metric_names = {
'areaType': 'area_type',
'areaCode': 'area_code',
'areaName': 'area_name',
'releaseTimestamp': 'release_timestamp',
}
def generate_row_hash(d: DataFrame, hash_only=False, date=None) -> DataFrame:
"""
Parameters
----------
d
hash_only
date
Returns
-------
"""
hash_cols = [
"date",
"area_type",
"area_code",
"metric_id",
"release_id"
]
try:
d.date = d.date.map(lambda x: x.strftime("%Y-%m-%d"))
except AttributeError:
pass
d.date = d.date.map(lambda x: x[:10])
# Create hash
hash_key = (
d
.loc[:, hash_cols]
.astype(str)
.sum(axis=1)
.apply(str.encode)
.apply(lambda x: blake2s(x, key=RECORD_KEY, digest_size=12).hexdigest())
)
if hash_only:
return hash_key
column_names = d.columns
data = d.assign(
hash=hash_key,
seriesDate=date,
id=hash_key
).loc[:, ['id', 'hash', 'seriesDate', *list(column_names)]]
return data
def get_metrics():
session = Session()
try:
metrics = read_sql(
f"""\
SELECT ref.id AS "metric_id", ref.metric
FROM covid19.metric_reference AS ref;
""",
con=session.connection(),
index_col=["metric"]
)
except Exception as err:
session.rollback()
raise err
finally:
session.close()
return metrics
@lru_cache(256)
def get_release(timestamp):
insert_stmt = (
insert(ReleaseReference.__table__)
.values(timestamp=timestamp)
)
stmt = (
insert_stmt
.on_conflict_do_update(
index_elements=[ReleaseReference.timestamp],
set_={ReleaseReference.timestamp.name: insert_stmt.excluded.timestamp}
)
.returning(ReleaseReference.id)
)
# session = Session(autocommit=True)
session = Session()
try:
response = session.execute(stmt)
result = response.fetchone()[0]
except Exception as err:
session.rollback()
raise err
finally:
session.close()
return result
def get_partition_id(area_type, release):
if area_type in ["nhsTrust", "utla", "ltla", "msoa"]:
partition_id = f"{release:%Y_%-m_%-d}|{area_type.lower()}"
else:
partition_id = f"{release:%Y_%-m_%-d}|other"
return partition_id
def create_partition(area_type: str, release: datetime):
"""
Creates new database partition - if one doesn't already exist - for
the `time_series` table based on `area_type` and `release` datestamp.
Parameters
----------
area_type : str
Area type, as defined in the `area_reference` table.
release: datetime
Release timestamp of the data.
Returns
-------
NoReturn
"""
partition_id = get_partition_id(area_type, release)
if area_type in ["nhsTrust", "utla", "ltla", "msoa"]:
area_partition = f"{release:%Y_%-m_%-d}_{area_type.lower()}"
else:
area_partition = f"{release:%Y_%-m_%-d}_other"
session = Session()
try:
session.execute(
f"""
CREATE TABLE IF NOT EXISTS covid19.time_series_p{area_partition}
PARTITION OF covid19.time_series ( partition_id )
FOR VALUES IN ('{partition_id}');
"""
)
session.flush()
except ProgrammingError as e:
session.rollback()
except Exception as err:
session.rollback()
raise err
finally:
session.close()
def sql_fn(row):
return MainData(**row.to_dict())
def to_sql(df: DataFrame):
if df.size == 0:
return None
df_size = df.shape[0]
n_chunks = df_size // DB_INSERT_MAX_ROWS + 1
df.drop_duplicates(
["release_id", "area_id", "metric_id", "date"],
keep="first",
inplace=True
)
session = Session()
connection = session.connection()
try:
for chunk in df.pipe(array_split, n_chunks):
records = chunk.to_dict(orient="records")
insert_stmt = insert(MainData.__table__).values(records)
stmt = insert_stmt.on_conflict_do_update(
index_elements=[MainData.hash, MainData.partition_id],
set_={MainData.payload.name: insert_stmt.excluded.payload}
)
connection.execute(stmt)
session.flush()
except Exception as err:
session.rollback()
raise err
finally:
session.close()
return None
def validate_metrics(dt):
metrics = get_metrics()
invalid_metrics = set(dt.metric).difference(metrics.index)
if len(invalid_metrics):
for metric in invalid_metrics:
add_metric(metric)
return dt.join(get_metrics(), on=["metric"])
return dt.join(metrics, on=["metric"])
def deploy_nested(df, key):
if key not in df.columns:
return df
dt = df.loc[:, ["date", "area_type", "area_code", "partition_id", "release_id", key]]
if dt.size:
try:
dt.drop(columns=set(dt.columns).difference([*id_vars, key]), inplace=True)
dt.rename(columns={key: "payload"}, inplace=True)
dt.dropna(subset=["payload"], inplace=True)
dt.payload = dt.payload.map(lambda x: list(x) if not isinstance(x, dict) else list())
to_sql(
dt
.assign(metric=key)
.join(get_area_data(), on=["area_type", "area_code"])
.pipe(validate_metrics)
.pipe(lambda d: d.assign(hash=generate_row_hash(d, hash_only=True)))
.loc[:, ["metric_id", "area_id", "partition_id", "release_id", "hash", "date", "payload"]]
)
except Exception as e:
raise e
return df
def deploy_preprocessed_long(df):
"""
Generates hash key and deploys the data to the database.
Data must be preprocessed and in long (unstacked) format.
Parameters
----------
df: DataFrame
Dataframe containing the following columns:
- metric_id
- area_id
- partition_id
- release_id
- date
- payload
Returns
-------
DataFrame
Processed dataframe
"""
to_sql(
df
.join(get_area_data(), on=["area_type", "area_code"])
.pipe(validate_metrics)
.pipe(lambda d: d.assign(hash=generate_row_hash(d, hash_only=True)))
.loc[:, ["metric_id", "area_id", "partition_id", "release_id", "hash", "date", "payload"]]
)
return df
def deploy_preprocessed(df, key):
df.loc[:, key] = df.loc[:, key].map(list)
to_sql(
df
.rename(columns={key: "payload"})
.assign(metric=key)
.join(get_area_data(), on=["area_type", "area_code"])
.pipe(validate_metrics)
.pipe(lambda d: d.assign(hash=generate_row_hash(d, hash_only=True)))
.loc[:, ["metric_id", "area_id", "partition_id", "release_id", "hash", "date", "payload"]]
)
return df
def convert_timestamp(dt: DataFrame, timestamp):
if "release_timestamp" in dt.columns:
ts = dt.loc[:, "release_timestamp"].unique()[0]
if ts == timestamp and not isinstance(ts, str):
return dt
dt.drop(columns=["release_timestamp"], inplace=True)
dt = dt.assign(release_timestamp=timestamp.isoformat() + "Z")
dt.loc[:, "release_timestamp"] =
|
to_datetime(dt.release_timestamp)
|
pandas.to_datetime
|
#########################################
# Time Series Figures
#########################################
#### Import Libraries and Functions
from pyhydroqc import anomaly_utilities, rules_detect, calibration
from pyhydroqc.parameters import site_params
import matplotlib.pyplot as plt
import datetime
import pandas as pd
import numpy as np
import os
colors = ['#0C7BDC', '#F3870D', '#24026A', '#AF3C31']
# FIGURES 3 (gap values and drift correction), 4 (threshold), C1 (detection example), C2 (long labeled event),
# C3 (model detection for calibration events)
# These figures all use data from Main Street.
#### Retrieve data
#########################################
site = 'MainStreet'
sensors = ['temp', 'cond', 'ph', 'do']
years = [2014, 2015, 2016, 2017, 2018, 2019]
sensor_array = anomaly_utilities.get_data(sensors=sensors, site=site, years=years, path="./LRO_data/")
#### Rules Based Anomaly Detection
#########################################
range_count = dict()
persist_count = dict()
rules_metrics = dict()
for snsr in sensor_array:
sensor_array[snsr], range_count[snsr] = rules_detect.range_check(df=sensor_array[snsr],
maximum=site_params[site][snsr]['max_range'],
minimum=site_params[site][snsr]['min_range'])
sensor_array[snsr], persist_count[snsr] = rules_detect.persistence(df=sensor_array[snsr],
length=site_params[site][snsr]['persist'],
output_grp=True)
sensor_array[snsr] = rules_detect.interpolate(df=sensor_array[snsr])
print('Rules based detection complete.\n')
### Find Gap Values
#########################################
# Subset of sensors that are calibrated
calib_sensors = sensors[1:4]
# Initialize data structures
calib_dates = dict()
gaps = dict()
shifts = dict()
tech_shifts = dict()
for cal_snsr in calib_sensors:
# Import calibration dates
calib_dates[cal_snsr] = pd.read_csv('./LRO_data/' + site + '_' + cal_snsr + '_calib_dates.csv',
header=1, parse_dates=True, infer_datetime_format=True)
calib_dates[cal_snsr]['start'] = pd.to_datetime(calib_dates[cal_snsr]['start'])
calib_dates[cal_snsr]['end'] = pd.to_datetime(calib_dates[cal_snsr]['end'])
# Ensure date range of calibrations correspond to imported data
calib_dates[cal_snsr] = calib_dates[cal_snsr].loc[(calib_dates[cal_snsr]['start'] > min(sensor_array[cal_snsr].index)) &
(calib_dates[cal_snsr]['end'] < max(sensor_array[cal_snsr].index))]
# Initialize dataframe to store determined gap values and associated dates
gaps[cal_snsr] = pd.DataFrame(columns=['end', 'gap'],
index=range(min(calib_dates[cal_snsr].index), max(calib_dates[cal_snsr].index)+1))
if len(calib_dates[cal_snsr]) > 0:
# Initialize data structures
shifts[cal_snsr] = []
# Loop through each calibration event date.
for i in range(min(calib_dates[cal_snsr].index), max(calib_dates[cal_snsr].index)+1):
# Apply find_gap routine, add to dataframe, add output of shifts to list.
gap, end = calibration.find_gap(observed=sensor_array[cal_snsr]['observed'],
calib_date=calib_dates[cal_snsr]['end'][i],
hours=2,
show_shift=False)
gaps[cal_snsr].loc[i]['end'] = end
gaps[cal_snsr].loc[i]['gap'] = gap
print('Gap value determination complete.\n')
# Review gaps and make adjustments as needed before performing drift correction
gaps['cond'].loc[3, 'gap'] = 4
gaps['cond'].loc[4, 'gap'] = 10
gaps['cond'].loc[21, 'gap'] = 0
gaps['cond'].loc[39, 'gap'] = -5
gaps['cond'].loc[41, 'gap'] = 4
gaps['ph'].loc[33, 'gap'] = -0.04
gaps['ph'].loc[43, 'gap'] = 0.12
gaps['ph'].loc[43, 'end'] = '2019-08-15 15:00'
#### Perform Linear Drift Correction
#########################################
calib_sensors = sensors[1:4]
for cal_snsr in calib_sensors:
# Set start dates for drift correction at the previously identified calibration (one month back for the first calibration.)
gaps[cal_snsr]['start'] = gaps[cal_snsr]['end'].shift(1)
gaps[cal_snsr]['start'][0] = gaps[cal_snsr]['end'][0] - pd.Timedelta(days=30)
if len(gaps[cal_snsr]) > 0:
for i in range(min(gaps[cal_snsr].index), max(gaps[cal_snsr].index) + 1):
result, sensor_array[cal_snsr]['observed'] = calibration.lin_drift_cor(observed=sensor_array[cal_snsr]['observed'],
start=gaps[cal_snsr]['start'][i],
end=gaps[cal_snsr]['end'][i],
gap=gaps[cal_snsr]['gap'][i],
replace=True)
print('Linear drift correction complete.\n')
## FIGURE 3 ##
#########################################
# Compare calibration and drift correction to Observed data and to technician corrected.
cal_snsr = 'ph'
df = sensor_array[cal_snsr]
plt.figure(figsize=(10, 4))
plt.plot(df['raw'], colors[0], label='Observed data')
plt.plot(df['cor'], colors[1], label='Technician corrected')
plt.plot(df['observed'], colors[3], label='Algorithm corrected')
plt.xlim(datetime.datetime(2014, 7, 24), datetime.datetime(2014, 8, 1)) # Specify date range of plot
plt.ylim(7.6, 8.4)
plt.legend()
plt.ylabel('pH')
plt.xlabel('Date')
plt.show()
plt.savefig('Figures/Figure3.png', bbox_inches='tight')
## FIGURE 4 ##
#########################################
# Examine thresholds and model residuals
# set working directory for importing model results.
os.chdir('Examples/Plotting')
ARIMA_detections = pd.read_csv('ARIMA_detections_MainStreet_cond.csv',
header=0,
index_col=0,
parse_dates=True,
infer_datetime_format=True)
ARIMA_threshold = pd.read_csv('ARIMA_threshold_MainStreet_cond.csv',
header=0,
index_col=0,
parse_dates=True,
infer_datetime_format=True)
plt.figure(figsize=(10, 4))
plt.plot(ARIMA_detections['residual'], 'b', label='Model residuals')
plt.plot(ARIMA_threshold['low'], 'c', label='Upper threshold')
plt.plot(ARIMA_threshold['high'], 'm', mfc='none', label='Lower threshold')
plt.xlim(datetime.datetime(2015, 7, 8), datetime.datetime(2015, 8, 14)) # Specify date range of plot
plt.ylim(-200, 150)
plt.xticks(
|
pd.date_range(start='7/9/2015', end='8/14/2015', freq='5D')
|
pandas.date_range
|
########################################
#This script reads data and convert it to a code redable format to be used in next steps
#########################################
from sklearn.datasets import fetch_20newsgroups
from pandas import DataFrame
import unicodedata
newsgroups_train = fetch_20newsgroups(subset='train')
newsgroups_test = fetch_20newsgroups(subset='test')
rows_train = []
rows_test = []
rows_all = []
# Path for saving all tsv files.
path_train = 'data/train_v2.tsv'
path_test = 'data/test_v2.tsv'
path_all = 'data/all_v2.tsv'
data_train =
|
DataFrame({'news': [], 'class': []})
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
"""
This script has to be executed after hi_freq_data_to_csv.py and get_interval.py have succesfully run.
This script should be called with 1 (or 2) arguments.
The 1st mandatory argument is the ABSOLUTE path of the top directory for the flight campaign.
/media/spectors/HDD320/lidar/20201218_fresh <<----- This is it!
----------------------------/20201218_fresh/p_00_joined_pcap_files
----------------------------/20201218_fresh/p_01_apx_csv_shapefile <<----- This must be present and will be used as input.
----------------------------/20201218_fresh/p_02_plt <<----- Not used. Just for reference.
----------------------------/20201218_fresh/p_03_pcap <<----- This must be present and will be used as input.
----------------------------/20201218_fresh/2_planned_mision
----------------------------/20201218_fresh/ .....
----------------------------/20201218_fresh/logging <<----- This is where the logs will be stored.
----------------------------/20201218_fresh/transl_table.txt <<----- This must be present and will be used as input.
The 2nd optional argument can be a boresight-calibration string.
It must contain the boresight angles and be of the following form:
# RabcdefghPijklmnopYqrstuvwx
# Where abcdefgh is milionths of degree to ROLL. a is sign (p/n)
# ..... ijklmnop is milionths of degree to PITCH. i is sign (p/n)
# ..... qrstuvwx is milionths of degree to YAW. q is sign (p/n)
# In this order! ROLL -> PITCH -> YAW !
# Theoretically can encode up to 9.9° around each axis
This script combines .csv files with each of the .pcap flight lines and writes point clouds in .txt files.
It then calls a lew lastools to convert them to las, denoise and set the correct (georeference) metadata.
The script is run non-interactively.
The only exception is choosing the p_01_apx_csv_shapefile and p__03_pcap folders at the beginning if there are muktiple of them.
TO DO: add support for different EPSG codes.
"""
from scapy.all import rdpcap
import time, os, sys, datetime, platform, logging, shutil, re, io
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from osgeo import gdal, ogr, osr
from scipy.interpolate import interp1d
from vlp16_tables import *
from collections import OrderedDict
from multiprocessing import Pool, cpu_count
from multiprocessing.managers import SharedMemoryManager
from multiprocessing.shared_memory import SharedMemory
debug = False
log_dir = 'p_logging'
txt_dir_in = 'p_01_apx_csv_shapefile'
txt_in_base_len = len(txt_dir_in)
pcap_dir_in = 'p_22_likely_gcp_pcap'
pcap_in_base_len = len(pcap_dir_in)
out_dir_ascii = 'p_23_boresight_iterations'
out_ascii_base_len = len(out_dir_ascii)
transl_table_fn = 'p_transl_table.txt'
fn_keyword = 'hi_freq_apx'
nl = '\n'
pcap_contents = None
raw_contents = None
def shorten_string(text_string):
"""
Function to remove all duplicates from string
and keep the order of characters same
https://www.geeksforgeeks.org/remove-duplicates-given-string-python/
"""
return "".join(OrderedDict.fromkeys(text_string))
def remove_min_sec(ts):
return (int(ts) // 3600) * 3600
# ### Function to calculate the gaps between given azimuths. Needed to interpolate azimuths that are not given.
def get_azim_gap(azimuths, dual = True, preserve_shape = False):
"""
Only works for dual returns now.
preserve_shape is relevant for dual, where the azimuths repeat.
if False: return only unique gaps.
if True: return same shape as azimuths
"""
if dual:
azimuths_gap_flat = np.zeros_like(azimuths[:,0::2]).flatten()
azimuths_gap_flat[:-1] = ((azimuths[:,0::2].flatten()[1:] - azimuths[:,0::2].flatten()[:-1]) % 36000)
azimuths_gap_flat[-1] = azimuths_gap_flat[-2]
azimuths_gap = azimuths_gap_flat.reshape(azimuths[:,0::2].shape)
if preserve_shape:
#either of the following lines should work the same. only use one.
#azimuths_gap = np.hstack((azimuths_gap.reshape((azimuths_gap.size,1)), azimuths_gap.reshape((azimuths_gap.size,1)))).flatten().reshape(azimuths.shape)
azimuths_gap = np.tile(azimuths_gap,2)
return azimuths_gap
else:
raise NotImplementedError
def get_micros_pulses(micros, dual = True, preserve_shape = False):
"""
preserve_shape is relevant for dual, where the azimuths repeat.
if False: return only unique gaps.
if True: return same shape as azimuths
"""
if dual:
if preserve_shape:
micros_pulses = np.expand_dims(micros, axis=1) + TIMING_OFFSETS_DUAL.T.flatten() * 1e6
else:
micros_pulses = np.expand_dims(micros, axis=1) + TIMING_OFFSETS_DUAL.T[0::2,:].flatten() * 1e6
else:
micros_pulses = np.expand_dims(micros, axis=1) + TIMING_OFFSETS_SINGLE.T.flatten() * 1e6
return micros_pulses
def get_precision_azimuth(az_simple, azimuths_gap, dual = True, minimal_shape = True):
if dual:
timing_offsets_within_block = TIMING_OFFSETS_DUAL[:,0] # TIMING_OFFSETS_DUAL[:,0] is the same as TIMING_OFFSETS_SINGLE[:,0] !
az_pulses = np.tile(az_simple,(LASERS_PER_DATA_BLOCK)).reshape(az_simple.shape[0], LASERS_PER_DATA_BLOCK, az_simple.shape[1])
az_pulses = az_pulses.transpose((0,2,1))
precision_azimuth = az_pulses[:,:,:] + timing_offsets_within_block / (2 * T_CYCLE) * np.expand_dims(azimuths_gap, axis=2)
precision_azimuth = precision_azimuth % 36000
if not minimal_shape:
precision_azimuth = np.tile(precision_azimuth.transpose((0,2,1)), (1,2,1)).transpose((0,2,1))
precision_azimuth = precision_azimuth.reshape((precision_azimuth.shape[0], precision_azimuth.shape[1] * precision_azimuth.shape[2]))
return precision_azimuth
else:
raise NotImplementedError
def boresight_str_to_angles(boresight_str):
scale_factor = 1e-6
boresight_roll = scale_factor * int(boresight_str[2:9]) * (1 if boresight_str[1] == "p" else -1)
boresight_pitch = scale_factor * int(boresight_str[11:18]) * (1 if boresight_str[10] == "p" else -1)
boresight_yaw = scale_factor * int(boresight_str[20:27]) * (1 if boresight_str[19] == "p" else -1)
return boresight_roll, boresight_pitch, boresight_yaw
def read_all_pcap_files_once(pcap_dir_in):
pcap_data = dict()
raw_data = dict()
fnames = sorted([fn for fn in os.listdir(pcap_dir_in) if "line_" in fn and len(fn) == 24 and "pcap" in fn])
for pcap_file_in in fnames:
packets = rdpcap(os.path.join(pcap_dir_in, pcap_file_in))
pcap_data[pcap_file_in] = packets
with open(os.path.join(pcap_dir_in, pcap_file_in), "rb") as fh:
raw_pcap = fh.read()
#24 bytes = global header, 16+42 bytes = packet header, 1200 bytes = vlp returns, 2 bytes = vlp factory bytes
#raw_data[pcap_file_in] = [np.frombuffer(raw_pcap, dtype = np.uint8)[24:].reshape((len(raw_pcap)//1264,1264))[:,16+42:]]
raw_data[pcap_file_in] = np.frombuffer(raw_pcap, dtype = np.uint8)[24:].reshape((len(raw_pcap)//1264,1264))[:,16+42:].flatten()
raw_data = pd.DataFrame.from_dict(raw_data)
raw_data = raw_data.to_records(index=False)
timestamps = {k: [float(val[0].time)] for k, val in pcap_data.items()}
timestamps =
|
pd.DataFrame.from_dict(timestamps, dtype=np.float64)
|
pandas.DataFrame.from_dict
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253:
|
pd.Timestamp("2013-01-10 00:00:00")
|
pandas.Timestamp
|
import pandas as pd
from collections import defaultdict
import math
from DoD.utils import FilterType
import config as C
import os
import psutil
from tqdm import tqdm
import time
import pprint
pp = pprint.PrettyPrinter(indent=4)
# Cache reading and transformation of DFs
cache = dict()
memory_limit_join_processing = C.memory_limit_join_processing * psutil.virtual_memory().total
data_separator = C.separator
tmp_spill_file = "./tmp_spill_file.tmp"
# tmp_df_chunk = "./chunk_df"
def configure_csv_separator(separator):
global data_separator
data_separator = separator
def estimate_output_row_size(a: pd.DataFrame, b: pd.DataFrame):
# 1. check each dataframe's size in memory and number of rows
# a_bytes = sum(a.memory_usage(deep=True))
# b_bytes = sum(b.memory_usage(deep=True))
a_bytes = sum(a.memory_usage(deep=False))
b_bytes = sum(b.memory_usage(deep=False))
a_len_rows = len(a)
b_len_rows = len(b)
# 2. estimate size per row from previous
a_row_size = float(a_bytes/a_len_rows)
b_row_size = float(b_bytes/b_len_rows)
# 3. estimate row size of output join (no selections)
o_row_size = a_row_size + b_row_size
return o_row_size
def does_join_fit_in_memory(chunk, ratio, o_row_size):
estimated_output_num_rows = (float)((chunk / ratio))
estimated_output_size = estimated_output_num_rows * o_row_size
if estimated_output_size >= memory_limit_join_processing:
# eos_gb = estimated_output_size / 1024 / 1024 / 1024
# print("Estimated Output size in GB: " + str(eos_gb))
return False, estimated_output_size/1024/1024/1024
return True, estimated_output_size/1024/1024/1024
def join_ab_on_key_optimizer(a: pd.DataFrame, b: pd.DataFrame, a_key: str, b_key: str,
suffix_str=None, chunksize=C.join_chunksize, normalize=True):
# clean up temporal stuff -- i.e., in case there was a crash
try:
# os.remove(tmp_df_chunk)
os.remove(tmp_spill_file)
except FileNotFoundError:
pass
# if normalize:
a[a_key] = a[a_key].apply(lambda x: str(x).lower())
try:
b[b_key] = b[b_key].apply(lambda x: str(x).lower())
except KeyError:
print("COLS: " + str(b.columns))
print("KEY: " + str(b_key))
# drop NaN/Null values
a.dropna(subset=[a_key], inplace=True)
b.dropna(subset=[b_key], inplace=True)
a_drop_indices = [i for i, el in enumerate(a[a_key]) if el == 'nan' or el == 'null' or el is pd.NaT]
b_drop_indices = [i for i, el in enumerate(b[b_key]) if el == 'nan' or el == 'null' or el is pd.NaT]
a.drop(a_drop_indices, inplace=True)
b.drop(b_drop_indices, inplace=True)
a.reset_index(drop=True)
b.reset_index(drop=True)
if len(a) == 0 or len(b) == 0:
return False
# Estimate output join row size
o_row_size = estimate_output_row_size(a, b)
# join by chunks
def join_chunk(chunk_df, header=False):
# print("First chunk? : " + str(header))
# print("a: " + str(len(a)))
# print("b: " + str(len(chunk_df)))
# worst_case_estimated_join_size = chunksize * len(a) * o_row_size
# if worst_case_estimated_join_size >= memory_limit_join_processing:
# print("Can't join sample. Size: " + str(worst_case_estimated_join_size))
# return False # can't even join a sample
# print(a[a_key].head(10))
# print(chunk_df[b_key].head(10))
target_chunk = pd.merge(a, chunk_df, left_on=a_key, right_on=b_key, sort=False, suffixes=('', suffix_str))
if header: # header is only activated the first time. We only want to do this check the first time
# sjt = time.time()
fits, estimated_join_size = does_join_fit_in_memory(len(target_chunk), (float)(chunksize/len(b)), o_row_size)
# ejt = time.time()
# join_time = (float)((ejt - sjt) * (float)(len(b)/chunksize))
# print("Est. join time: " + str(join_time))
print("Estimated join size: " + str(estimated_join_size))
# if estimated_join_size < 0.01:
# print("TC: " + str(len(target_chunk)))
# print("Ratio: " + str((float)(chunksize/len(b))))
# print("row size: " + str(o_row_size))
# print("FITS? : " + str(fits))
if fits:
return True
else:
return False
target_chunk.to_csv(tmp_spill_file, mode="a", header=header, index=False)
return False
def chunk_reader(df):
len_df = len(df)
init_index = 0
num_chunks = math.ceil(len_df / chunksize)
for i in range(num_chunks):
chunk_df = df[init_index:init_index + chunksize]
init_index += chunksize
yield chunk_df
# swap row order of b to approximate uniform sampling
b = b.sample(frac=1).reset_index(drop=True)
first_chunk = True
all_chunks = [chunk for chunk in chunk_reader(b)]
# for chunk in tqdm(all_chunks):
for chunk in all_chunks:
scp = time.time()
if first_chunk:
fits_in_memory = join_chunk(chunk, header=True)
first_chunk = False
if fits_in_memory: # join in memory and exit
return join_ab_on_key(a, b, a_key, b_key, suffix_str=suffix_str, normalize=False)
else: # just ignore no-fit in memory chunks
return False
else:
join_chunk(chunk)
ecp = time.time()
chunk_time = ecp - scp
estimated_total_time = chunk_time * len(all_chunks)
print("ETT: " + str(estimated_total_time))
if estimated_total_time > 60 * 3: # no more than 3 minutes
return False # cancel this join without breaking the whole pipeline
print("Reading written down relation: ")
# [join_chunk(chunk) for chunk in chunk_reader(b)]
joined = pd.read_csv(tmp_spill_file, encoding='latin1', sep=data_separator)
# clean up temporal stuff
try:
# os.remove(tmp_df_chunk)
os.remove(tmp_spill_file)
except FileNotFoundError:
pass
return joined
def join_ab_on_key_spill_disk(a: pd.DataFrame, b: pd.DataFrame, a_key: str, b_key: str, suffix_str=None, chunksize=C.join_chunksize):
# clean up temporal stuff -- i.e., in case there was a crash
try:
# os.remove(tmp_df_chunk)
os.remove(tmp_spill_file)
except FileNotFoundError:
pass
a[a_key] = a[a_key].apply(lambda x: str(x).lower())
try:
b[b_key] = b[b_key].apply(lambda x: str(x).lower())
except KeyError:
print("COLS: " + str(b.columns))
print("KEY: " + str(b_key))
# Calculate target columns
# a_columns = set(a.columns)
# b_columns = pd.Index([column if column not in a_columns else column + suffix_str for column in b.columns])
#
# # Write to disk the skeleton of the target
# df_target = pd.DataFrame(columns=(a.columns.append(b_columns)))
# df_target.to_csv(tmp_spill_file, index_label=False)
# join by chunks
def join_chunk(chunk_df, header=False):
# chunk_df[b_key] = chunk_df[b_key].apply(lambda x: str(x).lower()) # transform to string for join
target_chunk = pd.merge(a, chunk_df, left_on=a_key, right_on=b_key, sort=False, suffixes=('', suffix_str))
target_chunk.to_csv(tmp_spill_file, mode="a", header=header, index=False)
def chunk_reader(df):
len_df = len(df)
init_index = 0
num_chunks = math.ceil(len_df / chunksize)
for i in range(num_chunks):
chunk_df = df[init_index:init_index + chunksize]
init_index += chunksize
yield chunk_df
# b.to_csv(tmp_df_chunk, index_label=False)
# chunk_reader = pd.read_csv(tmp_df_chunk, encoding='latin1', sep=data_separator, chunksize=chunksize)
first_chunk = True
for chunk in chunk_reader(b):
if first_chunk:
join_chunk(chunk, header=True)
first_chunk = False
else:
join_chunk(chunk)
# [join_chunk(chunk) for chunk in chunk_reader(b)]
joined = pd.read_csv(tmp_spill_file, encoding='latin1', sep=data_separator)
# clean up temporal stuff
try:
# os.remove(tmp_df_chunk)
os.remove(tmp_spill_file)
except FileNotFoundError:
pass
return joined
def join_ab_on_key(a: pd.DataFrame, b: pd.DataFrame, a_key: str, b_key: str, suffix_str=None, normalize=True):
if normalize:
a[a_key] = a[a_key].apply(lambda x: str(x).lower())
b[b_key] = b[b_key].apply(lambda x: str(x).lower())
joined = pd.merge(a, b, how='inner', left_on=a_key, right_on=b_key, sort=False, suffixes=('', suffix_str))
return joined
# def update_relation_cache(relation_path, df):
# if relation_path in cache:
# cache[relation_path] = df
def read_relation(relation_path):
if relation_path in cache:
df = cache[relation_path]
else:
df = pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
cache[relation_path] = df
return df
def read_relation_on_copy(relation_path):
"""
This is assuming than copying a DF is cheaper than reading it back from disk
:param relation_path:
:return:
"""
if relation_path in cache:
df = cache[relation_path]
else:
df = pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
cache[relation_path] = df
return df.copy()
def empty_relation_cache():
global cache
cache = dict()
def get_dataframe(path):
# TODO: only csv is supported
df = pd.read_csv(path, encoding='latin1', sep=data_separator)
return df
def _join_ab_on_key(a: pd.DataFrame, b: pd.DataFrame, a_key: str, b_key: str, suffix_str=None):
# First make sure to remove empty/nan values from join columns
# TODO: Generate data event if nan values are found
a_valid_index = (a[a_key].dropna()).index
b_valid_index = (b[b_key].dropna()).index
a = a.iloc[a_valid_index]
b = b.iloc[b_valid_index]
# Normalize join columns
# a_original = a[a_key].copy()
# b_original = b[b_key].copy()
a[a_key] = a[a_key].apply(lambda x: str(x).lower())
b[b_key] = b[b_key].apply(lambda x: str(x).lower())
joined = pd.merge(a, b, how='inner', left_on=a_key, right_on=b_key, sort=False, suffixes=('', suffix_str))
# # Recover format of original columns
# FIXME: would be great to do this, but it's broken
# joined[a_key] = a_original
# joined[b_key] = b_original
return joined
def apply_filter(relation_path, attribute, cell_value):
# if relation_path in cache:
# df = cache[relation_path]
# else:
# df = pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
# # store in cache
# cache[relation_path] = df
df = read_relation_on_copy(relation_path) # FIXME FIXE FIXME
# df = get_dataframe(relation_path)
df[attribute] = df[attribute].apply(lambda x: str(x).lower())
# update_relation_cache(relation_path, df)
df = df[df[attribute] == cell_value]
return df
def find_key_for(relation_path, key, attribute, value):
"""
select key from relation where attribute = value;
"""
# normalize this value
value = str(value).lower()
# Check if DF in cache
if relation_path in cache:
df = cache[relation_path]
else:
df =
|
pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from scipy import stats
import statsmodels.api as sm
import seaborn as sns
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from functools import wraps
from . import utils
from . import performance as perf
from .plot_utils import (
print_table, customize, ICTS, ICHIST, ICQQ, QRETURNBAR, QRETURNVIOLIN,
QRETURNTS, ICGROUP, AUTOCORR, TBTURNOVER, ICHEATMAP, CUMRET, TDCUMRET,
CUMRETQ, AVGCUMRET, EVENTSDIST, MISSIINGEVENTSDIST
)
DECIMAL_TO_BPS = 10000
def plotting_context(context='notebook', font_scale=1.5, rc=None):
"""
Create alphalens default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by factor font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5}
is being used and will be added to any
rc passed in, unless explicitly overriden.
Returns
-------
seaborn plotting context
Example
-------
with alphalens.plotting.plotting_context(font_scale=2):
alphalens.create_full_tear_sheet(..., set_context=False)
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)
def axes_style(style='darkgrid', rc=None):
"""Create alphalens default axes style context.
Under the hood, calls and returns seaborn.axes_style() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
style : str, optional
Name of seaborn style.
rc : dict, optional
Config flags.
Returns
-------
seaborn plotting context
Example
-------
with alphalens.plotting.axes_style(style='whitegrid'):
alphalens.create_full_tear_sheet(..., set_context=False)
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.axes_style(style=style, rc=rc)
def plot_returns_table(alpha_beta,
mean_ret_quantile,
mean_ret_spread_quantile):
returns_table = pd.DataFrame()
returns_table = returns_table.append(alpha_beta)
returns_table.loc["Mean Period Wise Return Top Quantile (bps)"] = \
mean_ret_quantile.iloc[-1] * DECIMAL_TO_BPS
returns_table.loc["Mean Period Wise Return Bottom Quantile (bps)"] = \
mean_ret_quantile.iloc[0] * DECIMAL_TO_BPS
returns_table.loc["Mean Period Wise Spread (bps)"] = \
mean_ret_spread_quantile.mean() * DECIMAL_TO_BPS
print("收益分析")
print_table(returns_table.apply(lambda x: x.round(3)))
def plot_turnover_table(autocorrelation_data, quantile_turnover):
turnover_table = pd.DataFrame()
for ret_name in quantile_turnover.keys():
for quantile, p_data in quantile_turnover[ret_name].iteritems():
turnover_table.loc["Quantile {} Mean Turnover ".format(quantile),
"{}".format(ret_name)] = p_data.mean()
auto_corr = pd.DataFrame()
for ret_name, p_data in autocorrelation_data.items():
auto_corr.loc["Mean Factor Rank Autocorrelation",
"{}".format(ret_name)] = p_data.mean()
print("换手率分析")
print_table(turnover_table.apply(lambda x: x.round(3)))
print_table(auto_corr.apply(lambda x: x.round(3)))
def plot_information_table(ic_data):
ic_summary_table =
|
pd.DataFrame()
|
pandas.DataFrame
|
import urllib.request
from bs4 import BeautifulSoup
import csv
from time import sleep
import pandas as pd
import json
import urllib.request
import os
from PIL import Image
result = {}
with open('data/data_all.csv', 'r') as f:
reader = csv.reader(f)
header = next(reader) # ヘッダーを読み飛ばしたい時
for row in reader:
# print(row)
title1 = row[0]
id1 = row[1]
if id1 not in result:
# result[title1] = {}
result[id1] = {
"title": title1,
"children": {}
}
tmp = result[id1]["children"]
title2 = row[2]
id2 = row[3]
if id2 not in tmp:
tmp[id2] = {
"title": title2,
"children": {}
}
tmp = tmp[id2]["children"]
no = row[4]
desc = row[5]
if no not in tmp:
tmp[no] = {
"desc": desc,
"images": []
}
tmp = tmp[no]
img = row[6]
tmp["images"].append(img)
data = []
data.append(["ID", "Media Url"])
for id1 in result:
obj1 = result[id1]["children"]
title1 = result[id1]["title"]
for id2 in obj1:
print("**"+id2)
obj2 = obj1[id2]["children"]
title2 = obj1[id2]["title"]
for no in obj2:
obj3 = obj2[no]
id = id1+"-"+id2+"-"+str(no).zfill(4)
for i in range(len(obj3["images"])):
img_url = obj3["images"][i]
row = [id, img_url]
data.append(row)
df = pd.DataFrame(data)
writer =
|
pd.ExcelWriter('data2/images.xlsx', options={'strings_to_urls': False})
|
pandas.ExcelWriter
|
import os
import random
import stat
from collections import Counter
from datetime import datetime
from itertools import groupby
import numpy as np
import pandas as pd
import seaborn as sns
from django.core.paginator import Paginator
from django.db import connection
from django.db.models import Count, Q
from django.db.models.functions import TruncMonth
from api.models import (
AlbumAuto,
AlbumDate,
AlbumUser,
Face,
LongRunningJob,
Person,
Photo,
)
from api.serializers.serializers import LongRunningJobSerializer
from api.util import logger
def get_current_job():
job_detail = None
running_job = (
LongRunningJob.objects.filter(finished=False).order_by("-started_at").first()
)
if running_job:
job_detail = LongRunningJobSerializer(running_job).data
return job_detail
def shuffle(list):
random.shuffle(list)
return list
def is_hidden(filepath):
name = os.path.basename(os.path.abspath(filepath))
return name.startswith(".") or has_hidden_attribute(filepath)
def has_hidden_attribute(filepath):
try:
return bool(os.stat(filepath).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN)
except Exception:
return False
def path_to_dict(path, recurse=2):
d = {"title": os.path.basename(path), "absolute_path": path}
if recurse > 0:
d["children"] = [
path_to_dict(os.path.join(path, x), recurse - 1)
for x in os.scandir(path)
if os.path.isdir(os.path.join(path, x))
and not is_hidden(os.path.join(path, x))
]
else:
d["children"] = []
return d
def jump_by_month(start_date, end_date, month_step=1):
current_date = start_date
yield current_date
while current_date < end_date:
carry, new_month = divmod(current_date.month - 1 + month_step, 12)
new_month += 1
current_date = current_date.replace(
year=current_date.year + carry, month=new_month
)
yield current_date
def get_location_timeline(user):
qs_photos = (
Photo.objects.exclude(geolocation_json={})
.exclude(exif_timestamp=None)
.filter(owner=user)
.order_by("exif_timestamp")
)
timestamp_loc = []
paginator = Paginator(qs_photos, 5000)
for page in range(1, paginator.num_pages + 1):
current_page = [
(p.exif_timestamp, p.geolocation_json["features"][-1]["text"])
for p in paginator.page(page).object_list
]
timestamp_loc = timestamp_loc + current_page
groups = []
uniquekeys = []
for k, g in groupby(timestamp_loc, lambda x: x[1]):
groups.append(list(g)) # Store group iterator as a list
uniquekeys.append(k)
city_start_end_duration = []
for idx, group in enumerate(groups):
city = group[0][1]
start = group[0][0]
if idx < len(groups) - 1:
end = groups[idx + 1][0][0]
else:
end = group[-1][0]
time_in_city = (end - start).total_seconds()
if time_in_city > 0:
city_start_end_duration.append([city, start, end, time_in_city])
locs = list(set([e[0] for e in city_start_end_duration]))
colors = sns.color_palette("Paired", len(locs)).as_hex()
loc2color = dict(zip(locs, colors))
intervals_in_seconds = []
for idx, sted in enumerate(city_start_end_duration):
intervals_in_seconds.append(
{
"loc": sted[0],
"start": sted[1].timestamp(),
"end": sted[2].timestamp(),
"dur": sted[2].timestamp() - sted[1].timestamp(),
}
)
data = [
{
"data": [d["dur"]],
"color": loc2color[d["loc"]],
"loc": d["loc"],
"start": d["start"],
"end": d["end"],
}
for d in intervals_in_seconds
]
return data
def get_search_term_examples(user):
default_search_terms = [
"for people",
"for places",
"for things",
"for time",
"for file path or file name",
]
pp = Photo.objects.filter(owner=user).exclude(captions_json={})
possible_ids = list(pp.values_list("image_hash", flat=True))
if len(possible_ids) > 99:
possible_ids = random.choices(possible_ids, k=100)
logger.info(f"{len(possible_ids)} possible ids")
try:
samples = (
pp.filter(image_hash__in=possible_ids)
.prefetch_related("faces")
.prefetch_related("faces__person")
.all()
)
except ValueError:
return default_search_terms
search_data = []
search_terms = default_search_terms
logger.info("Getting search terms for user %s", user.id)
logger.info("Found %s photos", len(samples))
for p in samples:
faces = p.faces.all()
terms_loc = ""
if p.geolocation_json != {}:
terms_loc = [
f["text"]
for f in p.geolocation_json["features"][-5:]
if not f["text"].isdigit()
]
terms_time = ""
if p.exif_timestamp:
terms_time = [str(p.exif_timestamp.year)]
terms_people = []
if p.faces.count() > 0:
terms_people = [f.person.name.split(" ")[0] for f in faces]
terms_things = ""
if p.captions_json and p.captions_json["places365"] is not None:
terms_things = p.captions_json["places365"]["categories"]
terms = {
"loc": terms_loc,
"time": terms_time,
"people": terms_people,
"things": terms_things,
}
search_data.append(terms)
search_terms = []
for datum in search_data:
term_time = ""
term_thing = ""
term_loc = ""
term_people = ""
if datum["loc"]:
term_loc = random.choice(datum["loc"])
search_terms.append(term_loc)
if datum["time"]:
term_time = random.choice(datum["time"])
search_terms.append(term_time)
if datum["things"]:
term_thing = random.choice(datum["things"])
search_terms.append(term_thing)
if datum["people"]:
term_people = random.choice(datum["people"])
search_terms.append(term_people)
search_term_loc_people = " ".join(shuffle([term_loc, term_people]))
if random.random() > 0.3:
search_terms.append(search_term_loc_people)
search_term_time_people = " ".join(shuffle([term_time, term_people]))
if random.random() > 0.3:
search_terms.append(search_term_time_people)
search_term_people_thing = " ".join(shuffle([term_people, term_thing]))
if random.random() > 0.9:
search_terms.append(search_term_people_thing)
search_term_all = " ".join(
shuffle([term_loc, term_people, term_time, term_thing])
)
if random.random() > 0.95:
search_terms.append(search_term_all)
search_term_loc_time = " ".join(shuffle([term_loc, term_time]))
if random.random() > 0.3:
search_terms.append(search_term_loc_time)
search_term_loc_thing = " ".join(shuffle([term_loc, term_thing]))
if random.random() > 0.9:
search_terms.append(search_term_loc_thing)
search_term_time_thing = " ".join(shuffle([term_time, term_thing]))
if random.random() > 0.9:
search_terms.append(search_term_time_thing)
return list(set(search_terms))
def get_count_stats(user):
num_photos = Photo.objects.filter(owner=user).count()
num_missing_photos = Photo.objects.filter(Q(owner=user) & Q(image_paths=[])).count()
num_faces = Face.objects.filter(photo__owner=user).count()
num_unknown_faces = Face.objects.filter(
Q(person__name__exact="unknown") & Q(photo__owner=user)
).count()
num_labeled_faces = Face.objects.filter(
Q(person_label_is_inferred=False)
& ~Q(person__name__exact="unknown")
& Q(photo__owner=user)
& Q(photo__hidden=False)
).count()
num_inferred_faces = Face.objects.filter(
Q(person_label_is_inferred=True) & Q(photo__owner=user) & Q(photo__hidden=False)
).count()
num_people = (
Person.objects.filter(
Q(faces__photo__hidden=False)
& Q(faces__photo__owner=user)
& Q(faces__person_label_is_inferred=False)
)
.distinct()
.annotate(viewable_face_count=Count("faces"))
.filter(Q(viewable_face_count__gt=0))
.count()
)
num_albumauto = (
AlbumAuto.objects.filter(owner=user)
.annotate(photo_count=Count("photos"))
.filter(Q(photo_count__gt=0))
.count()
)
num_albumdate = (
AlbumDate.objects.filter(owner=user)
.annotate(photo_count=Count("photos"))
.filter(Q(photo_count__gt=0))
.count()
)
num_albumuser = (
AlbumUser.objects.filter(owner=user)
.annotate(photo_count=Count("photos"))
.filter(Q(photo_count__gt=0))
.count()
)
res = {
"num_photos": num_photos,
"num_missing_photos": num_missing_photos,
"num_faces": num_faces,
"num_people": num_people,
"num_unknown_faces": num_unknown_faces,
"num_labeled_faces": num_labeled_faces,
"num_inferred_faces": num_inferred_faces,
"num_albumauto": num_albumauto,
"num_albumdate": num_albumdate,
"num_albumuser": num_albumuser,
}
return res
def get_location_clusters(user):
start = datetime.now()
photos = (
Photo.objects.filter(owner=user)
.exclude(geolocation_json={})
.only("geolocation_json")
.all()
)
coord_names = []
paginator = Paginator(photos, 5000)
for page in range(1, paginator.num_pages + 1):
for p in paginator.page(page).object_list:
for feature in p.geolocation_json["features"]:
if not feature["text"].isdigit():
coord_names.append([feature["text"], feature["center"]])
groups = []
uniquekeys = []
coord_names.sort(key=lambda x: x[0])
for k, g in groupby(coord_names, lambda x: x[0]):
groups.append(list(g)) # Store group iterator as a list
uniquekeys.append(k)
res = [[g[0][1][1], g[0][1][0], g[0][0]] for g in groups]
elapsed = (datetime.now() - start).total_seconds()
logger.info("location clustering took %.2f seconds" % elapsed)
return res
def get_photo_country_counts(user):
photos_with_gps = Photo.objects.exclude(geolocation_json=None).filter(owner=user)
geolocations = [p.geolocation_json for p in photos_with_gps]
countries = []
for gl in geolocations:
if "features" in gl.keys():
for feature in gl["features"]:
if feature["place_type"][0] == "country":
countries.append(feature["place_name"])
counts = Counter(countries)
return counts
def get_location_sunburst(user):
photos_with_gps = (
Photo.objects.exclude(geolocation_json={})
.exclude(geolocation_json=None)
.filter(owner=user)
)
if photos_with_gps.count() == 0:
return {"children": []}
geolocations = []
paginator = Paginator(photos_with_gps, 5000)
for page in range(1, paginator.num_pages + 1):
for p in paginator.page(page).object_list:
geolocations.append(p.geolocation_json)
four_levels = []
for gl in geolocations:
out_dict = {}
if "features" in gl.keys():
if len(gl["features"]) >= 1:
out_dict[1] = gl["features"][-1]["text"]
if len(gl["features"]) >= 2:
out_dict[2] = gl["features"][-2]["text"]
if len(gl["features"]) >= 3:
out_dict[3] = gl["features"][-3]["text"]
four_levels.append(out_dict)
df =
|
pd.DataFrame(four_levels)
|
pandas.DataFrame
|
import numpy as np
import cv2
import csv
import os
import pandas as pd
import time
def calcuNearestPtsDis2(ptList1):
''' Find the nearest point of each point in ptList1 & return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if len(ptList1)<=1:
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
ptList2 = np.delete(ptList1,i,axis=0)
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
def calcuNearestPtsDis(ptList1, ptList2):
''' Find the nearest point of each point in ptList1 from ptList2
& return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
ptList2: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if (not len(ptList2)) or (not len(ptList1)):
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
def calcuNearestPts(csvName1, csvName2):
ptList1_csv = pd.read_csv(csvName1,usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csvName2,usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
ptList1_csv = pd.concat([ptList1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
ptList1_csv.to_csv(csvName1,index=False)
return minDisInd
def drawDisPic(picInd):
picName = 'patients_dataset/image/'+ picInd +'.png'
img = cv2.imread(picName)
csvName1='patients_dataset/data_csv/'+picInd+'other_tumour_pts.csv'
csvName2='patients_dataset/data_csv/'+picInd+'other_lymph_pts.csv'
ptList1_csv = pd.read_csv(csvName1)
ptList2_csv = pd.read_csv(csvName2)
ptList1 = ptList1_csv.values
ptList2 = ptList2_csv.values
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 3 , (0, 0, 255), -1 )
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), (0,255,0), 1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 3 , (255, 0, 0), -1 )
cv2.imwrite( picInd+'_dis.png',img)
def drawDistancePic(disName1, disName2, picID):
''' Draw & save the distance pics
Parameters
----------
disName1,disName2: str
such as 'positive_lymph', 'all_tumour'
picID: str
the patient's ID
'''
cellName_color = {'other_lymph': (255, 0, 0), 'positive_lymph': (255, 255, 0),
'other_tumour': (0, 0, 255), 'positive_tumour': (0, 255, 0)}
ptline_color = {'positive_lymph': (0,0,255), 'positive_tumour': (0,0,255),
'ptumour_plymph': (51, 97, 235), 'other_tumour': (0, 255, 0)}
if (disName1 == 'all_tumour' and disName2 == 'all_lymph') or (disName1 == 'all_tumour' and disName2 == 'positive_lymph'):
line_color = (0,255,255)
elif disName1 == 'positive_tumour' and disName2 == 'positive_lymph':
line_color = (51, 97, 235)
else:
line_color = ptline_color[disName1]
csv_dir = '/data/Datasets/MediImgExp/data_csv'
img_dir = '/data/Datasets/MediImgExp/image'
if disName1 == 'all_tumour' and disName2 == 'positive_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
# positive tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
# other tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList3[ptList1[i, 2],:2]), line_color, 1)
ptList2 = dis2_csv.values
for i in range(len(ptList2)):
img = cv2.line(img, tuple(ptList2[i,:2]), tuple(ptList3[ptList2[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 4, (0, 255, 0), -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 4, (0, 0, 255), -1)
for i in range(len(ptList3)):
img = cv2.circle(img, tuple(ptList3[i,:2]), 4, (255, 255, 0), -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 == 'all_tumour' and disName2 == 'all_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis4_csv = pd.read_csv(csv_dir + '/' + picID + 'other_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
ptList4 = dis4_csv.values[:,:2]
ptList6 = np.concatenate((ptList3, ptList4), axis=0)
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList6)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList6)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList6[ptList1[i, 2],:2]), line_color, 1)
ptList2 = dis2_csv.values
for i in range(len(ptList2)):
img = cv2.line(img, tuple(ptList2[i,:2]), tuple(ptList6[ptList2[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 4, (0, 255, 0), -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 4, (0, 0, 255), -1)
for i in range(len(ptList3)):
img = cv2.circle(img, tuple(ptList3[i,:2]), 4, (255, 255, 0), -1)
for i in range(len(ptList4)):
img = cv2.circle(img, tuple(ptList4[i,:2]), 4, (255, 0, 0), -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 != disName2:
dis1_csv = pd.read_csv(csv_dir + '/' + picID + disName1 + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + disName2 + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
img[:,:, 0] = 255
img[:,:, 1] = 255
img[:,:, 2] = 255
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 5, cellName_color[disName1], -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 5, cellName_color[disName2], -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 == disName2:
dis1_csv = pd.read_csv(csv_dir + '/' + picID + disName1 + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i, :2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList1)** 2, axis=1).astype(np.float32))
minDisInd = np.argmin(disMat)
disMat[minDisInd] = 1000.0
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
img[:,:, 0] = 255
img[:,:, 1] = 255
img[:,:, 2] = 255
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList1[ptList1[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 5, cellName_color[disName1], -1)
cv2.imwrite(picID + disName1 + '_dis.png', img)
def getAllPicsDisCSV():
'''
Get all distance data from the saved csv files (get from the above functions)
'''
base_dir = '/data/Datasets/MediImgExp'
f = open( base_dir + '/' + 'AllDisData.csv','w',encoding='utf-8',newline="")
csv_writer = csv.writer(f)
csv_writer.writerow([ 'Ind','PosiTumourRatio','PosiLymphRatio',
'DisTumourLymph','DisPosiTumour','DisPosiLymph',
'DisPosiTumourPosiLymph','DisTumourPosiLymph'])
process_dir = base_dir + '/process'
csv_dir = base_dir + '/data_csv'
pic_name = os.listdir(process_dir)
picIDList = []
for pic_name_ in pic_name:
picIDList.append( pic_name_.split('_')[0] )
for picID in picIDList:
list_data = []
list_data.append(picID)
# PosiTumourRatio
PosiTumourCsv = pd.read_csv( csv_dir+'/'+ picID +'positive_tumour_pts.csv')
OtherTumourCsv = pd.read_csv( csv_dir+'/'+ picID +'other_tumour_pts.csv')
Num_PosiTumour = PosiTumourCsv.shape[0]
Num_OtherTumour = OtherTumourCsv.shape[0]
if (Num_PosiTumour + Num_OtherTumour)!=0 :
PosiTumourRatio = Num_PosiTumour / (Num_PosiTumour + Num_OtherTumour)
else:
PosiTumourRatio = 'error'
list_data.append(PosiTumourRatio)
# PosiLymphRatio
PosiLymphCsv = pd.read_csv( csv_dir+'/'+ picID +'positive_lymph_pts.csv')
OtherLymphCsv = pd.read_csv( csv_dir+'/'+ picID +'other_lymph_pts.csv')
Num_PosiLymph = PosiLymphCsv.shape[0]
Num_OtherLymph = OtherLymphCsv.shape[0]
if (Num_PosiLymph + Num_OtherLymph)!=0 :
PosiLymphRatio = Num_PosiLymph / (Num_PosiLymph + Num_OtherLymph)
else:
PosiLymphRatio = 'error'
list_data.append(PosiLymphRatio)
# DisTumourLymph
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
ptList3_csv = pd.read_csv(csv_dir+'/'+ picID +'other_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList4_csv = pd.read_csv(csv_dir+'/'+ picID +'other_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList3 = ptList3_csv.values[:,:2]
ptList4 = ptList4_csv.values[:,:2]
ptList1 = np.concatenate((ptList1,ptList3), axis=0)
ptList2 = np.concatenate((ptList2,ptList4), axis=0)
DisTumourLymph = calcuNearestPtsDis(ptList1, ptList2)
list_data.append(DisTumourLymph)
# DisPosiTumour
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
DisPosiTumour = calcuNearestPtsDis2(ptList1)
list_data.append(DisPosiTumour)
# DisPosiLymph
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
DisPosiLymph = calcuNearestPtsDis2(ptList1)
list_data.append(DisPosiLymph)
# DisPosiTumourPosiLymph
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
DisPosiTumourPosiLymph = calcuNearestPtsDis(ptList1, ptList2)
list_data.append(DisPosiTumourPosiLymph)
# DisTumourPosiLymph
ptList1_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csv_dir+'/'+ picID +'positive_lymph_pts.csv',usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
ptList3_csv = pd.read_csv(csv_dir+'/'+ picID +'other_tumour_pts.csv',usecols=['x_cord', 'y_cord'])
ptList3 = ptList3_csv.values[:,:2]
ptList1 = np.concatenate((ptList1,ptList3), axis=0)
DisTumourPosiLymph = calcuNearestPtsDis(ptList1, ptList2)
list_data.append(DisTumourPosiLymph)
csv_writer.writerow(list_data)
def adjustToMultiCSV():
'''
Divide the AllDisData.csv into 6+1=7 csv
'''
base_dir = '/data/Datasets/MediImgExp'
alldata = pd.read_csv( base_dir + '/' + 'AllDisData.csv' )
IndData = alldata['Ind'].values
patient_Ind = []
for IndName in IndData:
patient_Ind.append(IndName.split('-')[0])
patient_Ind = np.unique(patient_Ind)
patient_Ind = sorted( list(map(int,patient_Ind)) )
column_name = ['Ind','2D','3D','GAL9','LAG3','MHC','OX40','OX40L','PD1','PDL1','TIM3']
# stage 1 calculate the 6 csv (10 cols for each csv)
DisPosiTumour = pd.DataFrame(columns=column_name,index= patient_Ind)
DisPosiTumour['Ind'] = patient_Ind
patient_Id = patient_Ind
column_names = ['2D','3D','GAL9','LAG3','MHC','OX40','OX40L','PD1','PDL1','TIM3']
for patient in patient_Id:
for column in column_names:
combine_name = str(patient) + '-' + column
exist_flag = (alldata['Ind'].str[0:len(combine_name)]== combine_name).any()
if not exist_flag:
continue
valid_slice = alldata[ alldata['Ind'].str[0:len(combine_name)]== combine_name ]
arr = valid_slice['DisTumourPosiLymph'].values
if arr.__contains__('error'):
arr = np.setdiff1d(arr, ['error'])
if not arr.shape[0]:
continue
valid_slice_mean = np.mean( arr.astype(np.float32))
DisPosiTumour.loc[ patient ,column ] = valid_slice_mean
DisPosiTumour.to_csv( base_dir + '/' + 'DisTumourPosiLymph.csv',index=False )
# stage 2 add the outputs (4 cols)
all_data_name = base_dir + '/' + 'alldata2.csv'
all_data = pd.read_csv(all_data_name)
all_data.index = all_data['Ind']
valid_columns = ['RELAPSE','RFS','DEATH','OS']
valid_slice = all_data.loc[ patient_Ind, valid_columns ]
DisPosiTumour = pd.read_csv( base_dir + '/' + 'PosiTumourRatio.csv',index_col=0)
DisPosiTumour = pd.concat([DisPosiTumour,valid_slice],axis = 1)
DisPosiTumour.to_csv( base_dir + '/' + 'PosiTumourRatio.csv' )
# stage 3 calculate DisTumourLymph (use all markers' mean values)
DisTumourLymph =
|
pd.DataFrame(columns=['mean_10markers'],index= patient_Ind)
|
pandas.DataFrame
|
#------------------------------------------------------------------------------
'''Handy tools for global use.
- build cases quickly
- read geo_strings from DataFrames
- write/read csv files to DataFrames
- compare sets
- resequence columns in DataFrame
- determine if brackets/parentheses match in a string
- assert pandas Series/DataFrames
- natural sort strings
'''
# flake8 utils/tools.py --ignore=E265,E501,F841,N802,N803
import os
import re
import logging
import tempfile
import inspect
import warnings
import collections as ct
import pandas as pd
import pandas.util.testing as pdt
import lamana as la
# TODO: just import extensions? from config import EXTENSIONS as conf.EXTENSIONS
from lamana.utils import config
# # TODO: Replace with config.EXTENSIONS
# EXTENSIONS = ('.csv', '.xlsx')
# TODO: Add deprecation warning
def laminator(geos=None, load_params=None, mat_props=None, ps=[5], verbose=False):
'''Return a dict of Cases; quickly build and encase a suite of Case objects.
This is useful for tests requiring laminates with different thicknesses,
ps and geometries.
.. note:: Deprecate warning LamAna 0.4.10
`lamanator` will be removed in LamAna 0.5 and replaced by
`lamana.distributions.Cases` because the latter is more efficient.
Parameters
----------
geos : list; default `None`
Contains (optionally tuples of) geometry strings.
load_params : dict; default `None`
Passed-in geometric parameters if specified; else default is used.
mat_props : dict; default `None`
Passed-in materials parameters if specified; else default is used.
ps : list of int, optional; default 5
p values to be looped over; this sets the number of rows per DataFrame.
verbose : bool; default `False`
If True, print a list of Geometries.
See Also
--------
test_sanity#() : set of test functions that run sanity checks
utils.tools.select_frames() : utility function to parse DataFrames
Notes
-----
The preferred use for this function is the following:
>>> for case in cases:
... print(case.LMs)
[<lamana LaminateModel object (400-200-400S)>,
<lamana LaminateModel object (400-200-800)>],
[<lamana LaminateModel object (400-200-400S)>,
<lamana LaminateModel object (400-200-800)>]
>>> (LM for case in cases for LM in case.LMs)
<generator object>
Examples
--------
>>> from lamana.utils import tools as ut
>>> g = ('400-200-400S')
>>> case = ut.laminator(geos=g, ps=[2])
>>> LM = case[0]
>>> LM
<lamana LaminateModel object (400-200-400S)>
>>> g = ['400-200-400S', '400-200-800']
>>> cases = ut.laminator(geos=g, p=[2,3])
>>> cases
{0: <lamana.distributions.Case p=2>,
1: <lamana.distributions.Case p=3>,} # keys by p
>>> for i, case in cases.items(): # process cases
... for LM in case.LMs:
... print(LM.Geometry)
>>> (LM for i, LMs in cases.items() for LM in LMs) # generator processing
'''
# Default
if (geos is None) and (load_params is None) and (mat_props is None):
print('CAUTION: No Geometry or parameters provided to case builder. Using defaults...')
if geos is None:
geos = [('400-200-800')]
if isinstance(geos, str):
geos = [geos]
elif (geos is not None) and not (isinstance(geos, list)):
# TODO: use custom Exception
raise Exception('geos must be a list of strings')
if load_params is None:
''' UPDATE: pull from Defaults()'''
load_params = {
'R': 12e-3, # specimen radius
'a': 7.5e-3, # support ring radius
'p': 5, # points/layer
'P_a': 1, # applied load
'r': 2e-4, # radial distance from center loading
}
if mat_props is None:
mat_props = {
'HA': [5.2e10, 0.25],
'PSu': [2.7e9, 0.33],
}
# Laminates of different ps
'''Fix to output repr; may do this with an iterator class.'''
def cases_by_p():
for i, p in enumerate(ps):
'''raise exception if p is not int.'''
load_params['p'] = p
case = la.distributions.Case(load_params, mat_props)
case.apply(geos)
# Verbose printing
if verbose:
print('A new case was created. '
'# of LaminateModels: {}, p: {}'.format(len(geos), p))
#print('A new case was created. # LaminateModels: %s, ps: %s' % (len(geos), p))
#yield p, case
yield i, case
return dict((i, case) for i, case in cases_by_p())
# Helpers
def get_multi_geometry(Frame):
'''Return geometry string parsed from a multi-plied laminate DataFrame.
Uses pandas GroupBy to extract indices with unique values
in middle and outer. Splits the inner_i list by p. Used in controls.py.
Refactored for even multi-plies in 0.4.3d4.
Parameters
----------
Frame : DataFrame
A laminate DataFrame, typically extracted from a file. Therefore,
it is ambigouous whether Frame is an LFrame or LMFrame.
Notes
-----
Used in controls.py, extract_dataframe() to parse data from files.
See Also
--------
- get_special_geometry: for getting geo_strings of laminates w/nplies<=4.
'''
# TODO: Move to separate function in utils
def chunks(lst, n):
'''Split up a list into n-sized smaller lists; (REF 018)'''
for i in range(0, len(lst), n):
yield lst[i:i + n]
# TODO: why convert to int?; consider conversion to str
def convert_lists(lst):
'''Convert numeric contents of lists to int then str'''
return [str(int(i)) for i in lst]
#print(Frame)
group = Frame.groupby('type')
nplies = len(Frame['layer'].unique())
if nplies < 5:
raise Exception('Number of plies < 5. Use get_special_geometry() instead.')
p = Frame.groupby('layer').size().iloc[0] # should be same for each group
# Identify laminae types by creating lists of indices
# These lists must consider the the inner lists as well
# Final lists appear to contain strings.
# Access types by indices
if nplies % 2 != 0:
middle_group = group.get_group('middle')
inner_group = group.get_group('inner').groupby('side')
outer_group = group.get_group('outer')
# Convert to list of indices for each group
if nplies % 2 != 0:
mid_idx = middle_group.index.tolist()
in_idx = inner_group.groups['Tens.'] # need to split in chunks
out_idx = outer_group.index.tolist()
# Make lists of inner_i indices for a single stress side_
# TODO: Would like to make this inner_i splitting more robust
# TODO: better for it to auto differentiate subsets within inner_i
# NOTE: inner values are converting to floats somewhere, i.e. 400-200-800 --> 400-[200.0]-800
# Might be fixed with _gen_convention, but take note o the inconsistency.
# Looks like out_lst, in_lst, mid_lst are all floats. Out and mid convert to ints.
in_lst = []
for inner_i_idx in chunks(in_idx, p):
#print(inner_i_idx)
t = Frame.ix[inner_i_idx, 't(um)'].dropna().unique().tolist()
in_lst.append(t)
if nplies % 2 != 0:
mid_lst = Frame.ix[mid_idx, 't(um)'].dropna().unique().tolist()
in_lst = sum(in_lst, []) # flatten list
out_lst = Frame.ix[out_idx, 't(um)'].dropna().unique().tolist()
#print(out_lst, in_lst, mid_lst)
# Convert list thicknesses to strings
if nplies % 2 != 0:
mid_con = convert_lists(mid_lst)
else:
mid_con = ['0'] # for even plies
out_con = convert_lists(out_lst)
# Make geometry string
geo = []
geo.extend(out_con)
geo.append(str(in_lst))
geo.extend(mid_con)
geo_string = '-'.join(geo)
# TODO: format geo_strings to General Convention
# NOTE: geo_string comes in int-[float]-int format; _to_gen_convention should patch
geo_string = la.input_.Geometry._to_gen_convention(geo_string)
return geo_string
def get_special_geometry(Frame):
'''Return geometry string parsed from a special-plied (<5) laminate DataFrame.
Parameters
----------
Frame : DataFrame
A laminate DataFrame, typically extracted from a file. Therefore,
it is ambigouous whether Frame is an LFrame or LMFrame.
Notes
-----
Used in controls.py, extract_dataframe() to parse data from files.
See Also
--------
- get_multi_geometry: for getting geo_strings of laminates w/nplies>=5.
'''
#nplies = len(laminate['layer'].unique())
#geo = [
# str(int(thickness)) for thickness # gets unique values
# in laminate.groupby('type', sort=False)['t(um)'].first()
#]
nplies = len(Frame['layer'].unique())
geo = [
str(int(thickness)) for thickness # gets unique values
in Frame.groupby('type', sort=False)['t(um)'].first()
]
#print(geo)
# Amend list by plies by inserting 0 for missing layer type thicknesses; list required for .join
if nplies == 1:
#ply = 'Monolith'
geo.insert(0, '0') # outer
geo.insert(1, '0') # inner
elif nplies == 2:
#ply = 'Bilayer'
geo.append('0') # middle
geo.append('0')
elif nplies == 3:
#ply = 'Trilayer'
geo.insert(1, '0')
elif nplies == 4:
#ply = '4-ply'
geo.append('0')
# TODO: use join
geo[1] = '[' + geo[1] + ']' # redo inner in General Convention notation
else:
# TODO: use custom Exception
raise Exception('Number of plies > 4. Use get_multi_geometry() instead.')
#print('nplies:', nplies)
#print(geo)
geo_string = '-'.join(geo)
# TODO: format geo_strings to General Convention
geo_string = la.input_.Geometry._to_gen_convention(geo_string)
return geo_string
# TODO: Add extract_dataframe and fix_discontinuities here from controls.py; make tests.
# DEPRECATE: remove and replace with Cases() (0.4.11.dev0)
# Does not print cases accurately
# Did not fail test although alias given for name
#def get_frames(cases, name=None, nplies=None, ps=None):
# def select_frames(cases, name=None, nplies=None, ps=None):
# '''Yield and print a subset of case DataFrames given cases.
# Else, print all DataFrames for all cases.
# .. note:: DEPRECATE LamAna 0.4.11.dev0
# `lamanator` will be removed in LamAna 0.5 and replaced by
# `lamana.distributions.Cases` because the latter is more efficient.
#
# Parameters
# ----------
# cases : list of DataFrames
# Contains case objects.
# name : str
# Common name.
# nplies : int
# Number of plies.
# ps : int
# Number of points per layer.
# Examples
# --------
# >>> cases_selected = ut.select_frames(cases, name='Trilayer', ps=[])
# >>> LMs_list = list(cases) # capture generator contents
# >>> LMs_list = [LM for LM in cases_selected] # capture and exhaust generator
# >>> for LMs in cases_selected: # exhaust generator; see contents
# ... print(LMs)
# Notes
# -----
# This function is a predecessor to the modern Cases.select() method. It is
# no longer maintained (0.4.11.dev0), though possibly useful for extracting
# selected DataFrames from existing cases. Formerly `get_frames()`.
# See Also
# --------
# lamana.distributions.Cases.select() : canonical way to select df subsets.
# Yields
# ------
# DataFrame
# Extracted data from a sequence of case objects.
# '''
# # Default
# if ps is None:
# ps = []
# try:
# for i, case in enumerate(cases.values()): # Python 3
# print('case', i + 1)
# for LM in case.LMs:
# #print(LM.Geometry)
# #print(name, nplies, ps)
# # Select based on what is not None
# if not not ps: # if list not empty
# for p in ps:
# #print('p', p)
# if ((LM.name == name) | (LM.nplies == nplies)) & (LM.p == p):
# #print(LM.LMFrame)
# print(LM.Geometry)
# yield LM.LMFrame
# # All ps in the case suite
# elif ((LM.name == name) | (LM.nplies == nplies)):
# #print(LM.LMFrame)
# print(LM.Geometry)
# yield LM.LMFrame
# # No subset --> print all
# if (name is None) & (nplies is None) & (ps == []):
# #print(LM.LMFrame)
# print(LM.Geometry)
# yield LM.LMFrame
# finally:
# print('\n')
# print('Finished getting DataFrames.')
def compare_set(it, others, how='union', test=None):
'''Return a specific set of unique values based on `how` it is evaluated.
Wraps set operators from the standard library. Used to check values in demo.
Parameters
----------
it, others : iterable
A container of unique or non-unique values.
how : {'union', 'intersection', 'difference', 'symmetric_difference'}; default 'union'.
Determine which type of set to use. Applies set theory.
test : {'issubset', 'issuperset', 'isdisjoint'}; default `None`
Test the type of subset.
'''
# Defaults
if isinstance(it, int):
it = [it]
if isinstance(others, int):
others = [others]
if test is None:
test = ''
# Tests
# Subset: [1,2] < [1,2,3] -> True; [1,2] <= [1,2] -> True
if test.startswith('issub'):
return set(it).issubset(others)
# Superset: [1,2,3] > [1,2] -> True; [1,2,3] >= [1,2,3] -> True
if test.startswith('issuper'):
return set(it).issuperset(others)
# Disjoint: [1,2] , [3,4] -> True
if test.startswith('isdis'):
return set(it).isdisjoint(others)
# Set Theory
# Union: [1,2] | [3,4] -> {1,2,3,4}
if how.startswith('uni'):
return set(it).union(others)
# Intersection: [1] & [1,2] -> {1}
elif how.startswith('int'):
return set(it).intersection(others)
# Difference: [1,2,3] - [3,4] -> {1,2}
elif how.startswith('diff'):
return set(it).difference(others)
# Symmetric Difference: [1,2,3] ^ [3,4] -> {1,2,4}
elif how.startswith('symm'):
return set(it).symmetric_difference(others)
def ndframe_equal(ndf1, ndf2):
'''Return True if DataFrames (or Series) are equal; else False.
Parameters
----------
ndf1, ndf2 : Series or DataFrame
Two groups of data in pandas data structures.
'''
try:
if isinstance(ndf1, pd.DataFrame) and isinstance(ndf2, pd.DataFrame):
pdt.assert_frame_equal(ndf1, ndf2)
#print('DataFrame check:', type(ndf1), type(ndf2))
elif isinstance(ndf1, pd.Series) and isinstance(ndf2, pd.Series):
pdt.assert_series_equal(ndf1, ndf2)
#print('Series check:', type(ndf1), type(ndf2))
return True
except (ValueError, AssertionError, AttributeError):
return False
# Refactor to favor string as first arg (0.4.11.dev0)
def is_matched(string, pattern=None):
'''Return True if container brackets or parentheses have equal count; matched.
Parameters
----------
string : str
String to which the pattern in search.
pattern : str; Default None
Regular expression pattern. If None, defaults to test all characters i.e. '.'.
Notes
-----
This function was made to help validate parsed input strings.
Examples
--------
>>> s = 'Here the [brackets] are matched.'
>>> is_matched(s)
True
>>> s = 'Here the [brackets][ are NOT matched.'
>>> is_matched(s)
False
>>> s = 'Only accept [letters] in brackets that are [CAPITALIZED[.'
>>> p = '\W[A-Z]+\W' # regex for all only capital letters and non-alphannumerics
>>> is_matched(s, p)
False
'''
if pattern is None:
pattern = '.+' # default for all characters together (greedily)
bra, ket, par, ren = 0, 0, 0, 0
search = re.findall(pattern, string) # quick, non-iterative extraction
for item in search:
if ('[' in item) or (']' in item):
bra, ket = item.count('['), item.count(']')
if ('(' in item) or (')' in item):
par, ren = item.count('('), item.count(')')
#print(search, len(search))
#print('l_bracket: {0}, r_bracket: {1}, '
# 'l_paren {2}, r_paren: {3}'.format(bra, ket, par, ren))
return bra == ket and par == ren
# IO --------------------------------------------------------------------------
# IO-related functions
# DEPRECATE: verbose; use logging instead
def rename_tempfile(filepath, filename):
'''Return new file path; renames an extant file in-place.'''
dirpath = os.path.dirname(filepath)
new_filepath = os.path.join(dirpath, filename)
new_filepath = get_path(validate=new_filepath)
os.rename(filepath, new_filepath)
return new_filepath
# TODO: Add write functions from controls.py here
def convert_featureinput(FI):
'''Return FeaureInput dict with converted values to Dataframes.
Can accept almost any dict. Converts to DataFrames depending on type.
Returns
-------
defaultdict
Values are DataFrames.
'''
logging.info('Converting FeatureInput values to DataFrames: {}...'.format(
FI.get('Geometry')))
dd = ct.defaultdict(list)
for k, v in FI.items():
if isinstance(v, dict):
try:
# if dict of dicts
dd[k] = pd.DataFrame(v).T
except(ValueError):
# if regular dict, put in a list
dd[k] = pd.DataFrame([v], index=[k]).T
finally:
logging.debug('{0} {1} -> df'.format(k, type(v)))
elif isinstance(v, list):
dd[k] = pd.DataFrame(v, columns=[k])
logging.debug('{0} {1} -> df'.format(k, type(v)))
elif isinstance(v, str):
dd[k] = pd.DataFrame({'': {k: v}})
logging.debug('{0} {1} -> df'.format(k, type(v)))
elif isinstance(v, la.input_.Geometry):
v = v.string # get geo_string
dd[k] = pd.DataFrame({'': {k: v}})
logging.debug('{0} {1} -> df'.format(k, type(v)))
elif isinstance(v, pd.DataFrame): # sometimes materials is df
dd[k] = v
logging.debug('{0} {1} -> df'.format(k, type(v)))
elif not v: # empty container
dd[k] = pd.DataFrame()
logging.debug('{0} {1} -> empty df'.format(k, v))
else:
logging.debug('{0} -> Skipped'.format(type(v))) # pragma: no cover
return dd
def reorder_featureinput(d, keys=None):
'''Return an OrderedDict given a list of keys.
Parameters
----------
d : dict
Any dict; expects a FeatureInput.
keys : list of strings, default None
Order of keys of a FeatureInput.
Examples
--------
>>> case = ut.laminator(dft.geos_standard)[0]
>>> LM = case.LMs[0]
>>> FI = LM.FeatureInput
>>> # Default order
>>> fi = reorder_featureinput(FI)
>>> list(fi.keys())
['Geometry', 'Model', 'Materials', 'Parameters', 'Globals', 'Properties']
>>> # Manage key order
>>> rev_keys = reversed(['Geometry', 'Model', 'Materials',
... 'Parameters', 'Globals', 'Properties'])
>>> fi = reorder_featureinput(FI, keys=rev_keys)
>>> list(fi.keys())
['Properties', 'Globals', 'Parameters', 'Materials', 'Model', 'Geometry']
>>> # Add missing keys (in random order)
>>> fi = reorder_featureinput(FI, [Model', 'Geometry'])
>>> list(fi.keys())
['Model', 'Geometry', 'Materials', 'Parameters', 'Globals', 'Properties']
Notes
-----
- Keys are optional; assumes a typical FeatureInput with default keys.
- If passed keys are shorter the FI keys, ignores empty entries.
- Groups single string entries (i.e. Geometry, Model) upfront for dashboard.
- Properties are last as the materials expand expand column-wise.
'''
# Default keys for standard FeatureInput
if keys is None:
keys = ['Geometry', 'Model', 'Materials', 'Parameters',
'Globals', 'Properties']
od = ct.OrderedDict()
for key in keys:
if key in d: # skip Globals in Laminate.FeaturInput
od[key] = d[key]
# If keys is shorter than FI.keys(), tag on the missing keys
for k in d.keys():
if k not in od:
od[k] = d[k]
return od
def get_path(filename=None, prefix=None, suffix=None, overwrite=True,
dashboard=False, validate=None,):
'''Return the default export path or a file path if given a filename.
Verifies existing paths, else returns an new path.
Parameters
----------
filename : str, default None
File name.
prefix : str, default None
File name prefix.
suffix : |'.csv'|'.xlsx'|, default None
File name extension.
overwrite : bool, default True
Toggle of overwrite protection. If False, increments filename.
dashboard : bool, default False
Auto-append 'dash_' to filename. Flag a dashboard is being made;
only .csv files supportted.
validate : str, default None, optional
Verifies if full file path exists; if so, return incremented file path.
Notes
-----
Need to return different types of paths depending on output file. Here is
what this function can do:
- OK Standardize the default "\export" directory
- OK Give path for csv data file
- OK Give path for csv dashboard file; prepend "dash_"
- OK Give path for xlsx file only (no dashboard)
- OK Support overwrite protection of pre-existing files
- OK Reprocess paths for temporary files
- X Join path components and return a safe path
- X Accept directory paths arg to override the default path; security issue
Key Terms:
* currpath = current working directory
* dirpath = full path - base name
* filepath = full path (includes suffix)
* basename = prefix + filename + suffix
* filename = base name - suffix - prefix
Raises
------
OSError : verify working directory starts at the package root prior writing.
Returns
-------
str
Default export directory path, unless given other information
'''
# Helpers -----------------------------------------------------------------
def protect_overwrite(filepath):
'''Return a new filepath by looping existing files and incrementing.
Notes
-----
- Check for duplicates before writing; overwrite protection
- Read dir if file exists. Append counter to path name if exists.
'''
basename = os.path.basename(filepath)
dirpath = os.path.dirname(filepath)
counter = 1
# Edit basename
while os.path.isfile(filepath):
##suffix = [ext for ext in EXTENSIONS if basename.endswith(ext)][0]
##filename = basename.replace(suffix, '')
filename, suffix = os.path.splitext(basename)
logging.debug('filename: {}, suffix: {}'.format(filename, suffix))
increment = ''.join(['(', str(counter), ')'])
filename = ''.join([filename, increment])
logging.info("Overwrite protection: filename exists."
" Incrementing name to '{}' ...".format(filename))
# Pretend the default directory path with a simple recursive call
##filepath = get_path(filename=filename, suffix=suffix, overwrite=True) # or inifinite loop
filepath = os.path.join(dirpath, ''.join([filename, suffix]))
counter += 1
return filepath
# Reset Defaults ----------------------------------------------------------
if filename is None:
filename = ''
if prefix is None:
prefix = ''
if suffix is None:
suffix = ''
elif suffix.endswith('csv'):
suffix = config.EXTENSIONS[0]
elif suffix.endswith('xlsx'):
suffix = config.EXTENSIONS[1]
# Set Root/Source/Default Paths -------------------------------------------
# The export folder is relative to the root (package) path
# TODO: replace with config.DEFAULTPATH
sourcepath = os.path.abspath(os.path.dirname(la.__file__))
packagepath = os.path.dirname(sourcepath)
if not os.path.isfile(os.path.join(packagepath, 'setup.py')):
raise OSError(
'Package root path location is not correct: {}'
' Verify working directory is ./lamana.'.format(packagepath)
)
defaultpath = os.path.join(packagepath, 'export')
dirpath = defaultpath
logging.debug('Root path: {}'.format(packagepath))
if not filename and (suffix or dashboard):
logging.warn("Missing 'filename' arg. Using default export directory ...")
# File Path ---------------------------------------------------------------
if validate:
# Just check if exists. Give new filepath is so.
return protect_overwrite(validate)
if filename:
prefix = 'dash_'if dashboard and suffix.endswith('csv') else ''
if dashboard and not suffix.endswith('csv'):
logging.info('Only .csv files support separate dashboards.'
' Using default export directory...')
if not suffix:
logging.warn('Missing suffix. No action taken.')
basename = ''.join([prefix, filename, suffix])
filepath = os.path.join(dirpath, basename)
if not overwrite:
return protect_overwrite(filepath)
return filepath
return dirpath
def export(L_, overwrite=False, prefix=None, suffix=None, order=None,
offset=3, dirpath=None, temp=False, keepname=True, delete=False):
'''Write LaminateModels and FeatureInput to files; return a tuple of paths.
Supported formats:
- .csv: two files; separate data and dashboard files
- .xlsx: one file; data and dashboard sheets
Parameters
----------
L_ : Laminate-like object
Laminate or subclass containing attributes and calculations.
overwrite : bool; default False
Save over files with the same name. Prevents file incrementation
and excess files after cyclic calls.
prefix : str; default None
Prepend a prefix to the filename. Conventions are:
- '' : legacy or new
- 'w': written by the package
- 't': temporary file; used when tempfile is renamed
- 'r': redone; altered from legacy
- 'dash': dashboard
suffix : |'.csv'|'.xlsx'|
Determines the file format by appending to filename; default '.xlsx'.
order: list
Keys of the FeatureInput.
offset : int
Blank columns between data in the dashboard.
dirpath : str, optional; default "/export" directory
Directory path to store resulting csv files; custom path NotImplemented.
temp : bool, default False
Make temporary files in the OS Temp directory instead.
keepname : bool, True
Toggle renaming temporary files; temp must be True.
delete : bool, default False
Force file removal after created; mainly used for temporary files.
Returns
-------
tuple
Full file paths (str) of the created files LM and dashboard data.
See Also
--------
- get_path(): deals with munging paths and validations
- convert_featureinput(): convert dict values to DataFrames
- reorder_featureinput(): make and ordered list for the dashboard
- make_tempfile(): review how Python 'mkstemp' makes temp; NotImplemented
- rename_tempfile(): rename the file post closing file.
Notes
-----
Contents are written into an "/export" directory. FeatureInput data a.k.a "dashboard".
We use mkstemp (low-level), which leaves it open for to_excel to write.
Here are technical characteristics:
- OK Outputs different file formats.
- OK Writes regular or temporary files (get auto-deleted by the OS; for tests)
- OK Calls helper functions to clean paths and datastructures.
- OK Allows prefixing for file indentification.
- OK Outputs data and dashboards.
- OK Works even when files exist in the directory.
- OK Auto creates "\export" directory if none exists.
- OK Renames temporary files by default.
- OK Support Laminate and LaminateModel objects
- X Supports custom directory paths.
Examples
--------
>>> from lamana.utils import tools as ut
>>> case = ut.laminator('400.0-[200.0]-800.0')[0]
>>> LM = case.LMs[0]
>>> export(LM)
'~/lamana/export/laminatemodel_5ply_p5_t2.0_400.0-[200.0]-800.0.xlsx'
>>> # Overwrite Protection
>>> export(LM, overwrite=False)
'~/lamana/export/laminatemodel_5ply_p5_t2.0_400.0-[200.0]-800.0(1).xlsx'
>>> # Optional .csv Format
>>> export(LM, suffix='.csv')
'~/lamana/export/dash_laminatemodel_5ply_p5_t2.0_400.0-[200.0]-800.0.csv'
'~/lamana/export/laminatemodel_5ply_p5_t2.0_400.0-[200.0]-800.0.csv'
>>> # Optional Temporary Files
>>> export(LM, suffix='.csv', temp=True)
'temp/t_dash_laminatemodel_5ply_p5_t2.0_400.0-[200.0]-800.0.csv'
'temp/t_laminatemodel_5ply_p5_t2.0_400.0-[200.0]-800.0.csv'
>>> # Supports Laminate objects too
>>> from lamana.models import Wilson_LT as wlt
>>> dft = wlt.Defaults()
>>> L = la.constructs.Laminate(dft.FeatureInput)
>>> export(L)
'~/lamana/export/dash_laminate_5ply_p5_t2.0_400.0-[200.0]-800.0.xlsx'
'''
# Parse for Filename ------------------------------------------------------
nplies = L_.nplies
p = L_.p
# TODO: Fix units
t_total = L_.total * 1e3 # (in mm)
geo_string = L_.Geometry.string
FI = L_.FeatureInput
# Path Munge --------------------------------------------------------------
if prefix is None:
prefix = ''
if suffix is None:
suffix = config.EXTENSIONS[1] # .xlsx
if dirpath is None:
###
# Prepend files with 'w' for "written" by the package
# NOTE: removed default w_ prefix; check the control and other uses to maintain coding
# TODO: rename legacy files with "l_"
###
if hasattr(L_, 'LMFrame'):
kind = 'laminatemodel'
else:
kind = 'laminate'
filename = r'{}{}_{}ply_p{}_t{:.1f}_{}'.format(
prefix, kind, nplies, p, t_total, geo_string)
# Force-create export directory or path (REF 047)
# Send file to export directory
defaultpath = get_path()
if not os.path.exists(defaultpath):
# TODO: Make sure this log prints out
logging.info(
'No default export directory found. Making directory {} ...'.format(defaultpath)
)
os.makedirs(defaultpath)
else:
raise NotImplementedError('Custom directory paths are not yet implemented.')
# Prepare FeatureInput ----------------------------------------------------
if order is None:
order = ['Geometry', 'Model', 'Materials',
'Parameters', 'Globals', 'Properties'] # default
converted_FI = convert_featureinput(FI)
reordered_FI = reorder_featureinput(converted_FI, order) # elevates strings
dash_df =
|
pd.concat(converted_FI)
|
pandas.concat
|
# import pyedflib
import numpy as np
from scipy import signal as sg
import argparse
import sys
import json
# import matplotlib.pyplotmatplot as plt
from pprint import pprint
import pandas as pd
class Notch():
Q = 0
f0 = 0
def __init__(self,f0=60,Q=50):
self.f0=f0
self.Q=Q
def argparse(self):
parser = argparse.ArgumentParser()
parser.add_argument('-i','--archivo',help='Ingrese el nombre del archivo .edf a utilizar',type = str)
parser.add_argument('-fo','--fo',help='Frecuencia que se desea filtrar. Por defecto fo = 60',type = float)
parser.add_argument('-Q','--Q',help='Factor de calidad del filtro. Por defecto Q = 50',type = int)
parser.add_argument('-e','--edf',help='Nombre y dirección del archivo .edf de salida',type = str)
parsedargs = parser.parse_args()
arc = parsedargs.archivo
output = parsedargs.edf
if (parsedargs.fo != None):
if (parsedargs.fo> 0):
self.f0 = parsedargs.fo
if (parsedargs.Q != None):
if (parsedargs.Q>0):
self.Q = parsedargs.Q
return arc,output
# def read_edf(self,nameEdf):
# '''
# Descripción: Se encarga de leer el archivo .edf
# Entradas: - nameEdf: nombre del archivo .edf
# Salidas: - in_signal: Matriz de Canales X Tiempo
# - fs: Frecuencia de muestro
# - headers: Etiquetas del archivo .edf
# '''
# edf = pyedflib.EdfReader(nameEdf)
# headers = edf.getSignalHeaders()
# nch = edf.signals_in_file
# nsig = edf.getNSamples()[0]
# fs = edf.getSampleFrequency(0)
# in_signal = np.zeros((nch,nsig))
# for x in range(nch):
# in_signal[x,:] = edf.readSignal(x)
# edf._close()
# del edf
# return in_signal,fs,headers
def filt(self,in_signal,fs):
'''
Descripción: Se encarga de filtrar los datos del EEG
Entradas: - in_signal: Matriz de Canales X Tiempo
- fs: Frecuencia de muestro
Salidas: - out_signal: EEG filtrado (Matriz de CanalesXTiempo)
'''
w0 = self.f0/(fs/2)
num,den = sg.iirnotch(w0,self.Q)
out_signal = np.zeros((len(in_signal),len(in_signal[0])))
for i in range(0,len(in_signal)):
out_signal[i]=sg.filtfilt(num,den,in_signal[i])
return out_signal,num,den
# def write_edf(self,in_signal,headers,nameEdf):
# '''
# Descripción: Se encarga de escribir los datos del nuevo EEG
# Entradas: - headers: etiquetas del .edf
# - in_signal: Matriz de Canales X Tiempo
# - nameEdf : Nombre con el que se desea guardar el nuevo .edf
# '''
# edf = pyedflib.EdfWriter(nameEdf,len(in_signal),file_type=pyedflib.FILETYPE_EDFPLUS)
# edf_info = []
# edf_signal = []
# for i in range (len(in_signal)):
# channel_info={'label':headers[i]['label'],'dimension':headers[i]['dimension'],'sample_rate':headers[i]['sample_rate'],'physical_max':headers[i]['physical_max'] , 'physical_min': headers[i]['physical_min'], 'digital_max': headers[i]['digital_max'], 'digital_min': headers[i]['digital_min'], 'transducer':headers[i]['transducer'] , 'prefilter':headers[i]['prefilter']+',notch '+str(self.f0)+'Hz'}
# edf_info.append(channel_info)
# edf_signal.append(in_signal[i])
# edf.setSignalHeaders(edf_info)
# edf.writeSamples(edf_signal)
# edf.close()
# del edf
#Read data from stdin
def read_in():
lines = sys.stdin.readlines()
#Since our input would only be having one line, parse our JSON data from that
return json.loads(lines[0])
if __name__ == '__main__':
notch1 = Notch()
# argparse input mode
# print ("start of notch")
# arc,output = notch1.argparse()
# signal , fs ,headers= notch1.read_edf(arc)
# filtered_signal,num,den = notch1.filt(signal[:,232250:234750],fs)
# print("size of output",filtered_signal.shape)
# print(vals)
# print("size of input",in_signal.shape)
# fig,subplt=plt.subplots(3,1,figsize=(8,5))
# subplt[0].plot(t,inp[9][ni:nf])
# subplt[0].title.set_text('Señal original')
# subplt[0].grid()
#notch1.write_edf(filtered_signal,headers,output)
# python-shell input mode
inSignals=read_in()
nch=len(inSignals)
nSamples = len(inSignals[0]['data'])
fs=inSignals[0]['samplefrequency']
# print(nch,nSamples)
in_signal = np.zeros((nch,nSamples))
# print(len(inSignals))
# print(len(inSignals[0]['data']))
currentCh=0
for item in inSignals:
for subitem in item['data']:
subitem.pop('time', None)
df =
|
pd.DataFrame(item['data'])
|
pandas.DataFrame
|
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
from simRunSensitivityApr15 import simGenerator
pd.options.display.max_columns = 100
class senAnalyze:
colorFile = 'colors_dict_April_15_2021.xlsx'
incomeDict = {'Low': 9000, 'Medium': 19500, 'High': 1000000}
Age = {'Under 65': 65, 'Above 65': 65}
gridBG = '#f2f2f2'
def __init__(self):
self.sim = simGenerator()
self.res_df = self.sim.generateSimulation()
self.colorExcel = pd.read_excel(senAnalyze.colorFile)
self.res_df['stay'] = 0
self.res_df['stay'] = self.res_df['status'].apply(
lambda x: 1 if x == 'stay' else 0)
self.res_df['leave'] = 0
self.res_df['leave'] = self.res_df['status'].apply(
lambda x: 1 if x == 'leave' else 0)
self.res_df['New Comers'] = 0
self.res_df['New Comers'] = self.res_df['status'].apply(
lambda x: 1 if x == 'New Comers' else 0)
self.res_df.fillna(0, inplace=True)
self.res_df['Under 65'] = 0
self.res_df['Above 65'] = 0
self.res_df['Low Income'] = 0
self.res_df['Medium Income'] = 0
self.res_df['High Income'] = 0
self.res_df['Under 65'] = self.res_df['age'].apply(
lambda x: 1 if x < 65 else 0)
self.res_df['Above 65'] = self.res_df['age'].apply(
lambda x: 1 if x >= 65 else 0)
self.res_df['Low Income'] = self.res_df['income'].apply(
lambda x: 1 if (x < senAnalyze.incomeDict['Low']) else 0)
self.res_df['Medium Income'] = self.res_df['income'].apply(
lambda x: 1 if (x >= senAnalyze.incomeDict['Low']) & (x < senAnalyze.incomeDict['Medium']) else 0)
self.res_df['High Income'] = self.res_df['income'].apply(
lambda x: 1 if (x >= senAnalyze.incomeDict['Medium']) else 0)
self.cols_keep = ['aprtmentSize', 'ProjNumber', 'yearsInBldg', 'age', 'rent', 'own', 'agentID', 'prjectType', 'tic', 'status',
'CostForStaying', 'rentPrice', 'stay', 'leave', 'New Comers', 'Under 65', 'Above 65', 'Low Income', 'Medium Income', 'High Income']
self.cols_stat = ['aprtmentSizeMean', 'ProjNumber', 'yearsInBldgMean', 'aprtmentSizeMeanStay', 'aprtmentSizeNewComer', 'AgeMean', 'AgeMeanNew', 'AgeMeanStay', 'AgeMeanLeave', 'AgeOldStayNew', 'AgeYoungStayNew', 'AgeOldStay', 'AgeYoungStay', 'AgeOldNew', 'AgeYoungNew', 'IncomeMean', 'IncomeMeanStay', 'IncomeMeanNew', 'IncomeMeanLeave', 'IncomeHighStay', 'IncomeMedStay',
'IncomeLowStay', 'IncomeHighNew', 'IncomeMedNew', 'IncomeLowNew', 'IncomeHighStayNew', 'IncomeMedStayNew', 'IncomeLowStayNew', 'meanIncomeStay', 'meanIncomeNewComers', 'meanIncomeStay_N_new', 'rentCount', 'ownCount', 'rentStayCount', 'rentNewCount', 'ownStayCount', 'ownNewCount', 'TotalAgentsCount', 'prjectType', 'tic', 'stay', 'new comers', 'CostForStaying', 'rentPrice']
self.res2 =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.api import Int64Index
class TestDataFrameTruncate:
def test_truncate(self, datetime_frame, frame_or_series):
ts = datetime_frame[::3]
if frame_or_series is Series:
ts = ts.iloc[:, 0]
start, end = datetime_frame.index[3], datetime_frame.index[6]
start_missing = datetime_frame.index[2]
end_missing = datetime_frame.index[7]
# neither specified
truncated = ts.truncate()
tm.assert_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
tm.assert_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
tm.assert_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
tm.assert_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
tm.assert_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
tm.assert_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
tm.assert_equal(truncated, expected)
# corner case, empty series/frame returned
truncated = ts.truncate(after=ts.index[0] - ts.index.freq)
assert len(truncated) == 0
truncated = ts.truncate(before=ts.index[-1] + ts.index.freq)
assert len(truncated) == 0
msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00"
with pytest.raises(ValueError, match=msg):
ts.truncate(
before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq
)
def test_truncate_copy(self, datetime_frame):
index = datetime_frame.index
truncated = datetime_frame.truncate(index[5], index[10])
truncated.values[:] = 5.0
assert not (datetime_frame.values[5:11] == 5).any()
def test_truncate_nonsortedindex(self, frame_or_series):
# GH#17935
obj = DataFrame({"A": ["a", "b", "c", "d", "e"]}, index=[5, 3, 2, 9, 0])
if frame_or_series is Series:
obj = obj["A"]
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
obj.truncate(before=3, after=9)
def test_sort_values_nonsortedindex(self):
# TODO: belongs elsewhere?
rng = date_range("2011-01-01", "2012-01-01", freq="W")
ts = DataFrame(
{"A": np.random.randn(len(rng)), "B": np.random.randn(len(rng))}, index=rng
)
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
ts.sort_values("A", ascending=False).truncate(
before="2011-11", after="2011-12"
)
def test_truncate_nonsortedindex_axis1(self):
# GH#17935
df = DataFrame(
{
3: np.random.randn(5),
20: np.random.randn(5),
2: np.random.randn(5),
0: np.random.randn(5),
},
columns=[3, 20, 2, 0],
)
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
df.truncate(before=2, after=20, axis=1)
@pytest.mark.parametrize(
"before, after, indices",
[(1, 2, [2, 1]), (None, 2, [2, 1, 0]), (1, None, [3, 2, 1])],
)
@pytest.mark.parametrize("klass", [Int64Index, DatetimeIndex])
def test_truncate_decreasing_index(
self, before, after, indices, klass, frame_or_series
):
# https://github.com/pandas-dev/pandas/issues/33756
idx = klass([3, 2, 1, 0])
if klass is DatetimeIndex:
before = pd.Timestamp(before) if before is not None else None
after =
|
pd.Timestamp(after)
|
pandas.Timestamp
|
from unittest import TestCase
from collections import Counter
import pandas as pd
import numpy as np
from scripts.vars import CONDITIONAL, TAG, BORDERLINE, SAFE, NOISY
from scripts.utils import add_tags, Bounds
import scripts.vars as my_vars
class TestAddTags(TestCase):
"""Tests add_tags() from utils.py"""
def test_add_tags_safe_borderline(self):
"""Add tags when using nominal and numeric features assigning borderline and safe as tags"""
df = pd.DataFrame({"A": ["low", "low", "high", "low", "low", "high"], "B": [1, 1, 4, 1.5, 0.5, 0.75],
"C": [3, 2, 1, .5, 3, 2],
"Class": ["apple", "apple", "banana", "banana", "banana", "banana"]})
class_col_name = "Class"
lookup = \
{
"A":
{
'high': 2,
'low': 4,
CONDITIONAL:
{
'high':
Counter({
'banana': 2
}),
'low':
Counter({
'banana': 2,
'apple': 2
})
}
}
}
correct = pd.DataFrame({"A": ["low", "low", "high", "low", "low", "high"], "B": [1, 1, 4, 1.5, 0.5, 0.75],
"C": [3, 2, 1, .5, 3, 2],
"Class": ["apple", "apple", "banana", "banana", "banana", "banana"],
TAG: [BORDERLINE, BORDERLINE, SAFE, BORDERLINE, BORDERLINE, BORDERLINE]
})
my_vars.closest_rule_per_example = {}
my_vars.closest_examples_per_rule = {}
my_vars.seed_rule_example = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
my_vars.seed_example_rule = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
# Note: examples_covered_by_rule implicitly includes the seeds of all rules
my_vars.examples_covered_by_rule = {}
classes = ["apple", "banana"]
min_max = pd.DataFrame({"C": {"min": 1, "max": 5}, "B": {"min": 1, "max": 11}})
k = 3
rules = [
pd.Series({"A": "low", "B": Bounds(lower=1, upper=1), "C": Bounds(lower=3, upper=3), "Class": "apple"},
name=0),
pd.Series({"A": "low", "B": Bounds(lower=1, upper=1), "C": Bounds(lower=2, upper=2), "Class": "apple"},
name=1),
pd.Series({"A": "high", "B": Bounds(lower=4, upper=4), "C": Bounds(lower=1, upper=1),
"Class": "banana"}, name=2),
pd.Series({"A": "low", "B": Bounds(lower=1.5, upper=1.5), "C": Bounds(lower=0.5, upper=0.5),
"Class": "banana"}, name=3),
pd.Series({"A": "low", "B": Bounds(lower=0.5, upper=0.5), "C": Bounds(lower=3, upper=3),
"Class": "banana"}, name=4),
pd.Series({"A": "high", "B": Bounds(lower=0.75, upper=0.75), "C": Bounds(lower=2, upper=2),
"Class": "banana"}, name=5)
]
my_vars.all_rules = {0: rules[0], 1: rules[1], 2: rules[2], 3: rules[3], 4: rules[4], 5: rules[5]}
tagged = add_tags(df, k, rules, class_col_name, lookup, min_max, classes)
# Due to floating point precision, use approximate comparison
self.assertTrue(tagged.equals(correct))
def test_add_tags_noisy_safe(self):
"""Add tags when using nominal and numeric features and assigning noisy and safe as tags"""
df = pd.DataFrame({"A": ["low", "low", "high", "low", "low", "high"], "B": [1, 1, 4, 1.5, 0.5, 0.75],
"C": [3, 2, 1, .5, 3, 2],
"Class": ["apple", "banana", "banana", "banana", "banana", "banana"]})
class_col_name = "Class"
lookup = \
{
"A":
{
'high': 2,
'low': 4,
CONDITIONAL:
{
'high':
Counter({
'banana': 2
}),
'low':
Counter({
'banana': 2,
'apple': 2
})
}
}
}
correct = pd.DataFrame({"A": ["low", "low", "high", "low", "low", "high"], "B": [1, 1, 4, 1.5, 0.5, 0.75],
"C": [3, 2, 1, .5, 3, 2],
"Class": ["apple", "banana", "banana", "banana", "banana", "banana"],
TAG: [NOISY, BORDERLINE, SAFE, SAFE, SAFE, SAFE]
})
classes = ["apple", "banana"]
my_vars.closest_rule_per_example = {}
my_vars.closest_examples_per_rule = {}
my_vars.seed_rule_example = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
my_vars.seed_example_rule = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
# Note: examples_covered_by_rule implicitly includes the seeds of all rules
my_vars.examples_covered_by_rule = {}
min_max =
|
pd.DataFrame({"C": {"min": 1, "max": 5}, "B": {"min": 1, "max": 11}})
|
pandas.DataFrame
|
from abc import ABC, abstractmethod
from pandas import DataFrame
import json
from kestrel.exceptions import KestrelInternalError
class AbstractDisplay(ABC):
@abstractmethod
def to_string(self):
pass
@abstractmethod
def to_html(self):
pass
@abstractmethod
def to_json(self):
pass
@abstractmethod
def to_dict(self):
pass
class DisplayDataframe(AbstractDisplay):
def __init__(self, data):
if isinstance(data, DataFrame):
self.dataframe = data
else:
try:
self.dataframe =
|
DataFrame(data)
|
pandas.DataFrame
|
from ruffus import *
import pandas as pd
from util.util import file_by_type
import datatables.traveltime
import pipeline.data_bt
import pipeline.data_vs
# BLUETH_YYYYMMDD.traveltime, VSDATA_YYYYMMDD.volume -> YYYYMMDD.merged
@collate([pipeline.data_bt.import_bt, pipeline.data_vs.import_vs],
regex(r"^data/(BLUETH|VSDATA)_(\d{8})\.(traveltime|volume)$"),
r"data/\2.merged")
def merge_data(infiles, mergefile):
assert (len(infiles) == 2), "Expected exactly 2 files (BLUETH_... and VSDATA_...) to merge"
bt_f = file_by_type(infiles, '.traveltime') # 'BLUETH_...'
vs_f = file_by_type(infiles, '.volume') # 'VSDATA_...'
bt = pd.read_csv(bt_f, header=None, names=['t', 'travel time'])
vs =
|
pd.read_csv(vs_f, header=None, names=['t', 'volume'])
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import os
import csv
import re
import itertools
import time
import sys
from CNN import CNN
import tensorflow as tf
from hashtag_separator import get_word_vectors
from data_loader import load_glove_data
def trainCNN(tweets, train_tweets_ind, x_valid, y_valid, y_val, glove):
"""
Trains CNN
INPUT:
tweets: Dataframe with tweets
train_tweets_ind: shuffled indices of training dataset
x_valid: tweets for validation
y_valid: labels for tweets for validation
y_val: matrix containing label for each tweet
glove: glove dictionary
OUTPUT:
path: path to the last saved checkpoint
"""
with tf.Graph().as_default():
sess = tf.Session()
with sess.as_default():
cnn = CNN()
global_step = tf.Variable(0, name="global_step", trainable=False)
learning_rate = tf.train.exponential_decay(5e-4, global_step, 500, 0.97, staircase=True, name='learning_rate')
train_op = tf.train.AdamOptimizer(learning_rate, name='optimizer').minimize(cnn.loss, global_step=global_step, name='optim_operation')
# Use timestamps for summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join("../data/models/", timestamp))
# Loss, train and validation accuracy summaries for visualization
loss_summary = tf.summary.scalar('loss', cnn.loss)
acc_summary = tf.summary.scalar('accuracy', cnn.accuracy)
lambda_summary = tf.summary.scalar('learning_rate', learning_rate)
train_summary_op = tf.summary.merge([loss_summary, acc_summary, lambda_summary], name='training_summaries')
train_summary_dir = os.path.join(out_dir, 'summaries', 'train')
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
valid_summary_op = tf.summary.merge([loss_summary, acc_summary], name='validation_summaries')
valid_summary_dir = os.path.join(out_dir, 'summaries', 'validation')
valid_summary_writer = tf.summary.FileWriter(valid_summary_dir, sess.graph)
# Checkpoint
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.all_variables(), max_to_keep=50, name='saver')
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
feed_dict = {cnn.x: x_batch, cnn.y: y_batch, cnn.dropout_prob: 0.5}
_, step, summaries, loss, accuracy = sess.run([train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy], feed_dict)
print('step %d, loss %.3f, accuracy %.2f' %(step,loss,100*accuracy))
train_summary_writer.add_summary(summaries, step)
def valid_step(x_batch, y_batch):
feed_dict = {cnn.x: x_batch, cnn.y: y_batch, cnn.dropout_prob:1.0}
step, summaries, loss, accuracy, pred = sess.run([global_step, valid_summary_op, cnn.loss, cnn.accuracy, cnn.y_pred], feed_dict)
print('step %d, loss %.3f, accuracy %.2f' %(step,loss,100*accuracy))
valid_summary_writer.add_summary(summaries, step)
for epoch in range(30):
for batch_ind in batch_iter(train_tweets_ind, 1024):
minibatch_x = get_word_vectors(tweets.loc[batch_ind], glove)
minibatch_y = y_val[batch_ind, :]
train_step(minibatch_x, minibatch_y)
current_step = tf.train.global_step(sess, global_step)
if current_step % 20 == 0:
print("\nEvaluation:")
valid_step(x_valid, y_valid)
if current_step % 1000 == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
return path
def batch_iter(train_tweets_ind, batch_size):
"""
Batch iterator
INPUT:
train_tweets_ind: indices for tweets to take in the batch
batch_size: size of batch
"""
n_ind = len(train_tweets_ind)
shuffled_indices = np.random.permutation(train_tweets_ind)
for batch_num in range(int(np.ceil(n_ind/batch_size))):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, n_ind)
if start_index != end_index:
yield shuffled_indices[start_index:end_index]
def eval_from_checkpoint(test_tweets, path, glove):
"""
Evaluates predictions based on saved checkpoint
INPUT:
test_tweets: Dataframe with test tweets
path: location of checkpoint
glove: glove dictionary
OUTPUT:
test_predictions: predictions of test tweets
"""
test_embeddings = get_word_vectors(test_tweets, glove)
graph = tf.Graph()
with graph.as_default():
checkpoint_file = tf.train.latest_checkpoint(path)
sess = tf.Session()
with sess.as_default():
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
x = graph.get_operation_by_name("embedding").outputs[0]
dropout_prob = graph.get_operation_by_name("dropout_prob").outputs[0]
predictions = graph.get_operation_by_name("softmax/predicted_classes").outputs[0]
test_predictions = sess.run(predictions, {x:test_embeddings, dropout_prob:1.0})
test_predictions[test_predictions==0] = -1
return test_predictions
def create_submission(y_pred):
"""
Creates submission file
INPUT:
y_pred: list of predictions of test tweets
"""
with open("../output/submission.csv", 'w') as csvfile:
fieldnames = ['Id', 'Prediction']
writer = csv.DictWriter(csvfile, delimiter=",", fieldnames=fieldnames)
writer.writeheader()
i = 1
for y in y_pred:
writer.writerow({'Id':int(i),'Prediction':y})
i += 1
def main():
if len(sys.argv) != 2 or (sys.argv[1] not in ['train', 'eval']):
print("Invalid command. Expected 'train' or 'eval'.")
return
if sys.argv[1] == 'train':
if not os.path.exists('../data/parsed/test_full.csv'):
print('test_full.csv doesn\'t exist')
return None
tweets_test = pd.read_csv('../data/parsed/test_full.csv', names=['id', 'tweet'])
tweets_test = tweets_test.drop(columns=['id'])
if not os.path.exists('../data/parsed/train_pos_full.csv'):
print('train_pos_full.csv doesn\'t exist')
return None
if not os.path.exists('../data/parsed/train_neg_full.csv'):
print('train_neg_full.csv doesn\'t exist')
return None
pos = pd.read_csv('../data/parsed/train_pos_full.csv', names=['tweet'])
pos['sentiment']=1
neg =
|
pd.read_csv('../data/parsed/train_neg_full.csv', names=['tweet'])
|
pandas.read_csv
|
import pandas as pd
import os
import warnings
def read_checkm_output(taxonomy_table, completness_table):
c_df = pd.read_csv(completness_table, index_col=0,sep='\t')[
["Completeness", "Contamination", "Strain heterogeneity"]
]
t_df = pd.read_csv(taxonomy_table, index_col=0,sep='\t')[
[
"# unique markers (of 43)",
"# multi-copy",
"Insertion branch UID",
"Taxonomy (contained)",
"Taxonomy (sister lineage)",
"GC",
"Genome size (Mbp)",
"Gene count",
"Coding density",
]
]
df = pd.concat([c_df, t_df], axis=1)
return df
def load_checkm_tax(checkm_taxonomy_file):
checkmTax=
|
pd.read_table(checkm_taxonomy_file,index_col=0)
|
pandas.read_table
|
'''
Data_Combiner.py
Created on: July 22th, 2019
Author: <NAME>
Script intended to take all of the .xlsx files from a folder and combine them into a single csv file.
Additional data features including a moving average and a change in each variable are added to potentially improve
machine learning algorithms, which is the next code to run after combining all of the data.
Last updated 7/24/19
'''
import os, csv
import pandas as pd
import numpy as np
file_list = os.listdir()
file_list.remove('Data_Combiner.py')
if('All_Data.csv' in file_list):
file_list.remove('All_Data.csv')
if('Data_Analysis.py' in file_list):
file_list.remove('Data_Analysis.py')
if('Data_Collector_script.py' in file_list):
file_list.remove('Data_Collector_script.py')
#print(file_list)
def avging(df, col, num_points):
indexer = 0
avging = np.array([df[col][0]]*num_points)
avg = []
for value in df[col]:
avging[indexer] = value
avg.append(np.sum(avging).astype(np.float32)/num_points)
if indexer == num_points-1:
indexer = 0
else:
indexer = indexer + 1
return avg
def delta_var(df, col):
deltas = []
past_val = df[col][0]
for value in df[col]:
deltas.append(value - past_val)
past_val = value
return deltas
add_columns = ['Xavg', 'Yavg', 'Zavg', 'Pitchavg', 'Rollavg', 'Yawavg', 'dx', 'dy', 'dz', 'dpitch', 'droll', 'dyaw']
base_columns = ['Accel_x', 'Accel_y', 'Accel_z', 'Pitch', 'Roll', 'Yaw']
moving_average_num = 5
main_df = pd.DataFrame(columns = base_columns + add_columns + ['Danger'])
empty_files = []
for item in file_list:
df =
|
pd.read_excel(item)
|
pandas.read_excel
|
import json
import multiprocessing as mp
import os
import re
import subprocess
import sys
import warnings
from functools import partial
from operator import itemgetter
from random import randint
import cxxfilt
import numpy as np
import pandas as pd
from fuzzywuzzy import fuzz
from sklearn.cluster import KMeans
from sofa_common import *
from sofa_config import *
from sofa_models import SOFATrace
from sofa_print import *
sofa_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category"] # 12
"""
Move sofa_hsg from sofa_preprocess to sofa_hsg
Goal:
step 1 sofa record "the program" --logdir sofalog1
step 2 sofa record "the program" --logdir sofalog2
step 3 sofa diff --base_logdir=sofalog1 --match_logdir=sofalog2
"""
def list_downsample(list_in, plot_ratio):
new_list = []
for i in range(len(list_in)):
if i % plot_ratio == 0:
# print("%d"%(i))
new_list.append(list_in[i])
return new_list
def cpu_trace_read_hsg(sample, t_offset, cfg, cpu_mhz_xp, cpu_mhz_fp):
fields = sample.split()
event = event_raw = 0
counts = 0
if re.match(r'\[\d+\]', fields[1]) is not None:
time = float(fields[2].split(':')[0])
func_name = '[%s]'%fields[4].replace('-','_') + fields[6] + fields[7]
counts = float(fields[3])
event_raw = 1.0 * int("0x01" + fields[5], 16)
# add new column to cpu_traces
feature_types = fields[3].split(':')[0]
mem_addr = fields[5]
else:
time = float(fields[1].split(':')[0])
func_name = '[%s]'%fields[3].replace('-','_') + fields[5] + fields[6]
counts = float(fields[2])
event_raw = 1.0 * int("0x01" + fields[4], 16)
# add new column to cpu_traces
feature_types = fields[3].split(':')[0]
mem_addr = fields[4]
if not cfg.absolute_timestamp:
time = time - cfg.time_base
t_begin = time + t_offset
t_end = time + t_offset
if len(cpu_mhz_xp) > 1:
duration = counts/(np.interp(t_begin, cpu_mhz_xp, cpu_mhz_fp)*1e6)
else:
duration = counts/(3000.0*1e6)
event = np.log10(event_raw)
if cfg.perf_events.find('cycles') == -1:
duration = np.log2(event_raw/1e14)
trace = [t_begin, # 0
event, # % 1000000 # 1
duration, # 2
-1, # 3
-1, # 4
0, # 5
0, # 6
-1, # 7
-1, # 8
int(fields[0].split('/')[0]), # 9
int(fields[0].split('/')[1]), # 10
func_name, # 11
0, # 12
feature_types, # 13
mem_addr] # 14
return trace
def random_generate_color():
rand = lambda: randint(0, 255)
return '#%02X%02X%02X' % (rand(), rand(), rand())
def kmeans_cluster(num_of_cluster, X):
'''
num_of_cluster: how many groups of data you prefer
X: input taining data
'''
random_state = 170
try:
num_of_cluster = 5
y_pred = KMeans(n_clusters=num_of_cluster, random_state=random_state).fit_predict(X)
except :
num_of_cluster = len(X) # minimum number of data
y_pred = KMeans(n_clusters=num_of_cluster, random_state=random_state).fit_predict(X)
return y_pred
def sofa_hsg(cfg, swarm_groups, swarm_stats, t_offset, cpu_mhz_xp, cpu_mhz_fp):
"""
hierarchical swarm generation
"""
with open(cfg.logdir + 'perf.script') as f, warnings.catch_warnings():
warnings.filterwarnings("ignore")
samples = f.readlines()
print_info(cfg, "Length of cpu_traces for HSG = %d" % len(samples))
if len(samples) > 0:
with mp.Pool() as pool:
res = pool.map(
partial(
cpu_trace_read_hsg,
t_offset = t_offset,
cfg = cfg,
cpu_mhz_xp = cpu_mhz_xp,
cpu_mhz_fp = cpu_mhz_fp
),
samples)
cpu_traces = pd.DataFrame(res)
sofa_fieldnames_ext = sofa_fieldnames + ["feature_types", "mem_addr"] # mem_addr for swarm-diff
cpu_traces.columns = sofa_fieldnames_ext
cpu_traces.to_csv(
cfg.logdir + 'hsg_trace.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
res_viz = list_downsample(res, cfg.plot_ratio)
swarm_cpu_traces_viz = pd.DataFrame(res_viz)
swarm_cpu_traces_viz.columns = sofa_fieldnames_ext
char1 = ']'
char2 = '+'
# demangle c++ symbol, little dirty work here...
swarm_cpu_traces_viz['name'] = swarm_cpu_traces_viz['name'].apply(
lambda x: cxxfilt.demangle(str( x[x.find(char1)+1 : x.find(char2)].split('@')[0] ))
)
### N features ###
## In order to merge, give unique id of each data within 10 msec by time quotient
swarm_cpu_traces_viz['quotient'] = swarm_cpu_traces_viz['timestamp'].apply(lambda x: int( x * 1000 // 10)) # //: quotient
# count feature_types in each 10 msec groups, and create a dictionary for mapping
df2s = {}
for quotient, dataframe in swarm_cpu_traces_viz.groupby(['quotient','event']):
# api value_counts(): return pandas series
df2s[quotient] = dataframe.feature_types.value_counts()
df2 = pd.DataFrame.from_dict(df2s, orient='index').fillna(0).astype(np.int64)
df = swarm_cpu_traces_viz.copy()
swarm_cpu_traces_viz = pd.merge(df, df2, left_on=['quotient','event'], right_index=True).copy()
### swarm seperation by memory location
#swarm_groups = []
feature_list = ['event']
if cfg.hsg_multifeatures:
with open(cfg.logdir+'perf_events_used.txt','r') as f:
lines = f.readlines()
feature_list.extend(lines[0].split(','))
try:
feature_list.remove('cycles')
feature_list.remove('event')
except:
pass
print_info(cfg, 'HSG features: '+','.join(feature_list))
idx = 0
showing_idx = 0
if len(cpu_traces) > 0:
# get memory index by cheange float to integer
swarm_cpu_traces_viz['event_int'] = swarm_cpu_traces_viz.event.apply(lambda x: int(x)) # add new column 'event_int'
# swarm seperate
event_groups = swarm_cpu_traces_viz.groupby('event_int')
#swarm_stats = []
# add different swarm groups
for mem_index, l1_group in event_groups:
# kmeans
X = pd.DataFrame(l1_group['event'])
num_of_cluster = 2
y_pred = kmeans_cluster(num_of_cluster, X)
# add new column
# TODO: Eliminate warning of SettingWithCopyWarning
l1_group['cluster'] = y_pred
#for i in range(len(y_pred)):
# group.loc[i, 'cluster'] = y_pred[i]
# group by new column
clusters = l1_group.groupby('cluster')
for l2_group_idx, l2_group in clusters:
# group by process id
#pid_clusters = cluster.groupby('pid')
X = pd.DataFrame(l2_group['event'])
num_of_cluster = 4
y_pred = kmeans_cluster(num_of_cluster, X)
# add new column
l2_group['cluster'] = y_pred
#for i in range(len(y_pred)):
# l2_group.loc[i, 'cluster'] = y_pred[i]
# group by new column
l3_groups = l2_group.groupby('cluster')
for l3_group_idx, l3_group in l3_groups:
# kmeans
X = pd.DataFrame(l3_group['event'])
num_of_cluster = 4
y_pred_pid_cluster = kmeans_cluster(num_of_cluster, X)
# add new column
l3_group['cluster_in_pid'] = y_pred_pid_cluster
# group by new column
cluster_in_pid_clusters = l3_group.groupby('cluster_in_pid')
for mini_cluster_id, cluster_in_pid_cluster in cluster_in_pid_clusters:
# duration time
total_duration = cluster_in_pid_cluster.duration.sum()
mean_duration = cluster_in_pid_cluster.duration.mean()
count = len(cluster_in_pid_cluster)
# swarm diff
# caption: assign mode of function name
mode = str(cluster_in_pid_cluster['name'].mode()[0]) # api pd.Series.mode() returns a pandas series
mode = mode.replace('::', '@') # str.replace(old, new[, max])
# print('mode of this cluster: {}'.format(str(mode[:35]))) # uncomment this line of code when you need to check the mode of cluster
swarm_stats.append({'keyword': 'SWARM_' + '["' + str(mode[:35]) + ']' + ('_' * showing_idx),
'duration_sum': total_duration,
'duration_mean': mean_duration,
'example':cluster_in_pid_cluster.head(1)['name'].to_string().split(' ')[2],
'count':count})
swarm_groups.append({'group': cluster_in_pid_cluster.drop(columns = ['event_int', 'cluster', 'cluster_in_pid']), # data of each group
'color': random_generate_color(),
'keyword': 'SWARM_' + '[' + str(mode[:35]) + ']' + ('_' * showing_idx),
'total_duration': total_duration})
idx += 1
swarm_groups.sort(key=itemgetter('total_duration'), reverse = True) # reverse = True: descending
swarm_stats.sort(key=itemgetter('duration_sum'), reverse = True)
print_title('HSG Statistics - Top-%d Swarms'%(cfg.num_swarms))
print('%45s\t%13s\t%30s'%('SwarmCaption', 'ExecutionTime[sum,mean,count] (s)', 'Example'))
for i in range(len(swarm_stats)):
if i >= cfg.num_swarms:
break
else:
swarm = swarm_stats[i]
print('%45s\t%.6lf, %.6lf, %6d\t%45s' % (swarm['keyword'],
swarm['duration_sum']/4.0,
swarm['duration_mean']/4.0,
swarm['count'], swarm['example']))
return swarm_groups, swarm_stats
def sofa_hsg_to_sofatrace(cfg, swarm_groups, traces): # record_for_auto_caption = True # temperarily: for auto-caption
dummy_i = 0
auto_caption_filename_with_path = cfg.logdir + 'auto_caption.csv'
with open(auto_caption_filename_with_path,'w') as f:
f.close()
for swarm in swarm_groups[:cfg.num_swarms]:
if cfg.display_swarms:
sofatrace = SOFATrace() # file.class
sofatrace.name = 'swarm' + str(dummy_i) # avoid errors casued by JavaScript. No special meaning, can be random unique ID.
sofatrace.title = swarm['keyword'] # add number of swarm
sofatrace.color = swarm['color']
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = swarm['group'].copy()
traces.append(sofatrace)
# append to csv file every time using pandas funciton
swarm['group']['cluster_ID'] = dummy_i # add new column cluster ID to dataframe swarm['group']
copy = swarm['group'].copy()
#print('*************************')
copy.to_csv(auto_caption_filename_with_path, mode='a', header=False, index=False)
#print('\nRecord for auto-caption, data preview: \n{}'.format(copy.head(2)))
#print('*************************')
# --- for auto-caption --- #
dummy_i += 1
csv_input = pd.read_csv(auto_caption_filename_with_path, names=list(copy))
if 'instructions' not in copy.columns:
csv_input.insert(17, 'instructions', 0)
if 'cache-misses' not in copy.columns:
csv_input.insert(18, 'cache-misses', 0)
if 'branch-miss' not in copy.columns:
csv_input.insert(19, 'branch-misses', 0)
csv_input.to_csv(auto_caption_filename_with_path, header=False)
return traces
def matching_two_dicts_of_swarm(standard_dict, matching_dict, res_dict):
"""
String Matching Funciton:
match two dictoinaries with same amount of key-value pairs
and return matching result, a dict of dict called res_dict.
* standard_dict: The standard of dict
* matching_dict: The dict that i want to match
* res_dict: the result, a dict of dict
"""
key = 0 # key: number, no string
pop_list = [k for k,v in matching_dict.items()]
#print(pop_list)
for i in standard_dict.keys(): # control access index of standard_dict. a more pythonic way
threshold = 0
for j in pop_list: # control access index of matching_dict
f_ratio = fuzz.ratio(standard_dict[i], matching_dict[j])
if f_ratio > threshold: # update matching result only when the fuzz ratio is greater
#print('New matching fuzz ratio {} is higher than threshold {}'\
# .format(f_ratio, threshold))
key = j # update key
threshold = f_ratio # update threshold value
#print('Update new threshold {}'\
# .format(threshold))
res_dict.update({i: {j: matching_dict[i]}}) #
# pop out matched key-value pair of matching dict
if pop_list:
pop_list.remove(key) # remove specific value. remove() fails when no elements remains
#print(res_dict)
return res_dict # return result dict
def evaluation_of_matching_result(base_df, matching_df1, final_df, eval_list, tmp_dict):
"""
calculate intersection rate of two dataframe
intersection rate = num_t_stdswarm / total_num_t_mtchswarm
num_t_stdswarm: traces in standard swarm
total_num_t_mtchswarm: total traces number in matching swarm
"""
base_duration_list = []
match_duration_list = []
diff_list = []
# calculate num_t_stdswarm & total_num_t_mtchswarm
for id_of_cluster in final_df.index:
base_id = final_df['base_cluster_ID'].loc[id_of_cluster]
bs_df = base_df.groupby(['cluster_ID','function_name'])\
.agg({'function_name':['count']})\
.loc[base_id]\
.reset_index()
bs_df.columns = ['base_func_name', 'count']
# sum up duration time
base_total_duration = base_df['duration'].loc[base_df['cluster_ID'] == id_of_cluster].sum()
#print('base_total_duration = {} sec'.format(base_total_duration))
#print('Function name in cluster: \n{}\n'.format(bs_df.sort_values(by=['count'], ascending=False)))
# total_num_t_mtchswarm
match_id = final_df['match_cluster_ID'].loc[id_of_cluster]
match_df = matching_df1.groupby(['cluster_ID','function_name'])\
.agg({'function_name':['count']})\
.loc[match_id]\
.reset_index()
match_df.columns = ['match_func_name', 'count']
# sum up duration time
match_total_duration = matching_df1['duration'].loc[matching_df1['cluster_ID'] == id_of_cluster].sum()
total_num_t_mtchswarm = match_df['count'].sum()
#print('match_total_duration = {} sec'.format(match_total_duration))
#print('Function name in cluster: \n{}\n'.format(match_df.sort_values(by=['count'], ascending=False)))
#print('---------------------------------------------------------')
#print('Total number of function name in cluster: {}'.format(total_num_t_mtchswarm))
# add total duration of each cluster
base_duration_list.append(base_total_duration)
match_duration_list.append(match_total_duration)
diff_list.append(abs(base_total_duration - match_total_duration))
# To calculate num_t_stdswarm, get intersection of two cluster first
intersected_df = bs_df.merge(match_df, left_on='base_func_name', right_on='match_func_name', how='outer')
intersected_df.dropna(inplace=True) # drop row with NaN value and inplace
intersected_df['min_value'] = intersected_df.min(axis=1)
num_t_stdswarm = intersected_df['min_value'].sum()
intersect_percent = num_t_stdswarm * 100 / float(total_num_t_mtchswarm) # float number
if(intersect_percent != 0.0):
eval_list.append(intersect_percent)
#print('merge frame:\n {}\n'.format(intersected_df))
#print('num_t_stdswarm = {}'.format(num_t_stdswarm))
#print('intersection rate = (num_t_stdswarm / total_num_t_mtchswarm) x 100% = {}%'.format(intersect_percent))
#print('---------------------------------------------------------')
#break; # test only one cluster
# How many cluster match correctly
intersect_percent = len(eval_list) * 100.0 / len(base_df['cluster_ID'].unique())
#print('Number of intersection rate > 0% percent: {}%'.format(intersect_percent)) #
# deal with duration time of each cluster among two dataframes
tmp_dict = {'base_duration(sec)': base_duration_list, 'match_duration(sec)': match_duration_list, 'cluster_diff(sec)': diff_list}
tmp_df = pd.DataFrame.from_dict(tmp_dict) # dummy dataframe, just for concatenation
final_df = pd.concat([final_df, tmp_df], axis=1, sort=False) # axis=1: horizontal direction
print('Diff Report: \n{}'.format(final_df))
return final_df # return final_df in case information lost
def sofa_swarm_diff(cfg):
"""
swarm diff: design for auto-caption. compare two different sofalog
"""
#print('Python verison: {}'.format(sys.version)) # check python version
column_list = ["timestamp", "event", "duration",
"deviceId", "copyKind", "payload",
"bandwidth", "pkt_src", "pkt_dst",
"pid", "tid", "function_name", "category",
"feature_types", "mem_addr", "quotient",
"cycles", "instructions", "cache-misses", "branch-misses",
"cluster_ID"]
base_df = pd.read_csv(cfg.base_logdir + 'auto_caption.csv', names=column_list)
#print(base_df)
#print('There are {} clusters in standard_df\n'.format(len(base_df['cluster_ID'].unique())))
base_df_groupby = base_df.groupby(['cluster_ID','function_name']).agg({'function_name':['count']})
## --- Need refactor here --- ##
## Access data of multiIndex dataframe
# get column names
#TODO: fix bug of 'the label [0] is not in the [index]'
print(base_df_groupby)
df = base_df_groupby.loc[0].reset_index()
flat_column_names = []
for level in df.columns:
# tuple to list
flat_column_names.extend(list(level)) # extend(): in-place
if '' in flat_column_names:
flat_column_names.remove('')
# remove duplicate and empty
#flat_column_names = filter(None, flat_column_names) # filter empty
flat_column_names = list(set(flat_column_names)) # deduplicate
print('original order: {}'.format(flat_column_names))
# change member order of list due to set is a random order
if flat_column_names[0] == 'count':
myorder = [1,0]
flat_column_names = [flat_column_names[i] for i in myorder]
# print('New order: {}'.format(flat_column_names))
base_df_dict = {}
# Transform multi-index to single index, and update string to dict standard_df_dict
for id_of_cluster in base_df['cluster_ID'].unique():
#print('\nCluster ID : {}'.format(id_of_cluster))
df = base_df_groupby.loc[id_of_cluster].reset_index()
df.columns = flat_column_names
#print(df.sort_values(by=['count'], ascending=False)) # pd.DataFrame.sort_values() return a DataFrame
base_df_dict.update({id_of_cluster: df.function_name.str.cat(sep=' ', na_rep='?')})
## Dataframe that i want to match
matching_df1 = pd.read_csv(cfg.match_logdir + 'auto_caption.csv', names=column_list)
matching_df1_groupby = matching_df1.groupby(['cluster_ID','function_name']).agg({'function_name':['count']})
# get column names
df = matching_df1_groupby.loc[0].reset_index()
flat_column_names = []
for level in df.columns:
# tuple to list
flat_column_names.extend(list(level)) # extend(): in-place
# remove duplicate and empty
flat_column_names = filter(None, flat_column_names) # filter empty
flat_column_names = list(set(flat_column_names)) # deduplicate
# print(flat_column_names)
# change member order of list due to set is a random order
if flat_column_names[0] == 'count':
myorder = [1,0]
flat_column_names = [flat_column_names[i] for i in myorder]
# print('New order: {}'.format(flat_column_names))
matching_df1_dict = {}
# Transform multi-index to single index, and update string to dict standard_df_dict
for id_of_cluster in matching_df1['cluster_ID'].unique():
#print('\nCluster ID : {}'.format(id_of_cluster))
df = matching_df1_groupby.loc[id_of_cluster].reset_index()
df.columns = flat_column_names
# print(df.sort_values(by=['count'], ascending=False))
matching_df1_dict.update({id_of_cluster: df.function_name.str.cat(sep=' ', na_rep='?')})
## --- Need refactor here --- ##
res_dict = {}
res_dict = matching_two_dicts_of_swarm(base_df_dict, matching_df1_dict, res_dict)
## show all stats (Ans) and matching results (algorithm)
base_dict_to_df = pd.DataFrame.from_dict(base_df_dict, orient='index', columns=['Before: function_name'])
base_dict_to_df['base_cluster_ID'] = base_dict_to_df.index
base_dict_to_df = base_dict_to_df[['base_cluster_ID', 'Before: function_name']]
res_dict_to_df = pd.DataFrame() # create an empty frame
res_list = [k for k,v in res_dict.items()]
for key in res_list:
df = pd.DataFrame.from_dict(res_dict[key], orient='index', columns=['After: funciton name']) # res_dict[key]: a dict
df['match_cluster_ID'] = df.index
res_dict_to_df = res_dict_to_df.append(df, ignore_index=True) # df.append(): not in-place
res_dict_to_df = res_dict_to_df[['match_cluster_ID', 'After: funciton name']]
final_df =
|
pd.concat([base_dict_to_df, res_dict_to_df], axis=1)
|
pandas.concat
|
import numpy as np
import pandas as pd
import datetime as dt
def make_column_index(df:pd.DataFrame, column_label:str) -> None:
df.index = df[column_label]
df.drop(column_label, axis=1, inplace=True)
df.index.name = None
def rename_column(df:pd.DataFrame, column_label:str, new_name:str) -> None:
df.rename(columns={column_label: new_name}, inplace=True)
def remove_outliers(df:pd.DataFrame, column_label:str) -> str:
raw_data = df[column_label]
mean = np.mean(raw_data)
std_dev = np.std(raw_data)
outliers_cutoff = std_dev * 3
lower_limit = mean - outliers_cutoff
upper_limit = mean + outliers_cutoff
no_outliers = raw_data.apply(lambda x: mean if x > upper_limit or x < lower_limit else x)
outlier_column = f'{column_label} (-outliers)'
df[outlier_column] = no_outliers
return outlier_column
def unstack_data(df:pd.DataFrame, metric_column:str, unstack_column:str) -> pd.DataFrame:
pivoted = pd.pivot_table(df, index=['date'], values=[metric_column], columns=[unstack_column], aggfunc=[np.sum])
pivoted.columns = pivoted.columns.droplevel(0)
pivoted.columns.name = None
pivoted = pivoted.reset_index()
pivoted.columns = [col[1] for col in pivoted.columns]
metric_columns = list(pivoted.columns[1:])
metric_columns = [f"{c} | {metric_column}" for c in metric_columns]
pivoted.columns = ["date"] + metric_columns
pivoted.fillna(0, inplace=True)
return pivoted
def transpose_data(df:pd.DataFrame) -> pd.DataFrame:
date_col = df.columns[0]
df = df.T
df.columns = df.iloc[0]
df.drop(df.index[0], inplace=True)
df.reset_index(inplace=True)
df.rename(columns={"index": date_col}, inplace=True)
df = df.rename_axis(None, axis = 1)
return df
def interpolate_weekly_data(df, date_col=None, resample_col=None):
df = df.copy()
if date_col == None:
date_col = df.columns[0]
if resample_col == None:
resample_col = df.columns[1]
df[date_col] = df[date_col].apply(lambda x: dt.datetime.strptime(f"{x}-1", "%Y-%W-%w")) # mondays
df[date_col] = pd.to_datetime(df[date_col]) # datetime
df.set_index(date_col, inplace=True)
df_reindexed = df.reindex(pd.date_range(start=df.index.min(),
end=df.index.max() + dt.timedelta(days=6),
freq='1D'))
col_to_resample = df_reindexed.columns[0]
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].fillna(0)
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].astype(str)
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].apply(lambda x: x.replace(',',''))
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].apply(lambda x: x.replace('$',''))
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].apply(lambda x: x.replace('£',''))
df_reindexed[col_to_resample] = df_reindexed[col_to_resample].apply(lambda x: x.replace('€',''))
df_reindexed[col_to_resample] = pd.to_numeric(df_reindexed[col_to_resample])
df_reindexed[col_to_resample].replace({0:np.nan}, inplace=True)
df = df_reindexed.interpolate(method='linear')
df = df / 7
df.reset_index(inplace=True)
df.rename({'index': 'date'}, axis=1, inplace=True)
return df
def interpolate_monthly_data(df, date_col=None, resample_col=None):
df = df.copy()
if date_col == None:
date_col = df.columns[0]
if resample_col == None:
resample_col = df.columns[1]
df[date_col] =
|
pd.to_datetime(df[date_col], format="%Y-%m")
|
pandas.to_datetime
|
"""
Analysis functions for nodeAnalysis.ipynb.
<NAME>
"""
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import seaborn as sns
def getEdgePair(node1, node2, edges):
"""
Get edge information that involves the specified two nodes.
This function uses node indices as parameters; to pass in
protein residue indices, use the function getContactPair.
Parameters
----------
node1 : int
Index of node_i. This is the node index not protein index.
node2 : int
Index of node_j. Should be larger than node_i.
edges : pandas dataframe
Dataframe in which to search for interaction.
Returns
-------
pandas dataframe
Notes
-----
Can't find the interaction you're looking for? Might need to
add/subtract an offset if you used the diffEdges function.
The offset value (with sign and magnitude) should have been
printed out: "Shifting node indices by..."
"""
return edges[(edges.node_i == node1) & (edges.node_j == node2)]
def getContactPair(res1, res2, nodes, edges):
"""
Get edge information that involves the two specified protein residues.
This function is similar to getEdgePair but takes into protein indices.
Parameters
----------
res1 : int
protein residue number to use for node_i
res2 : int
protein resiude number to use for node_j. Should be greater than res1.
nodes : pandas dataframe
Dataframe in which to translate protein index to node index (indices).
edges : pandas dataframe
Dataframe in which to search for interaction.
Returns
-------
pandas dataframe
"""
df1 = getResidInfo(res1,nodes,resExcludes=['WAT'])
df2 = getResidInfo(res2,nodes,resExcludes=['WAT'])
indexList1 = df1.index.tolist()
indexList2 = df2.index.tolist()
print(df1, '\n\n', df2)
return edges[(edges.node_i.isin(indexList1)) & (edges.node_j.isin(indexList2))]
def findInEdges(nodeNum, edges, att=None):
"""
Find the specified node index in either node_i or node_j columns of input edges df.
Parameters
----------
nodeNum : int
This is the node index not protein index.
edges : pandas dataframe
Dataframe in which to search for node.
Returns
-------
pandas dataframe
"""
edges = edges.copy()
if att is not None:
return edges[(edges.attribute == att) & ((edges.node_i == nodeNum) | (edges.node_j == nodeNum))]
else:
return edges[(edges.node_i == nodeNum) | (edges.node_j == nodeNum)]
def getResidInfo(resid, nodes, resExcludes=[]):
"""
Get the node information for specified protein residue index.
Parameters
----------
resid : int
protein residue number
nodes : pandas dataframe
Dataframe in which to translate protein index to node index (indices).
resExcludes: list
List containing strings for residues to ignore. E.g., ['WAT']
Returns
-------
pandas dataframe
"""
nodes_id = nodes.loc[nodes['resid'] == resid]
nodes_id = nodes_id[~nodes_id.resname.isin(resExcludes)]
return nodes_id
def idxToResid(idx, nodes, idOnly=False):
"""
This function takes in some node index and generates
a string code of one-letter residue name and integer of residue number.
Parameters
----------
idx : int
integer index of the pandas dataframe
nodes : pandas dataframe
pandas dataframe of which to search
idOnly : Boolean
True to return numpy.int64 of residue number
False to return code with resname abbrev
ex., True returns 150; False returns 'F150:sc'
Returns
-------
string or numpy.int64 value of residue (based on idOnly parameter)
"""
aa_dict = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'HSD': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M',
'GBI1':'GBI1', 'GBI2':'GBI2', 'WAT':'WAT'}
#old way not conducive to taking diff of dataframes
#entry = nodes.iloc[idx-1]
entry = nodes.loc[nodes.index == idx] # iloc gives series, loc gives dataframe
entry = entry.T.squeeze() # convert from dataframe to series
resname = entry['resname']
if resname in ['GBI1','GBI2']:
code = aa_dict[resname]+':'+entry['code']
elif resname == 'WAT':
code = aa_dict[resname]+str(entry['resid'])
else: # if not GBI or WAT, must be Hv1
if idOnly: code = entry['resid']
else: code = aa_dict[resname]+str(entry['resid'])+':'+entry['location']
return code
def trimEdgesTM(nodes, edges):
"""
Process edges to remove edges that don't include transmembrane (TM)
protein residue. In other words, only keep edges that involve at
least one TM residue. The TM residue may contact a non-TM residue,
water, or another TM residue.
Parameters
----------
nodes : pandas dataframe
Pandas dataframe with information on nodes (residues)
edges : pandas dataframe
Pandas dataframe with information on edges (contacts)
Returns
-------
pandas dataframe
"""
# define which residues are in TM region
seg1 = list(range(99,126))
seg2 = list(range(134,161))
seg3 = list(range(168,192))
seg4 = list(range(198,221))
segtm = seg1+seg2+seg3+seg4
# get node indices for residues in TM region
protein_nodes = nodes[(nodes['resid'].isin(segtm)) & (nodes['resname'] != 'WAT')]
prot_node_ids = protein_nodes.index.tolist()
# keep edges with at least one node that is a TM residue
return edges[ (edges['node_i'].isin(prot_node_ids)) | (edges['node_j'].isin(prot_node_ids)) ]
def prioritize(edges, rawNum):
"""
TODO
Pull out N strongest interactions, or pivot the table.
Not meant for user; implemented in protLigInts and selectionInts functions.
This function handles cases of whether dataframe has edges or difference of edges.
Parameters
----------
edges : pandas dataframe
rawNum : integer
Returns
-------
pandas dataframe
"""
edges = edges.copy()
try: # Pull out the N strongest interactions (should be no negative values)
edges = edges.sort_values('average',ascending=False).head(rawNum)
except KeyError: # if no 'average' column then this is one df minus another so there are negatives
# sort by magnitude to get + and - changes
tempinds = edges.avg_subt.abs().sort_values(ascending=False).head(rawNum).index
edges = edges.loc[tempinds]
return edges
def pivot(edges, data=""):
"""
TODO
Pull out N strongest interactions, or pivot the table.
Not meant for user; implemented in protLigInts and selectionInts functions.
This function handles cases of whether dataframe has edges or difference of edges.
Parameters
----------
edges : pandas dataframe
rawNum : integer
Returns
-------
pandas dataframe
"""
edges = edges.copy()
if data=="edgetype":
edges = edges.pivot(index='node_i',columns='node_j', values='edgetype')
else:
try:
edges = edges.pivot(index='node_i',columns='node_j', values='average')
except KeyError:
edges = edges.pivot(index='node_i',columns='node_j', values='avg_subt')
edges = edges.dropna(axis=1,how='all') # drop columns with all nan's
return edges
def protLigInts(nodes, edges, rawNum=250, dry=1):
"""
Take in a set of nodes and edges and identify the N strongest interactions.
This function disregards:
(1) interactions between waters (there can be protein-water interaction),
(2) interactions between adjacent residues (e.g., residue F149 and F150), and
(3) interactions within the same residue (e.g., backbone and sidechain of F150).
Parameters
----------
nodes : pandas dataframe
Pandas dataframe with information on nodes (residues)
edges : pandas dataframe
Pandas dataframe with information on edges (contacts)
rawNum : integer
How many interactions to use before further processing.
Further processing = remove adjacent & intra-residue interactions.
dry : integer
2 means no waters at all even to protein/ligand
1 means no water-water interactions
0 means allow waters (NOT YET implemented)
Returns
-------
pandas PIVOTED dataframe with reduced and filtered interactions, formatted as:
> node_i as index column
> node_j as different columns
> average interaction strength in cell intersecting node_i and node_j
"""
edges = edges.copy()
# Get all indices of nodes that are not water
watless_idx = nodes.index[nodes['resname'] != 'WAT'].tolist()
if dry==1:
# Filter interactions with at least one non-water (remove wat-wat interactions)
watless_edges = edges.loc[edges['node_i'].isin(watless_idx) | edges['node_j'].isin(watless_idx)]
elif dry==2:
# Filter interactions with no waters whatsoever
watless_edges = edges.loc[edges['node_i'].isin(watless_idx) & edges['node_j'].isin(watless_idx)]
# Pull out the N strongest interactions
watless_edges = prioritize(watless_edges,rawNum=rawNum)
if watless_edges is None: return
# Make temp copy to compare protein resIDs to filter out those in same/adj resid
temp = watless_edges.copy()
temp['node_i'] = temp['node_i'].apply(idxToResid,args=(nodes,True))
temp['node_j'] = temp['node_j'].apply(idxToResid,args=(nodes,True))
# convert the non-protein residues with no ID for temp integer
temp['node_i'].replace('GBI\w', -500, regex=True,inplace=True)
temp['node_j'].replace('GBI\w', -500, regex=True,inplace=True)
temp['node_i'].replace('WAT\w', -400, regex=True,inplace=True)
temp['node_j'].replace('WAT\w', -400, regex=True,inplace=True)
# drop node interactions in same resid or adjacent
dropinds = temp.index[((temp['node_i']-temp['node_j']).abs() <= 1) == True].tolist()
watless_edges.drop(dropinds, inplace=True)
return watless_edges
def selectionInts(nodes, edges, indices, rawNum=50, dry=True):
"""
Parameters
----------
nodes : pandas dataframe
Pandas dataframe with information on nodes (residues)
edges : pandas dataframe
Pandas dataframe with information on edges (contacts)
indices : list of integers
List of node indices of selection. Two examples:
1. gidx_1 = nodes_1.index[nodes_1['resname'] == 'GBI1'].tolist()
2. selNodes = getResidInfo(211, nodes_2, resExcludes=['WAT'])
selInds = selNodes.index.tolist()
rawNum : int
How many interactions to use before further processing.
Further processing = remove adjacent & intra-residue interactions.
dry : Boolean
True to ignore any water-interactions of given selection
Returns
-------
sel_edges - pandas PIVOTED dataframe with interactions for given selection
new format:
> node_i as index column
> node_j as different columns
> average interaction strength in cell intersecting node_i and node_j
Examples of selecting indices
-----------------------------
> gidx_1 = nodes_1.index[nodes_1['resname'] == 'GBI1'].tolist()
> selNodes = getResidInfo(211, nodes_2, resExcludes=['WAT'])
> selInds = selNodes.index.tolist()
"""
sel_edges = edges.copy()
if dry:
watidx = nodes.index[nodes['resname'] == 'WAT'].tolist() # get water indices
sel_edges = sel_edges[(~sel_edges['node_i'].isin(watidx)) & (~sel_edges['node_j'].isin(watidx))]
# Get all the edge interactions that relate to selection
sel_edges = sel_edges.loc[sel_edges['node_i'].isin(indices) | sel_edges['node_j'].isin(indices)]
# Pull out the N strongest interactions
sel_edges = prioritize(sel_edges,rawNum=rawNum)
if sel_edges is None: return
# put all the GBI nodes in the i spot
sel_edges["node_i"], sel_edges["node_j"] = np.where(sel_edges['node_j'].isin(indices),
[sel_edges["node_j"], sel_edges["node_i"]], [sel_edges["node_i"], sel_edges["node_j"]])
sel_edges = sel_edges[~sel_edges['node_j'].isin(indices)] # remove self-interactions
return sel_edges
def plotHeatInts(nodes,edges,minHeat=0,maxHeat=20,colors=None,size=(20,20),seltitle="",pivoted=False):
"""
Parameters
----------
nodes : pandas dataframe
edges : pandas dataframe
Pivoted pandas dataframe (node_i indices as header, node_j indices as left column.)
minHeat : integer
Minimum data point in heat color bar.
Should be zero unless there's a special case.
maxHeat : integer
Maximum data point in heat color bar.
May want to manually adjust if max edge data > default maxHeat.
colors : string
String code referring to one of Python's color maps.
https://matplotlib.org/examples/color/colormaps_reference.html
Use a parameter of colors="edges" to color by edge type instead of strength.
size : tuple
Tuple of length 2 for (width, length) in matplotlib
seltitle : string
Title to list at the top of the plot
pivoted : Boolean
Whether or not the input edges is already pivoted (such as from s
Returns
-------
pivoted dataframe
"""
def offsetHeatGrid():
# offset the y-grid to match the label WITHOUT offsetting ticklabels
yticks_old = ax.get_yticks()
if len(yticks_old) > 1:
yticks_offset = (yticks_old[1]-yticks_old[0])/2
yticks = [(tick-yticks_offset) for tick in ax.get_yticks()]
ax.set_yticks(yticks) # grid will use these new tick placements
ax.set_yticks(yticks_old,minor=True)
ax.set_yticklabels(ylabels,minor=True) # put labels back in old placements
ax.set_yticklabels([]) # turn off labels at new tick placements
# offset the x-grid to match the label WITHOUT offsetting ticklabels
xticks_old = ax.get_xticks()
if len(xticks_old) > 1:
xticks_offset = (xticks_old[1]-xticks_old[0])/2
xticks = [(tick-xticks_offset) for tick in ax.get_xticks()]
ax.set_xticks(xticks) # grid will use these new tick placements
ax.set_xticks(xticks_old,minor=True)
ax.set_xticklabels(xlabels,minor=True) # put labels back in old placements
ax.set_xticklabels([]) # turn off labels at new tick placements
def label_edge(row):
# https://stackoverflow.com/questions/26886653/pandas-create-new-column-based-on-values-from-other-columns
if row['attribute'] == "HPHOB": # two nonpolar nodes
return 1
if row['attribute'] == "COUL": # two charged nodes
return 2
if row['attribute'] == "HBOND": # dipolar and charged nodes
return 3
if row['attribute'] == "STER": # everything else
return 4
return -1
if (colors=="edgetype" and pivoted==True):
print("Cannot color by edgetype if you have pass in a pivoted edge plot.")
return
if pivoted:
plotInput = edges
elif colors=="edgetype":
# reassign the strength values in the edges dataframe
plotInput = edges.copy()
plotInput['edgetype'] = plotInput.apply (lambda row: label_edge(row),axis=1)
plotInput = pivot(plotInput, data='edgetype')
else:
plotInput = pivot(edges)
plotNodes = nodes
# generate plot labels based on residue name and residue number
ylabels = [idxToResid(i, plotNodes) for i in list(plotInput)] # get node_j's, convert idxToResid
xlabels = [idxToResid(i, plotNodes) for i in list(plotInput.index.values)] # get node_i's, convert idxToResid
# plot the data
plt.clf()
plt.subplots(figsize=size)
sns.set(font_scale=2.1)
if colors=='edgetype':
colors="tab10"
vmin=0
vmax=4
ax = sns.heatmap(plotInput.T,annot=True,yticklabels=ylabels,xticklabels=xlabels,
cmap=colors,vmin=minHeat, vmax=maxHeat)
offsetHeatGrid()
plt.grid()
plt.ylabel('')
plt.xlabel('')
plt.yticks(rotation=0)
plt.title('\"Strongest\" interactions of {}'.format(seltitle))
plt.show()
print('interaction range is from {} to {}; verify if this is appropriate'.format(minHeat,maxHeat))
return plotInput
def diffEdges(nodes_x,nodes_y,edges_x,edges_y):
"""
Take one set of edges and subtract another. This can identify changes in contacts
between different systems, such as before and after mutation.
USE CASES:
[1] taut1 and taut2 with SAME protein system but differs in 2GBI and maybe in waters
[2] tautx before and after mutation, all else the same (same 2GBI and waters)
"""
nodes_x = nodes_x.copy()
nodes_y = nodes_y.copy()
edges_x = edges_x.copy()
edges_y = edges_y.copy()
# take union of both dataframes wrt to nodes_y, and ...
df_1 =
|
pd.merge(nodes_y, nodes_x, how='outer', indicator=True)
|
pandas.merge
|
#<NAME> 29-04-18
#Iris dataset GMIT project
#Import dependencies:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as pt
#This gives headings to the datas columns
heading=["sepal-length","sepal-width","petal-length","petal-width","Species"]
#Using panda this reads in the data as csv and assigns headings
Data = pd.read_csv('data/iris.csv',names=heading)
# Check for missing data and remove rows if missing data
EmptyIdx =
|
pd.isnull(Data)
|
pandas.isnull
|
import pandas as pd
from Client import clientclass
from Server import serverclass
from Stats import *
from LTE import *
from NodeDiscovery import *
from policy import *
from dynamic_plot import *
from user import *
from multiprocessing import Process
import multiprocessing
import os.path
import os
import shutil as sh
import time
import datetime
import sys
def choose_vm(dfvm, user_def):
"""The choose_vm function filters the dataset which has multiple vm migrations
by the user defined proprieties. Depending on the mode the simulation
is running the resulting dataframe may be a set of VMs that follow the
specifications or a single VM from a given ID that the user introduced.
The first situation being vm_mode = 1 and the second situation being vm_mode = 0
:param dfvm: Dataframe with the VMs
:type dfvm: Pandas dataframe
:param user_def: User definitions
:type user_def: Object of the class UserDef
:return: Returns a filtered dataframe with the VMs that will be used
:rtype: Pandas dataframe
"""
if(user_def.vm_mode == 0):
dfvm = dfvm[dfvm['Migration ID'].values == user_def.vm_id]
dfvm = dfvm.reset_index(drop=True)
elif(user_def.vm_mode == 1):
dfvm = dfvm[dfvm['Migration Technique'].values == user_def.migtype]
dfvm = dfvm[dfvm['Workload'].values == user_def.benchmark]
dfvm = dfvm[dfvm['Page transfer rate (MB/s)'].values == user_def.PTR]
dfvm = dfvm.reset_index(drop=True)
return dfvm
def setup_folders(user_def):
"""The setup_folders function generates the correct folders for
outputting the results of the simulation.
:param user_def: User definitions
:type user_def: Object of the class UserDef
:return: Returns 1 in success
:rtype: Integer
"""
if(not os.path.isdir('./Digest/LoopVM')):
os.makedirs('./Digest/LoopVM')
if(not os.path.isdir('./Digest/SingleVM')):
os.makedirs('./Digest/SingleVM')
if(not os.path.isdir(user_def.digest_path)):
os.makedirs(user_def.digest_path)
if(not os.path.isdir('./OUT')):
os.makedirs('./OUT')
for files in os.listdir('./OUT'):
os.remove(os.path.join('./OUT', files))
return 1
def data_to_digest(user_def):
"""The data_to_digest function copies the results of the simulation
to the correct digest folders.
:param user_def: User definitions
:type user_def: Object of the class UserDef
:return: Returns 1 in success
:rtype: Integer
"""
sh.copy2('./OUT/Node_Determination.xlsx', user_def.digest_path)
sh.copy2('./OUT/Statistics.xlsx', user_def.digest_path)
sh.copy2('./OUT/Client_path.xlsx', user_def.digest_path)
return 1
def check_dataframes(dftrips, dfstations, df_LTE, dfvm, user_def):
"""The check_dataframes function verifies if all datasets provided exist
and have a correct structure based on the provided header of each .csv file.
This function is used to prevent program errors futher in the execution process
by checking if all the information that the user used to run the simulation can actually
be used.
:param dftrips: Dataframe with the trips
:type dftrips: Pandas dataframe
:param dfstations: Dataframe with the edge server stations
:type dfstations: Pandas dataframe
:param df_LTE: Dataframe with the LTE stations
:type df_LTE: Pandas dataframe
:param dfvm: Dataframe with the VMs
:type dfvm: Pandas dataframe
:param user_def: User definitions
:type user_def: Object of the class UserDef
:return: Returns 1 in success
:rtype: Integer
"""
trips = ['TripID', 'TimeStamp', 'Speed', 'Acceleration', 'Heading', 'HeadingChange', 'Latitude', 'Longitude']
stations = ['ID_LTE', 'radio', 'lat', 'lon']
vm = ['Migration ID', 'Migration Technique', 'Workload', 'Page Dirty Rate (4KB pages per second)', 'VM_SIZE (MB)',
'Page transfer rate (MB/s)', 'Total Migration Time (ms)', 'Downtime (ms)', 'Total Transferred Data (KB)']
list_df = [trips, stations, stations, vm]
trips_hd = dftrips.columns.values.tolist()
stations_hd = dfstations.columns.values.tolist()
lte_hd = df_LTE.columns.values.tolist()
vm_hd = dfvm.columns.values.tolist()
list_hd = [trips_hd, stations_hd, lte_hd, vm_hd]
flag = 0
for df in list_df:
for column in df: #
for hd in list_hd:
for column_hd in hd: #
if(column_hd == column):
flag = 1
break
if (flag == 0):
print('Dataset has wrong header sintaxe\n\tCheck header: ', hd)
sys.exit(2)
if(flag == 1):
flag = 0
break
list_hd.pop(0)
list_vm = ['Migration ID', 'Migration Technique', 'Workload', 'Page Dirty Rate (4KB pages per second)']
list_user = [user_def.vm_id, user_def.migtype, user_def.benchmark, user_def.PTR]
for vm_def, usr_def in zip(list_vm, list_user):
if(dfvm.loc[dfvm[vm_def] == usr_def][vm_def].empty):
print('User definitions not found in the provided dataframes\n\t Definition error: ', vm_def)
sys.exit(2)
return 1
def simulation(user_def, dfstations, dfnode, dfpath, clientlist, stationlist):
"""The simulation function is responsible for checking the main sequence of events
that evaluates each step of the client by iterating through the multiple trips and
virutal machines for each coordinate of the client. On each coordinate analyzed an evaluation
is done to see if the migration is viable. If the suggested destination is approved by the
policy evaluator then the migration process occurs. Otherwise, the next coordinate will be
analyzed the same way, until the right opportunity appears. In the process of running the simulation
some data is saved and acquired, so that in the future some results about the migration mechanism can be studied.
:param user_def: User definitions
:type user_def: Object of the class UserDef
:param dfstations: Dataframe with the edge server stations
:type dfstations: Pandas dataframe
:param dfnode: Dataframe used for saving the data required by the dynamic plot
:type dfnode: Pandas dataframe
:param dfvm: Dataframe used for saving the latency and distance data to the user throughout the path
:type dfvm: Pandas dataframe
:param clientlist: List with all the clients
:type clientlist: List
:param clientlist: List with all the edge server stations
:type clientlist: List
:return: Returns 1 in success
:rtype: Integer
"""
#Plot Process
if(user_def.dynamic_plot == 1):
lock = multiprocessing.Lock()
process = Process(target=plot_process, args=(dftrips,dfstations,lock))
plt_dynamic = 0
for client in clientlist:
print(client.dftrip['TripID'].values[0])
client.calc_triptime()
client.calc_tripdistance()
#print(client.vm_df)
for i in range(0, client.vm_df['Migration ID'].count()):
#for i in range(0, 10):
client.vm = client.vm_df.iloc[[i]]
print(i)
client.reset_vars(user_def.cone) #reset vars do cliente
get_client_source(client, dfstations) #give dfmigrations the first source
server = stationlist[client.get_origin_server_id()] #find the first origin server for that client
lte_connection(client, 0) #find the first lte_st for that client
for coor_index in range( 0, client.count_coordinates()):
cone_determination(client, coor_index)
lte_connection(client, coor_index)
client.latencies[0] = get_latency(client, coor_index, client.get_server_origin_coor())
client.distancies[0] = server.calc_distance(client.get_coordinates(coor_index), client.get_server_origin_coor())
if(client.mig_under == 0):
ret_node = node_search(client, coor_index, dfstations, user_def) #-1 correspondes to not finding a posible destination
#ret_node = node_search_close(client, coor_index, dfstations)
if(ret_node != -1):
client.latencies[1] = get_latency(client, coor_index, client.get_server_target_coor())
client.distancies[1] = server.calc_distance(client.get_coordinates(coor_index), client.get_server_target_coor())
if(ret_node != -1 and client.mig_under == 0 and policy_evaluator(server, client, coor_index, user_def)):
client.mig_under = 1
Mt_est = math.ceil(server.migration_time_estimate(client, 2.5, 14.5)) # seconds
Mt_real = math.ceil(client.vm_time_mig(2.5, 14.5)) #seconds
if(user_def.mig_cost == 0):
Mt_est = 0
Mt_real = 0
elapsed = 0
if (Mt_est >= Mt_real):
elapsed = Mt_est
elif (Mt_real >= Mt_est):
elapsed = Mt_real
if(user_def.timeout == 1):
client.timeout = Mt_real + user_def.timeout_multiplier * Mt_real
client.mig_id_inc = client.mig_id_inc + 1
creatstats(client, server, coor_index)
if(user_def.dynamic_plot == 1):
dfnode = df_dynamic_plot(client, dfnode, coor_index, ret_node, lock)
if(plt_dynamic==0):
process.start()
plt_dynamic = 1
dfpath = path_stats(dfpath, client, coor_index)
if(client.mig_under == 1 and ret_node != -1):
#function stats to collect all data during migration
stats_collect(Mt_real, Mt_est, server, client, user_def)
if(elapsed == 0 ):
client.mig_under = 0
# Apagar primeira linha do dfmigrations -> same as saying: migration happend
client.dfmigrations = client.dfmigrations.drop([0], axis='index')
client.dfmigrations = client.dfmigrations.reset_index(drop=True)
server = stationlist[client.get_origin_server_id()]
elapsed = elapsed - 1
Mt_est = Mt_est - 1
Mt_real = Mt_real - 1
#print(client.triptime)
#print(client.tripdistance)
if(user_def.dynamic_plot == 1):
process.join()
#Save statistics
df_statistics = stats_df(clientlist)
df_statistics.to_excel('./OUT/Statistics.xlsx', index=False, engine='xlsxwriter')
df_stat_node = stats_df_node_dt(clientlist)
df_stat_node.to_excel('./OUT/Node_Determination.xlsx', index=False, engine='xlsxwriter')
dfpath.to_excel('./OUT/Client_path.xlsx', index=False, engine='xlsxwriter')
#Copy data to digest path
data_to_digest(user_def)
return 1
if __name__ == "__main__":
start_time = time.time()
#Setup user definitions
if(len(sys.argv)<=1):
print('Missing Parameters:\n\tTo run default mode enter: main.py -d\n\tTo get help enter: main.py -h')
sys.exit(1)
argv = sys.argv[1:]
user_def_dict = handle_user_def (argv)
user_def = UserDef(user_def_dict)
setup_folders(user_def)
#define and initilize the dataframes and lists
dftrips = pd.read_csv("./Datasets/Mobility/" + user_def.dftrips_path)
dfstations = pd.read_csv("./Datasets/Network/" + user_def.dfstations_path)
df_LTE = pd.read_csv("./Datasets/Network/" + user_def.df_LTE_path)
dfvm = pd.read_csv("./Datasets/Vm/" + user_def.dfvm_path)
check_dataframes(dftrips, dfstations, df_LTE, dfvm, user_def)
dfnode = pd.DataFrame({'C_LAT': pd.Series([], dtype='float'),'C_LON':
|
pd.Series([], dtype='float')
|
pandas.Series
|
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismaching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
# con not compare Nan != Nan directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() any parameters unsupported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() any parameters unsupported")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() any parameters unsupported")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verify distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S =
|
pd.Series([1.0, 2., 3., 4., 5.])
|
pandas.Series
|
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/5/16 18:36
Desc: 新股和风险警示股
新浪-行情中心-沪深股市-次新股
http://vip.stock.finance.sina.com.cn/mkt/#new_stock
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
"""
import math
import pandas as pd
import requests
def stock_zh_a_st_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
:return: 风险警示板
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f3',
'fs': 'm:0 f:4,m:1 f:4',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
def stock_zh_a_new_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-新股
http://quote.eastmoney.com/center/gridlist.html#newshares
:return: 新股
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f26',
'fs': 'm:0 f:8,m:1 f:8',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
def stock_zh_a_stop_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-两网及退市
http://quote.eastmoney.com/center/gridlist.html#staq_net_board
:return: 两网及退市
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f3',
'fs': 'm:0 s:3',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.t
|
o_numeric(temp_df['量比'], errors="coerce")
|
pandas.to_numeric
|
import numpy as np
import sys
import os
from neo import Block
from neo.io import AxonIO, Spike2IO
import pandas as pd
from pathlib import Path
from ec_code.phy_tools.utilities import nanzscore, nanzscoremedian, butter_highpass_filter
def load_traces(filenames, **kwargs):
"""Function to load flexibly either one file or multiple files concatenated.
"""
if type(filenames) == list:
if len(filenames) == 1:
return _load_trace(filenames[0], **kwargs)
else:
df = _load_trace(filenames[0], **kwargs)
for f in filenames[1:]:
new_df = _load_trace(f, **kwargs)
new_df["sweep"] += df["sweep"].max() + 1
df = pd.concat([df, new_df], axis=0)
return df
else:
if Path(filenames).is_dir():
files = list(Path(filenames).glob("*.abf"))
if len(files) > 0:
return load_traces(files, **kwargs)
else:
return _load_trace(filenames, **kwargs)
def _load_trace(filename, chan_names=[], verbose=True, artifact_chan=None,
artifact_thr_sd=20, zscore=False, highpass_coff=None):
"""
:param filename: .abf file to be loaded;
:param chan_names: new names for the channels; if "null", data won't be loaded;
:param verbose: print info abot the trace if true;
:return:
"""
# Time at the beginning of trace to be nan because of the artifact shape
REMOVE_ART_S = 0.70
filename = str(filename) # convert to string if Path object
if verbose:
print('loading {}...'.format(filename))
# Filetype from filename:
filetype = filename.split(".")[-1]
# Read binary file:
r = AxonIO(filename=filename)
bl = r.read_block(lazy=False)
# To be changed? read infos from the first block:
read_names = []
for sig in bl.segments[0].analogsignals:
read_names.append(sig.name)
fn = np.float(sig.sampling_rate)
print("Read info: Channels: {}; Sampling rate: {}".format(read_names, fn))
# If names are specified, overwrite:
for i, overwrite_name in enumerate(chan_names):
read_names[i] = overwrite_name
# Initialise data dictionary:
data = {'time': [], 'sweep': []}
for k in read_names:
if k is not 'null':
data[k] = []
# Iterate over sweeps:
artifact_positions = []
for i, seg in enumerate(bl.segments):
# Calculate sample count
time_vect = np.array(seg.analogsignals[0].times)
# If trace has to start from the artifact, find a new start_idx:
start_idx = 0
if artifact_chan is not None:
data_arr = seg.analogsignals[artifact_chan].as_array()
start_idx = find_artifact(data_arr, fn=fn,
artifact_thr_sd=artifact_thr_sd)
artifact_positions.append(start_idx/fn) # To check if all are found
# Get time and sweep number
time_vect = time_vect[start_idx:] - time_vect[start_idx]
data['time'] += [time_vect]
data['sweep'] += [np.ones(len(time_vect), dtype=int) * i]
# Append all channel traces, removing artifact if necessary:
for j, k in enumerate(read_names):
if k is not 'null': # if channels are excluded
data_arr = seg.analogsignals[j].as_array()[start_idx:, 0]
if highpass_coff is not None:
data_arr = butter_highpass_filter(data_arr,
highpass_coff,
fn, order=4)
if artifact_chan is not None:
data_arr[:int(REMOVE_ART_S * fn)] = np.nan
if zscore:
data_arr = nanzscoremedian(data_arr)
data[k] += [data_arr]
if verbose:
print("Artifacts not found: {}".format(0 in artifact_positions))
# Concatenate values in dictionary
for key in data.keys():
data[key] = np.squeeze(
np.concatenate([np.squeeze(x) for x in data[key]]))
return
|
pd.DataFrame(data)
|
pandas.DataFrame
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index =
|
Index(["a", "b", "c"], name=0)
|
pandas.Index
|
from tweepy import API
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweetsent.nlp import Sentiment, SentimentCustom
import os
import numpy as np
import pandas as pd
import re
import altair as alt
# # # # TWITTER CLIENT # # # #
class TwitterClient():
def __init__(self, twitter_user=None):
self.auth = TwitterAuthenticator().authenticate_twitter_app()
self.twitter_client = API(self.auth)
self.twitter_user = twitter_user
def get_twitter_client_api(self):
return self.twitter_client
# # # # TWITTER AUTHENTICATER # # # #
class TwitterAuthenticator():
def authenticate_twitter_app(self):
auth = OAuthHandler(os.environ.get('TWITTER_CONSUMER_API_KEY'), os.environ.get('TWITTER_CONSUMER_API_SECRET'))
auth.set_access_token(os.environ.get('TWITTER_ACCESS_TOKEN'), os.environ.get('TWITTER_ACCESS_TOKEN_SECRET'))
return auth
# # # # TWITTER STREAMER # # # #
class TwitterStreamer():
"""
Class for streaming and processing live tweets.
"""
def __init__(self):
self.twitter_autenticator = TwitterAuthenticator()
def stream_tweets(self, fetched_tweets_filename, hash_tag_list):
# This handles Twitter authetification and the connection to Twitter Streaming API
listener = TwitterListener(fetched_tweets_filename)
auth = self.twitter_autenticator.authenticate_twitter_app()
stream = Stream(auth, listener)
# This line filter Twitter Streams to capture data by the keywords:
stream.filter(track=hash_tag_list)
# # # # TWITTER STREAM LISTENER # # # #
class TwitterListener(StreamListener):
"""
This is a basic listener that just prints received tweets to stdout.
"""
def __init__(self, fetched_tweets_filename):
self.fetched_tweets_filename = fetched_tweets_filename
def on_data(self, data):
try:
print(data)
with open(self.fetched_tweets_filename, 'a') as tf:
tf.write(data)
return True
except BaseException as e:
print("Error on_data %s" % str(e))
return True
def on_error(self, status):
if status == 420:
# Returning False on_data method in case rate limit occurs.
return False
print(status)
class TweetAnalyzer():
"""
Functionality for analyzing and categorizing content from tweets.
"""
#def clean_tweet(self, tweet):
# return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
def analyze_sentiment(self, tweet):
sent = SentimentCustom()
analysis = sent.analyze_sentiment(tweet)
score = analysis[0][0]
if score >= 0.6:
return "Positivo", score
elif (score >= 0.3) and (score <= 0.6):
return "Neutral", score
elif score < 0.3:
return "Negativo", score
else:
return "Check Custom Model Threshold"
def tweets_to_data_frame(self, tweets):
df =
|
pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])
|
pandas.DataFrame
|
import json, time
from decimal import Decimal
import pandas as pd
import numpy as np
import requests
class API():
def __init__(self, base_url):
self.base_url = base_url
@staticmethod
def _http_error_message(e, r):
response_text = json.loads(r.text)['message']
return f'\n\nRequests HTTP error: {e}\n\n\tUrl: {r.url}\n\tStatus Code: {r.status_code}\n\tResponse Text: {response_text}\n\tNote: Check the url and endpoint\n'
@staticmethod
def _random_float_between_zero_one():
rand_int_below_ten = Decimal(str(np.random.randint(11)))
return float(rand_int_below_ten / Decimal('10'))
def get(self, endpoint, params={}, auth=None):
url = f'{self.base_url}{endpoint}'
try:
r = requests.get(url=url, auth=auth, params=params)
r.raise_for_status()
except requests.ConnectionError as e:
raise e
except requests.HTTPError as e:
raise requests.HTTPError(self._http_error_message(e, r))
else:
return r
def post(self, endpoint, params={}, data={}, auth=None):
url = f'{self.base_url}{endpoint}'
data = json.dumps(data)
try:
r = requests.post(url=url, auth=auth, params=params, data=data)
r.raise_for_status()
except requests.HTTPError as e:
raise requests.HTTPError(self._http_error_message(e, r))
except requests.ConnectTimeout as e:
raise e
except requests.ConnectionError as e:
raise e
else:
return r
def handle_page_nation(self, endpoint, start_date, date_field='created_at', params={}, auth=None):
all_results = []
def make_request(after=None):
response = self.get(endpoint, params={**params, 'after':after}, auth=auth)
end_cursor = response.headers.get('cb-after', None) # end of page index; used for older results
data = response.json()
number_of_results = len(data)
if number_of_results == 0:
# no data available in this page (request)
return
# flatten data;
df =
|
pd.json_normalize(data, sep='.')
|
pandas.json_normalize
|
from ast import operator
import csv
from datetime import datetime
from operator import index, mod
import os
import sys
import math
import time
import warnings
import itertools
import numpy as np
import pandas as pd
# import scrapbook as sb
import matplotlib.pyplot as plt
from pmdarima.arima import auto_arima
pd.options.display.float_format = "{:,.2f}".format
np.set_printoptions(precision=2)
warnings.filterwarnings("ignore")
print("System version: {}".format(sys.version))
# Forecasting settings
N_SPLITS = 1
HORIZON = 5 # Forecast 2 Days
GAP = 1
FIRST_WEEK = 40
LAST_WEEK = 138
# Parameters of ARIMA model
params = {
"seasonal": False,
"start_p": 0,
"start_q": 0,
"max_p": 5,
"max_q": 5,
"m": 52,
}
def readCSV():
# 读取csv至字典
csvFile = open("BCHAIN-MKPRU.csv", "r")
reader = csv.reader(csvFile)
date = []
price = []
# 建立空字典
result = {}
for item in reader:
# 忽略第一行
if reader.line_num == 1:
continue
result[item[0]] = float(item[1])
csvFile.close()
for k, v in result.items():
result[k] = math.log(v)
date.append(datetime.strptime(k, '%m/%d/%y'))
price.append(result[k])
return (result, date, price)
def getDatePrice(result):
date = []
price = []
for k, v in result.iterrows():
result[k] = math.log(v['Value'])
date.append(datetime.strptime(k, '%m/%d/%y'))
price.append(math.log(v['Value']))
return (date, price)
def createDF(date, price):
period = 20
date = date[-period:]
price = price[-period:]
mid = int(period * 0.7)
train_df = pd.DataFrame({'date': date[:mid], 'price': price[:mid]},
index=date[:mid], columns=['date', 'price'])
test_df = pd.DataFrame({'date': date[mid:], 'price': price[mid:]},
index=date[mid:], columns=['date', 'price'])
return (train_df, test_df)
def train(train_ts):
train_ts = np.array(train_ts.logmove)
model = auto_arima(
train_ts,
seasonal=params["seasonal"],
start_p=params["start_p"],
start_q=params["start_q"],
max_p=params["max_p"],
max_q=params["max_q"],
stepwise=True,
)
model.fit(train_ts)
def MAPE(predictions, actuals):
"""
Implements Mean Absolute Percent Error (MAPE).
Args:
predictions (array like): a vector of predicted values.
actuals (array like): a vector of actual values.
Returns:
numpy.float: MAPE value
"""
if not (isinstance(actuals, pd.Series) and isinstance(predictions, pd.Series)):
predictions, actuals = pd.Series(predictions), pd.Series(actuals)
return ((predictions - actuals).abs() / actuals).mean()
# 传入每日数据
def trainEveryDay(date, price):
day = len(date)
print(f'This is SIMU of the {day} DAY')
train_df, test_df = createDF(date, price)
train_ts = np.array(train_df.price)
model = auto_arima(
train_ts,
seasonal=params["seasonal"],
start_p=params["start_p"],
start_q=params["start_q"],
max_p=params["max_p"],
max_q=params["max_q"],
stepwise=True,
)
model.fit(train_ts)
# print(model.summary())
# model.plot_diagnostics(figsize=(10, 8))
# plt.show()
preds = model.predict(n_periods=GAP + HORIZON - 1)
predictions = np.round(np.exp(preds[-HORIZON:]))
test_date = test_df.date[:HORIZON]
pred_df = pd.DataFrame({"price": predictions},
index=test_date, columns=['price'])
test_ts = test_df.head(HORIZON)
train_ts = train_df
# ts 为 e次方后数据
test_ts.price = np.round(np.exp(test_ts.price))
train_ts.price = np.round(np.exp(train_ts.price))
all_ts =
|
pd.concat([train_ts, test_ts])
|
pandas.concat
|
import os
import argparse
import numpy as np
import pandas as pd
from time import time
from scipy.stats import norm
from scipy.spatial.distance import euclidean
from editing_dist_n_lcs_dp import edit_distance
from editing_dist_n_lcs_dp import lcs
#global variables
# BREAK_POINTS = []
# LOOKUP_TABLE = []
# TODO BUILD CLASS
# TODO find optimal VOCAB_SIZE & PAA_SIZE OR WINDOW_SIZE
# TODO compare multiple series
# TODO find motifs (cycles)
def matrix_to_df(cols, matrix):
"""
Convert matrix of time series to pd.DataFrame
"""
df = pd.DataFrame()
for i in range(len(cols)):
df[cols[i]] = matrix[i]
return df
def znorm(ts):
"""
Standardize data
"""
return (ts - np.mean(ts)) / np.std(ts)
def ts2paa(ts, paa_size):
"""
PAA algorithm implementation
The conde is inpired on the R SAX package code. For non-equidivisible PAA interval a weighted sum is applied,
The R package the weighted sum imlementationh has O(n * paa_size) complexity, instead this function has O(n) complexity.
"""
# convert ts to a single value
if paa_size == 1:
return np.array(np.mean(ts))
# use all ts' values
elif paa_size == ts.shape[0]:
return ts
# series' length is divisible by paa split
elif ts.shape[0] % paa_size == 0:
ts_split = np.reshape(ts, (paa_size, ts.shape[0]//paa_size))
return np.mean(ts_split, 1)
# ts' length is not divisible by paa split
# O(ts.shape[0]) complexity instead of O(ts.shape[0] * paa_size)
else:
ts_paa = np.zeros(paa_size)
carry = 0
n_vals = 0
paa_id = 0
weight = paa_size
for i in range(ts.shape[0]):
# update number of computed values
n_vals += paa_size
# set value's weight
weight = paa_size
# compute sum
ts_paa[paa_id] += weight * ts[i] + carry
# set carry
carry = 0
# verify integrety => update `weight` and compute `carry`
# update sum
if n_vals > ts.shape[0]:
# update weight to remove excess sum
weight = n_vals - ts.shape[0]
# remove excess
ts_paa[paa_id] -= weight * ts[i]
#compute paa value
ts_paa[paa_id] = ts_paa[paa_id] / ts.shape[0]
# update paa_id and aux. values
paa_id += 1
n_vals = weight
carry = weight * ts[i]
return ts_paa
def get_breakpoints(vocab_size):
"""
Devide series' area under N(0, 1) into `vocab_size` equal areas
Returns a np.array, where symbol: cut
Use inverse umulative distribution function
"""
probs = np.arange(0, vocab_size, 1) / vocab_size
# cumulative prob. function
return norm.ppf(probs)
# @deprecated
# use numpy instead (np.searchsorted(.))
def bin_search(val, arr):
"""
Adapted binary search (left)
if `val` is <= than `m` => compare with m-1, otherwise compare with m+1
Find symbol representation
Return index of symbol
"""
l = 0
r = arr.shape[0] - 1
while l <= r:
m = (l + r + 1) // 2
if arr[m] <= val:
# base case: m is right-most index
if m + 1 == arr.shape[0]:
return m
# compare `val` with right neighbour
elif val <= arr[m + 1]:
return m
l = m + 1
else:
#base case: `val` is <= than 2nd value index
if m <= 1:
return 0
# compare `val` with left neighbour
elif val > arr[m - 1]:
return m - 1
r = m - 1
return m
def val2symbol(ts_paa, vocab_size):
"""
Convert continuous time series values into discrete values,
using `vocab_size` discrete values
"""
vocab = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'], dtype=str)
#vocab = vocab[:vocab_size]
# compute breakpoints under a normal distribution ~ N(0, 1)
breakpoints = get_breakpoints(vocab_size)
# get ids for symbol conversion
symbol_ids = np.searchsorted(breakpoints, ts_paa) - 1
# convert ts to string
ts_symbol = vocab[symbol_ids]
return breakpoints, ts_symbol
def sax(ts, out_size, vocab_size, paa=True):
"""
Apply SAX algorithm to time series, i.e. convert continuous values series into
discrete values aggregated series
:ts - time series of continuous values, numpy.array
:out_size - the final output size of ts
:vocab_size - number of sumbols to use (# lelvels), the size of vacabolary
:paa - boolean variable, out_size is PAA if paa is True, out_size is Window size otherwise
"""
if paa:
paa_size = out_size
else:
paa_size = get_paa_size_from_window_size(ts.shape[0], out_size)
# Normalize series
ts_norm = znorm(ts)
# Convert normalized series to paa
ts_paa = ts2paa(ts_norm, paa_size)
# Convert paa series into symbols
breakpoints, ts_sax = val2symbol(ts_paa, vocab_size)
# Lookup table containing distance between symbols
dist_lookup_table = compute_dist_lookup_table(breakpoints)
return breakpoints, dist_lookup_table, ts_norm, ts_paa, ts_sax
def symbol2index(ts_sax):
"""
Converts symbol string to index values of symbols
ts_sax: series as symbols, i.e. sax representation of a series
"""
# lookup table for symbols' indeces
s2id = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7,
'i': 8, 'j': 9, 'k': 10, 'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15,
'q': 16, 'r': 17, 's': 18, 't': 19, 'u': 20, 'v': 21, 'w': 22, 'x': 23,
'y': 24, 'z': 25, 'A': 26, 'B': 27, 'C': 28, 'D': 29, 'E': 30, 'F': 31,
'G': 32, 'H': 33, 'I': 34, 'J': 35, 'K': 36, 'L': 37, 'M': 38, 'N': 39,
'O': 40, 'P': 41, 'Q': 42, 'R': 43, 'S': 44, 'T': 45, 'U': 46, 'V': 47,
'W': 48, 'X': 49, 'Y': 50, 'Z': 51}
# init. id series
ts_id = np.empty(ts_sax.shape[0], dtype=int)
# convert symbols to ids
for i in range(ts_sax.shape[0]):
ts_id[i] = s2id[ts_sax[i]]
return ts_id
def get_dists(ts1_sax, ts2_sax, lookup_table):
"""
Compute distance between each symbol of two words (series) using a lookup table
ts1_sax and ts2_sax are two sax representations (strings) built under the same conditions
"""
# Verify integrity
if ts1_sax.shape[0] != ts2_sax.shape[0]:
return -1
# convert symbol series into series of indexes (symbol indexes)
ts1_sax_id = symbol2index(ts1_sax)
ts2_sax_id = symbol2index(ts2_sax)
# array of distances between symbols
dists = np.zeros(ts1_sax.shape[0])
for i in range(ts1_sax_id.shape[0]):
dists[i] = lookup_table[ts1_sax_id[i], ts2_sax_id[i]]
return dists
def compute_mindist(n, lookup_table, ts1_sax, ts2_sax):
"""
Minimum distance between the original time series of two words
`n` is the original series' length
"""
aux = np.sqrt(n / ts1_sax.shape[0])
dists = get_dists(ts1_sax, ts2_sax, lookup_table)
dists_squares = np.square(dists)
dists_sum_squares = np.sum(dists_squares)
return aux * np.sqrt(dists_sum_squares)
def get_tightness_of_lower_bound(lookup_table, ts1, ts2, ts1_sax, ts2_sax):
"""
Compute the tightness of the lower bound
Used to find the parameters settings
"""
# compute euclidean distance between original series
or_dist = euclidean(ts1, ts2)
# compute MINDIST for sax series
mindist = compute_mindist(ts1.shape[0],lookup_table, ts1_sax, ts2_sax)
return mindist / or_dist
def compute_dist_lookup_table(breakpoints):
"""
The lookup table is computed as described in [X]
d(r, c) = |0, if |r - c| <= 1
|abs(breakpoints[i] - breakpoints[j-1]), otherwise
Contiguous values have distance 0, thus are not computed
"""
# init. matrix
lookup_table_dist = np.zeros((breakpoints.shape[0], breakpoints.shape[0]))
# compute distances
for bi in range(breakpoints.shape[0]):
# increment by 2, since contiguous values have distance 0
for bj in range(bi + 2, breakpoints.shape[0]):
# since breakpoints[0] = - np.inf and symbol is conditioned by <=
# bi is set to next value
# compute distance
dist = breakpoints[bj] - breakpoints[bi + 1]
# set distance
lookup_table_dist[bi, bj] = dist
# mirror
lookup_table_dist[bj, bi] = dist
return lookup_table_dist
def get_paa_size_from_window_size(n, window_size):
"""
Gets paa size from a sliding window size.
Use sliding window instead of symbol series.
"""
if n % window_size > 0:
return n // window_size + 1
return n // window_size
###############################################################################################
###############################################################################################
def main(args):
#CONSTATNS
MIN_VOCAB_SIZE = 1
MAX_VOCAB_SIZE = 52
MIN_PAA_SIZE = 1
######################
# Finding VOCAB_SIZE & PAA_SIZE. It is highly data dependent. Best values are those
# which minimize the tightness of the lowers bound
# Objective: Minimize(MINDIST(Â, Ê) / D(A, B)), i.e. Tightness of Lower Bound
# Read data (skips header)
data = np.loadtxt(args.data_path, delimiter=',', skiprows=1)
df = pd.read_csv(args.data_path)
data = df.as_matrix()
cols = list(df.columns)
#switch columns with rows (=>row is a time series)
data = data.T
#read arguments
# n = len of series
VOCAB_SIZE = args.vocab_size
PAA_SIZE = args.paa_size
WINDOW_SIZE = args.window_size
breakpoints_l = []
lookup_table_l = []
ts_norm_l = []
ts_paa_l = []
ts_sax_l = []
st = time()
print("Computing SAX...")
for ts in data:
# get number of obs.
n = ts.shape
#get PAA_SIZE or WINDOW_SIZE
if WINDOW_SIZE > 0:
PAA_SIZE = get_paa_size_from_window_size(n, WINDOW_SIZE)
# compute sax
breakpoints, lookup_table, ts_norm, ts_paa, ts_sax = sax(ts, PAA_SIZE, VOCAB_SIZE)
#add to list
breakpoints_l.append(breakpoints)
lookup_table_l.append(lookup_table)
ts_norm_l.append(ts_norm)
ts_paa_l.append(ts_paa)
ts_sax_l.append(ts_sax)
n_series = data.shape[0]
# compute TLS
tbl_df = pd.DataFrame()
edd_df = pd.DataFrame()
lcs_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""Generates context csv files that can be used by a contextual bandit
algorithm
"""
import os
from datetime import datetime, timedelta
import json
from abc import ABC, abstractmethod
import pandas as pd
import numpy as np
import typing
from typing import List
import global_config
from . import trace_pre_processing as tpp
def generate_context_csv(
event_extractor,
paths:List[str],
seq: bool=True,
window_size:int=30,
step:int=5,
columns:List[str]=None,
outdir:str='./'
)->str:
"""Writes a context.csv file that can be processed by a contextual bandit
algorithm. It extracts features for each event in the passed traces.
Afterwards in aggregates the features using a sliding window approach
and writes the file to a desired location.
Args:
event_extractor (AbstractTraceExtractor): Used to extract the features
from individual events.
paths (string[]): Paths to the traces that will be used to generate the
file.
start (np.datetime64): Timestamp of the start for the context file. Only
events occuring between start and end will be used.
end (np.datetime64): Timestamp of the end for the context file.
window_size (int): Size of the sliding window.
step (int): Step of the sliding window.
columns (string[]): If not None, thae resulting DataFrame will have these
columns.
outdir (string): Directory where to write the generated file.
Returns:
String: Full path of the written file
"""
stime = datetime.now()
print('Generating context.csv...')
start = global_config.START_TRACES_SEQUENTIAL if seq else global_config.START_TRACES_CONCURRENT
end = global_config.END_TRACES_SEQUENTIAL if seq else global_config.END_TRACES_CONCURRENT
start = pd.to_datetime(start)
end = pd.to_datetime(end)
context_df = pd.DataFrame(index=pd.date_range(
start=start, end=end, freq='1S'))
file_paths = []
current_path = ''
for current_path in paths:
file_paths.extend(
list(map(lambda x: current_path + x, os.listdir(current_path))))
print('Total of %d traces will be used to generate the context.csv' %
len(file_paths))
i = 0
context_df_columns_set = set()
for current_fp in file_paths:
with open(current_fp) as open_file:
trace_json = json.load(open_file)
try:
event_features = event_extractor.extract_features_for_events(
trace_json)
except KeyError:
print('Skipped trace %s' % current_fp)
i = i + 1
continue
trace_df = pd.pivot_table(
pd.DataFrame(data=event_features),
index=['start'],
aggfunc=np.sum)
mask = (trace_df.index >=
|
pd.to_datetime(start)
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
import os
import logging
import argparse
logging.getLogger().setLevel(logging.INFO)
if __name__ == '__main__':
parser = argparse.ArgumentParser("Aggregate experiment results")
parser.add_argument("savepath",
help="savepath for the experiment folder. either relative to working directory or absolute.",
type=str)
args = parser.parse_args()
base_base_path = args.savepath
dirs = [ name for name in os.listdir(base_base_path) if os.path.isdir(os.path.join(base_base_path, name)) ]
for dir in dirs:
base_path = base_base_path + dir +'/'
r_types = ['individualized', 'subpopulation']
t_types = ['improvement', 'acceptance']
cols = ['eta_obs_model', 'eta_obs_refit', 'r_type', 't_type']
df =
|
pd.DataFrame([], columns=cols)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Routine to read and clean water quality data of wide/stacked formats
<NAME>, <NAME>
KWR, April-July 2020
Last edit: July 27. Not upodating any more and use the new version.
"""
import pandas as pd
import numpy as np
import logging
import os
import math
# from unit_converter.converter import convert, converts
import re
from molmass import Formula
# %% HGC.IO.defaults
# New definition of NaN. Based on default values of python with the following exception:
# 'NA' is left out to prevent NA (Sodium) being read as NaN.
NA_VALUES = ['#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',
'1.#IND', '1.#QNAN', 'N/A', 'NULL', 'NaN', 'n/a', 'nan', 'null']
# The following dictionary should be extracted from HGC.constants and augmented (or overruled) by the user
DATAMODEL_HGC = {
'HGC_default_feature_units': {
'Fe': 'mg/L',
'SO4': 'mg/L',
'Al': 'µg/L',
},
}
UNIT_CONVERSION = {
'mm':0.001, 'cm':0.01, 'm':1.0, 'km':1000, # add length here
'ng':1e-9, 'μg':0.000001, 'mg':0.001, 'g':1.0, 'kg':1000, # add mass here
'mL':0.001, 'L':1.0, # add volumn here
'μS':1e-6, 'mS':0.001, 'S':1.0, # add conductivity here
'mV': 0.001, 'V':1.0, # add voltage here
'μmol':1e-6, 'mmol':0.001, 'mol':1.0, # add mol here
}
# The following keyworded arguments can be adjusted and merged with the configuration dictionary by the user
KWARGS = {
'na_values': NA_VALUES,
'encoding': 'ISO-8859-1',
'delimiter': None,
}
DEFAULT_FORMAT = {
'Value': 'float64',
'Feature': 'string',
'Unit': 'string',
'Date': 'date',
'LocationID': 'string',
'SampleID': 'string',
'X': 'float64',
'Y': 'float64',
}
# %% define sub-function to be called by the main function
def read_file(file_path='', sheet_name=0, na_values=NA_VALUES, encoding='ISO-8859-1', delimiter=None, **kwargs):
"""
Read pandas dataframe or file.
Parameters
----------
file_path : dataframe or string
string must refer to file. Currenlty, Excel and csv are supported
sheet_name : integer or string
optional, when using Excel file and not reading first sheet
na_values : list
list of strings that are recognized as NaN
"""
logger.info('Reading input file(s) now...')
if isinstance(file_path, pd.DataFrame):
# skipping reading if the input is already a df
df = file_path
# print('dataframe read: ' + [x for x in globals() if globals()[x] is file_path][0])
logger.info('A dataframe has been imported')
elif isinstance(file_path, str):
file_extension = file_path.split('.')[-1]
# filename, file_extension = os.path.splitext(file_path)
if (file_extension == 'xlsx') or (file_extension == 'xls'):
try:
df = pd.read_excel(file_path,
sheet_name=sheet_name,
header=None,
index_col=None,
na_values=na_values,
keep_default_na=False,
encoding=encoding)
logger.info('A excel spreadsheet has been imported')
except:
df = []
logger.error('Encountered an error when importing excel spreadsheet')
elif file_extension == 'csv':
try:
df = pd.read_csv(file_path,
encoding=encoding,
header=None,
index_col=None,
low_memory=False,
na_values=na_values,
keep_default_na=False,
delimiter=delimiter)
logger.info('A csv has been imported')
except:
df = []
logger.error('Encountered an error when importing csv')
else:
df= []
logger.error('Not a recognizable file. Need a csv or xls(x) file.')
else:
df= []
logger.error(['This file path is not recognized: '+file_path])
return df
def _get_slice(df, arrays):
""" Get values by slicing """
if isinstance(arrays[0], list): # check if the array is nested
series = pd.Series([], dtype='object')
for array in arrays:
series = series.append(df.iloc[array[0], array[1]].rename(0))
elif len(arrays) == 1: # only row specified
series = df.iloc[arrays[0]]
else: # row and column specified
series = df.iloc[arrays[0], arrays[1]]
return series
def get_headers_wide(df, slice_sample='', slice_feature='', slice_unit='', **kwargs):
""" Get column headers for a wide-format dataframe. """
# create series with headers
header_sample = _get_slice(df, slice_sample)
header_feature = _get_slice(df, slice_feature)
header_unit = _get_slice(df, slice_unit)
# get headers at 2 levels
ncols = len(df.columns)
level0 = pd.Series(ncols * [''])
level0[header_sample.index] = header_sample
level0[header_feature.index] = header_feature
level1 = pd.Series(ncols * [''])
level1[header_unit.index] = header_unit
# add series by multi-index headers
df.columns = pd.MultiIndex.from_arrays([level0, level1])
logger.info('Got column headers for a wide-format dataframe.')
return df, header_sample, header_feature, header_unit
def get_headers_stacked(df, slice_sample='', **kwargs):
""" Get column headers for a stacked-format dataframe. """
# create series with headers
header_sample = _get_slice(df, slice_sample)
# add column names
ncols = len(df.columns)
level0 = pd.Series(ncols * [''])
level0[header_sample.index] = header_sample
df.columns = level0
return df, header_sample
def slice_rows_with_data(df, slice_data=None, **kwargs):
""" Getting needed data by pre-defined slicing blocks """
df2 =
|
pd.DataFrame([])
|
pandas.DataFrame
|
from tkinter import ttk
import tkinter as tk
from eosim.config import GuiStyle, MissionConfig
import eosim.gui.helpwindow as helpwindow
from eosim import config
from eosim.gui.mapprojections import Mercator, EquidistantConic, LambertConformal, Robinson, LambertAzimuthalEqualArea, Gnomonic
import instrupy
import pandas as pd
import numpy as np
import tkinter
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import logging
logger = logging.getLogger(__name__)
class PlotMapVars(instrupy.util.EnumEntity):
TIME = "Time"
ALT = "Altitude [km]"
INC = "Inclination [deg]"
TA = "True Anomaly [km]"
RAAN = "RAAN [deg]"
AOP = "AOP [deg]"
ECC = "ECC"
SPD = "ECI Speed [km/s]"
@classmethod
def get_orbitpy_file_column_header(cls, var):
if(var==cls.INC):
return "INC[deg]"
elif(var==cls.RAAN):
return "RAAN[deg]"
elif(var==cls.AOP):
return "AOP[deg]"
elif(var==cls.TA):
return "TA[deg]"
elif(var==cls.ECC):
return "ECC"
else:
return False # could be a derived variable
@classmethod
def get_data_from_orbitpy_file(cls, sat_df, sat_id, var, step_size, epoch_JDUT1):
''' Get data frame the orbitpy resultant output files '''
_header = PlotMapVars.get_orbitpy_file_column_header(var)
if(_header is not False):
if _header == sat_df.index.name:
data = sat_df.index
else:
data = sat_df[_header]
else:
# a derived variable
if(var == cls.TIME):
data = np.array(sat_df.index) * step_size # index = "TimeIndex"
_header = 'Time[s]'
elif(var == cls.ALT):
sat_dist = []
sat_dist = np.array(sat_df["X[km]"])*np.array(sat_df["X[km]"]) + np.array(sat_df["Y[km]"])*np.array(sat_df["Y[km]"]) + np.array(sat_df["Z[km]"])*np.array(sat_df["Z[km]"])
sat_dist = np.sqrt(sat_dist)
data = np.array(sat_dist) - instrupy.util.Constants.radiusOfEarthInKM
_header = 'Alt[km]'
elif(var==cls.SPD):
data = np.array(sat_df["VX[km/s]"])*np.array(sat_df["VX[km/s]"]) + np.array(sat_df["VY[km/s]"])*np.array(sat_df["VY[km/s]"]) + np.array(sat_df["VZ[km/s]"])*np.array(sat_df["VZ[km/s]"])
data = np.sqrt(data)
_header = 'Speed[km/s]'
return [str(sat_id)+'.'+_header, data]
class MapVisPlotAttibutes():
def __init__(self, proj=None, sat_id=None, var=None, time_start=None, time_end=None):
self.sat_id = sat_id if sat_id is not None else list()
self.var = var if var is not None else list()
self.proj = proj if proj is not None else None
self.time_start = time_start if time_start is not None else None
self.time_end = time_end if time_end is not None else None
def update_variables(self, sat_id, var):
self.sat_id.append(sat_id)
self.var.append(var)
def update_projection(self, proj):
self.proj = proj
def reset_variables(self):
self.sat_id = list()
self.var = list()
def update_time_interval(self, time_start, time_end):
self.time_start = time_start
self.time_end = time_end
def get_projection(self):
return self.proj
def get_variables(self):
return [self.sat_id, self.var]
def get_time_interval(self):
return [self.time_start, self.time_end]
class VisMapFrame(ttk.Frame):
def __init__(self, win, tab):
self.vis_map_attr = MapVisPlotAttibutes() # data structure storing the mapping attributes
# map plots frame
vis_map_frame = ttk.Frame(tab)
vis_map_frame.pack(expand = True, fill ="both", padx=10, pady=10)
vis_map_frame.rowconfigure(0,weight=1)
vis_map_frame.rowconfigure(1,weight=1)
vis_map_frame.columnconfigure(0,weight=1)
vis_map_frame.columnconfigure(1,weight=1)
vis_map_frame.columnconfigure(2,weight=1)
vis_map_time_frame = ttk.LabelFrame(vis_map_frame, text='Set Time Interval', labelanchor='n')
vis_map_time_frame.grid(row=0, column=0, sticky='nswe', padx=(10,0))
vis_map_time_frame.rowconfigure(0,weight=1)
vis_map_time_frame.rowconfigure(1,weight=1)
vis_map_time_frame.rowconfigure(2,weight=1)
vis_map_time_frame.columnconfigure(0,weight=1)
vis_map_time_frame.columnconfigure(1,weight=1)
vis_map_proj_frame = ttk.LabelFrame(vis_map_frame, text='Set Map Projection', labelanchor='n')
vis_map_proj_frame.grid(row=0, column=1, sticky='nswe')
vis_map_proj_frame.columnconfigure(0,weight=1)
vis_map_proj_frame.rowconfigure(0,weight=1)
vis_map_proj_frame.rowconfigure(1,weight=1)
vis_map_proj_type_frame = ttk.Frame(vis_map_proj_frame)
vis_map_proj_type_frame.grid(row=0, column=0)
proj_specs_container = ttk.Frame(vis_map_proj_frame)
proj_specs_container.grid(row=1, column=0, sticky='nswe')
proj_specs_container.columnconfigure(0,weight=1)
proj_specs_container.rowconfigure(0,weight=1)
proj_specs_container_frames = {}
for F in (Mercator, EquidistantConic, LambertConformal,Robinson,LambertAzimuthalEqualArea,Gnomonic):
page_name = F.__name__
self._prj_typ_frame = F(parent=proj_specs_container, controller=self)
proj_specs_container_frames[page_name] = self._prj_typ_frame
self._prj_typ_frame.grid(row=0, column=0, sticky="nsew")
self._prj_typ_frame = proj_specs_container_frames['Mercator'] # default projection type
self._prj_typ_frame.tkraise()
vis_map_var_frame = ttk.LabelFrame(vis_map_frame, text='Set Variable(s)', labelanchor='n')
vis_map_var_frame.grid(row=0, column=2, sticky='nswe')
vis_map_var_frame.columnconfigure(0,weight=1)
vis_map_var_frame.rowconfigure(0,weight=1)
vis_map_var_frame.rowconfigure(1,weight=1)
vis_map_plot_frame = ttk.Frame(vis_map_frame)
vis_map_plot_frame.grid(row=1, column=0, columnspan=3, sticky='nswe', pady=(10,2))
vis_map_plot_frame.columnconfigure(0,weight=1)
vis_map_plot_frame.columnconfigure(1,weight=1)
vis_map_plot_frame.rowconfigure(0,weight=1)
# time interval frame
ttk.Label(vis_map_time_frame, text="Time (hh:mm:ss) from mission-epoch", wraplength="110", justify='center').grid(row=0, column=0,columnspan=2,ipady=5)
ttk.Label(vis_map_time_frame, text="From").grid(row=1, column=0, sticky='ne')
self.vis_map_time_from_entry = ttk.Entry(vis_map_time_frame, width=10, takefocus = False)
self.vis_map_time_from_entry.grid(row=1, column=1, sticky='nw', padx=10)
self.vis_map_time_from_entry.insert(0,'00:00:00')
self.vis_map_time_from_entry.bind("<FocusIn>", lambda args: self.vis_map_time_from_entry.delete('0', 'end'))
ttk.Label(vis_map_time_frame, text="To").grid(row=2, column=0, sticky='ne')
self.vis_map_time_to_entry = ttk.Entry(vis_map_time_frame, width=10, takefocus = False)
self.vis_map_time_to_entry.grid(row=2, column=1, sticky='nw', padx=10)
self.vis_map_time_to_entry.insert(0,'10:00:00')
self.vis_map_time_to_entry.bind("<FocusIn>", lambda args: self.vis_map_time_to_entry.delete('0', 'end'))
# projection
PROJ_TYPES = ['Mercator', 'EquidistantConic', 'LambertConformal', 'Robinson', 'LambertAzimuthalEqualArea', 'Gnomonic']
self._proj_type = tk.StringVar() # using self so that the variable is retained even after exit from the function
self._proj_type.set("Mercator") # initialize
def proj_type_combobox_change(event=None):
if self._proj_type.get() == "Mercator":
self._prj_typ_frame = proj_specs_container_frames['Mercator']
elif self._proj_type.get() == "EquidistantConic":
self._prj_typ_frame = proj_specs_container_frames['EquidistantConic']
elif self._proj_type.get() == "LambertConformal":
self._prj_typ_frame = proj_specs_container_frames['LambertConformal']
elif self._proj_type.get() == "Robinson":
self._prj_typ_frame = proj_specs_container_frames['Robinson']
elif self._proj_type.get() == "LambertAzimuthalEqualArea":
self._prj_typ_frame = proj_specs_container_frames['LambertAzimuthalEqualArea']
elif self._proj_type.get() == "Gnomonic":
self._prj_typ_frame = proj_specs_container_frames['Gnomonic']
self._prj_typ_frame.tkraise()
projtype_combo_box = ttk.Combobox(vis_map_proj_type_frame,
values=PROJ_TYPES, textvariable = self._proj_type, width=25)
projtype_combo_box.current(0)
projtype_combo_box.grid(row=0, column=0)
projtype_combo_box.bind("<<ComboboxSelected>>", proj_type_combobox_change)
vis_map_var_sel_btn = ttk.Button(vis_map_var_frame, text="Var(s)", command=self.click_select_var_btn)
vis_map_var_sel_btn.grid(row=0, column=0)
self.vis_map_var_sel_disp = tk.Text(vis_map_var_frame, state='disabled',height = 2, width = 3, background="light grey")
self.vis_map_var_sel_disp.grid(row=1, column=0, sticky='nsew', padx=20, pady=20)
# plot frame
plot_btn = ttk.Button(vis_map_plot_frame, text="Plot", command=self.click_plot_btn)
plot_btn.grid(row=0, column=0, sticky='e', padx=20)
def click_select_var_btn(self):
# reset any previously configured variables
self.vis_map_attr.reset_variables()
# create window to ask which satellite
select_var_win = tk.Toplevel()
select_var_win.rowconfigure(0,weight=1)
select_var_win.rowconfigure(1,weight=1)
select_var_win.columnconfigure(0,weight=1)
select_var_win.columnconfigure(1,weight=1)
select_sat_win_frame = ttk.LabelFrame(select_var_win, text='Select Satellite')
select_sat_win_frame.grid(row=0, column=0, padx=10, pady=10)
select_var_frame = ttk.LabelFrame(select_var_win, text='Select Variable')
select_var_frame.grid(row=0, column=1, padx=10, pady=10)
okcancel_frame = ttk.Label(select_var_win)
okcancel_frame.grid(row=1, column=0, columnspan=2, padx=10, pady=10)
# place the widgets in the frame
available_sats = config.out_config.get_satellite_ids() # get all available sats for which outputs are available
sats_combo_box = ttk.Combobox(select_sat_win_frame,
values=available_sats)
sats_combo_box.current(0)
sats_combo_box.grid(row=0, column=0)
self._vis_map_var= tk.StringVar() # using self so that the variable is retained even after exit from the function, make sure variable name is unique
j = 0
k = 0
for _var in list(PlotMapVars):
var_rbtn = ttk.Radiobutton(select_var_frame, text=_var, variable=self._vis_map_var, value=_var)
var_rbtn.grid(row=j, column=k, sticky='w')
j = j + 1
if(j==5):
j=0
k=k+1
def click_ok_btn():
self.vis_map_attr.update_variables(sats_combo_box.get(), self._vis_map_var.get())
def click_exit_btn():
self.vis_map_var_sel_disp.configure(state='normal')
self.vis_map_var_sel_disp.delete(1.0,'end')
[sats, vars] = self.vis_map_attr.get_variables()
vars_str = [str(sats[k]+'.'+vars[k]) for k in range(0,len(sats))]
self.vis_map_var_sel_disp.insert(1.0,' '.join(vars_str))
self.vis_map_var_sel_disp.configure(state='disabled')
select_var_win.destroy()
ok_btn = ttk.Button(okcancel_frame, text="Add", command=click_ok_btn, width=15)
ok_btn.grid(row=0, column=0, sticky ='e')
cancel_btn = ttk.Button(okcancel_frame, text="Exit", command=click_exit_btn, width=15)
cancel_btn.grid(row=0, column=1, sticky ='w')
def update_time_interval_in_attributes_variable(self):
# read the plotting time interval
time_start = str(self.vis_map_time_from_entry.get()).split(":") # split and reverse list
time_start.reverse()
# convert to seconds
x = 0
for k in range(0,len(time_start)):
x = x + float(time_start[k]) * (60**k)
time_start_s = x
time_end = str(self.vis_map_time_to_entry.get()).split(":") # split and reverse list
time_end.reverse()
# convert to seconds
x = 0
for k in range(0,len(time_end)):
x = x + float(time_end[k]) * (60**k)
time_end_s = x
self.vis_map_attr.update_time_interval(time_start_s, time_end_s)
def update_projection_in_attributes_variable(self):
proj = self._prj_typ_frame.get_specs()
self.vis_map_attr.update_projection(proj)
def click_plot_btn(self):
""" Make projected plots of the variables indicated in :code:`self.vis_map_attr` instance variable.
"""
self.update_time_interval_in_attributes_variable()
self.update_projection_in_attributes_variable()
[time_start_s, time_end_s] = self.vis_map_attr.get_time_interval()
proj = self.vis_map_attr.get_projection()
# get the variable data
[sat_id, var] = self.vis_map_attr.get_variables()
# get the epoch and time-step from the file belonging to the first vraible (common among all variables)
sat_state_fp = config.out_config.get_satellite_state_fp()[config.out_config.get_satellite_ids().index(sat_id[0])]
# read the epoch and time-step size and fix the start and stop indices
epoch_JDUT1 = pd.read_csv(sat_state_fp, skiprows = [0], nrows=1, header=None).astype(str) # 2nd row contains the epoch
epoch_JDUT1 = float(epoch_JDUT1[0][0].split()[2])
step_size = pd.read_csv(sat_state_fp, skiprows = [0,1], nrows=1, header=None).astype(str) # 3rd row contains the stepsize
step_size = float(step_size[0][0].split()[4])
logger.debug("epoch_JDUT1 is " + str(epoch_JDUT1))
logger.debug("step_size is " + str(step_size))
time_start_index = int(time_start_s/step_size)
time_end_index = int(time_end_s/step_size)
sat_state_df = pd.read_csv(sat_state_fp,skiprows = [0,1,2,3])
sat_state_df.set_index('TimeIndex', inplace=True)
min_time_index = min(sat_state_df.index)
max_time_index = max(sat_state_df.index)
if(time_start_index < min_time_index or time_start_index > max_time_index or
time_end_index < min_time_index or time_end_index > max_time_index or
time_start_index > time_end_index):
logger.info("Please enter valid time-interval.")
return
sat_state_df = sat_state_df.iloc[time_start_index:time_end_index]
plt_data = pd.DataFrame(index=sat_state_df.index)
# iterate over the list of vars
num_vars = len(var)
varname = []
for k in range(0,num_vars):
# extract the y-variable data from of the particular satellite
# cartesian eci state file
_sat_state_fp = config.out_config.get_satellite_state_fp()[config.out_config.get_satellite_ids().index(sat_id[k])]
_sat_state_df =
|
pd.read_csv(_sat_state_fp,skiprows = [0,1,2,3])
|
pandas.read_csv
|
from alpha_vantage.timeseries import TimeSeries
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.dates as mdates
from io import BytesIO
import base64
import gc
import config
def incoming_data(stock):
# config.py stores the Alpha Vantage API key (https://www.alphavantage.co/support/#api-key)
ts = TimeSeries(key = config.api_key)
# compact is prior 100 days of data
data_recent = ts.get_daily_adjusted(stock, outputsize = 'compact')
# full is up to 20 years of data (if available)
data_old = ts.get_daily_adjusted(stock, outputsize = 'full')
date_recent = []
close_recent = []
date_old = []
close_old = []
now = datetime.now()
# specifies how many historical days to include on the recent chart
lookback = 200
# iterates through recent data and appends date & close lists with data and close data for lookback period
for keys in data_recent[0].keys():
if (datetime.toordinal(
|
pd.to_datetime(keys)
|
pandas.to_datetime
|
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from blocktorch.data_checks import DataCheckAction, DataCheckActionCode
from blocktorch.model_family import ModelFamily
from blocktorch.pipelines import (
BinaryClassificationPipeline,
MulticlassClassificationPipeline,
RegressionPipeline,
)
from blocktorch.pipelines.components import (
DateTimeFeaturizer,
DelayedFeatureTransformer,
DropColumns,
DropNullColumns,
DropRowsTransformer,
EmailFeaturizer,
Estimator,
Imputer,
LinearRegressor,
LogisticRegressionClassifier,
LogTransformer,
OneHotEncoder,
SklearnStackedEnsembleClassifier,
SklearnStackedEnsembleRegressor,
StandardScaler,
TargetImputer,
TextFeaturizer,
Transformer,
URLFeaturizer,
)
from blocktorch.pipelines.utils import (
_get_pipeline_base_class,
_make_component_list_from_actions,
generate_pipeline_code,
get_estimators,
make_pipeline,
)
from blocktorch.problem_types import ProblemTypes, is_regression, is_time_series
@pytest.fixture
def get_test_data_from_configuration():
def _get_test_data_from_configuration(
input_type, problem_type, column_names=None, lognormal_distribution=False
):
X_all = pd.DataFrame(
{
"all_null": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
* 2,
"numerical": range(14),
"categorical": ["a", "b", "a", "b", "b", "a", "b"] * 2,
"dates": pd.date_range("2000-02-03", periods=14, freq="W"),
"text": [
"this is a string",
"this is another string",
"this is just another string",
"blocktorch should handle string input",
"cats are gr8",
"hello world",
"blocktorch is gr8",
]
* 2,
"email": [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
]
* 2,
"url": [
"https://blocktorch.alteryx.com/en/stable/",
"https://woodwork.alteryx.com/en/stable/guides/statistical_insights.html",
"https://twitter.com/AlteryxOSS",
"https://www.twitter.com/AlteryxOSS",
"https://www.blocktorch.alteryx.com/en/stable/demos/text_input.html",
"https://github.com/alteryx/blocktorch",
"https://github.com/alteryx/featuretools",
]
* 2,
"ip": [
"0.0.0.0",
"172.16.17.32",
"192.168.127.12",
"192.168.3.11",
"192.168.3.11",
"192.168.1.1",
"255.255.255.255",
]
* 2,
}
)
y = pd.Series([0, 0, 1, 0, 0, 1, 1] * 2)
if problem_type == ProblemTypes.MULTICLASS:
y = pd.Series([0, 2, 1, 2, 0, 2, 1] * 2)
elif is_regression(problem_type):
if lognormal_distribution:
y = pd.Series([1, 1, 1, 2, 3, 6, 9] * 2)
else:
y = pd.Series([1, 2, 3, 3, 3, 4, 5] * 2)
X = X_all[column_names]
if input_type == "ww":
logical_types = {}
if "text" in column_names:
logical_types.update({"text": "NaturalLanguage"})
if "categorical" in column_names:
logical_types.update({"categorical": "Categorical"})
if "url" in column_names:
logical_types.update({"url": "URL"})
if "email" in column_names:
logical_types.update({"email": "EmailAddress"})
X.ww.init(logical_types=logical_types)
y = ww.init_series(y)
return X, y
return _get_test_data_from_configuration
@pytest.mark.parametrize("lognormal_distribution", [True, False])
@pytest.mark.parametrize("input_type", ["pd", "ww"])
@pytest.mark.parametrize("problem_type", ProblemTypes.all_problem_types)
@pytest.mark.parametrize(
"test_description, column_names",
[
("all nan is not categorical", ["all_null", "numerical"]),
("mixed types", ["all_null", "categorical", "dates", "numerical"]),
("no all_null columns", ["numerical", "categorical", "dates"]),
("date, numerical", ["dates", "numerical"]),
("only text", ["text"]),
("only dates", ["dates"]),
("only numerical", ["numerical"]),
("only ip", ["ip"]),
("only all_null", ["all_null"]),
("only categorical", ["categorical"]),
("text with other features", ["text", "numerical", "categorical"]),
("url with other features", ["url", "numerical", "categorical"]),
("ip with other features", ["ip", "numerical", "categorical"]),
("email with other features", ["email", "numerical", "categorical"]),
],
)
def test_make_pipeline(
problem_type,
input_type,
lognormal_distribution,
test_description,
column_names,
get_test_data_from_configuration,
):
X, y = get_test_data_from_configuration(
input_type,
problem_type,
column_names=column_names,
lognormal_distribution=lognormal_distribution,
)
estimators = get_estimators(problem_type=problem_type)
pipeline_class = _get_pipeline_base_class(problem_type)
for estimator_class in estimators:
if problem_type in estimator_class.supported_problem_types:
parameters = {}
if is_time_series(problem_type):
parameters = {
"pipeline": {
"date_index": None,
"gap": 1,
"max_delay": 1,
"forecast_horizon": 3,
},
}
pipeline = make_pipeline(X, y, estimator_class, problem_type, parameters)
assert isinstance(pipeline, pipeline_class)
delayed_features = (
[DelayedFeatureTransformer]
if is_time_series(problem_type)
and estimator_class.model_family != ModelFamily.ARIMA
else []
)
ohe = (
[OneHotEncoder]
if estimator_class.model_family != ModelFamily.CATBOOST
and (
any(
ltype in column_names
for ltype in ["url", "email", "categorical"]
)
)
else []
)
datetime = (
[DateTimeFeaturizer]
if estimator_class.model_family
not in [ModelFamily.ARIMA, ModelFamily.PROPHET]
and "dates" in column_names
else []
)
standard_scaler = (
[StandardScaler]
if estimator_class.model_family == ModelFamily.LINEAR_MODEL
else []
)
log_transformer = (
[LogTransformer]
if lognormal_distribution and is_regression(problem_type)
else []
)
drop_null = [DropNullColumns] if "all_null" in column_names else []
text_featurizer = (
[TextFeaturizer]
if "text" in column_names and input_type == "ww"
else []
)
email_featurizer = [EmailFeaturizer] if "email" in column_names else []
url_featurizer = [URLFeaturizer] if "url" in column_names else []
imputer = (
[]
if ((column_names in [["ip"], ["dates"]]) and input_type == "ww")
or (
(column_names in [["ip"], ["text"], ["dates"]])
and input_type == "pd"
)
else [Imputer]
)
drop_col = (
[DropColumns]
if any(ltype in column_names for ltype in ["text"])
and input_type == "pd"
else []
)
expected_components = (
log_transformer
+ email_featurizer
+ url_featurizer
+ drop_null
+ text_featurizer
+ drop_col
+ imputer
+ datetime
+ delayed_features
+ ohe
+ standard_scaler
+ [estimator_class]
)
assert pipeline.component_graph.compute_order == [
component.name for component in expected_components
], test_description
def test_make_pipeline_problem_type_mismatch():
with pytest.raises(
ValueError,
match=f"{LogisticRegressionClassifier.name} is not a valid estimator for problem type",
):
make_pipeline(
pd.DataFrame(),
pd.Series(),
LogisticRegressionClassifier,
ProblemTypes.REGRESSION,
)
with pytest.raises(
ValueError,
match=f"{LinearRegressor.name} is not a valid estimator for problem type",
):
make_pipeline(
pd.DataFrame(), pd.Series(), LinearRegressor, ProblemTypes.MULTICLASS
)
with pytest.raises(
ValueError,
match=f"{Transformer.name} is not a valid estimator for problem type",
):
make_pipeline(pd.DataFrame(),
|
pd.Series()
|
pandas.Series
|
def dataframe_diff(df_x,df_y,key):
import pandas as pd
set_x=['df_x' for i in range(len(df_x))]
df_x['sets']=set_x
set_y=['df_y' for i in range(len(df_y))]
df_y['sets']=set_y
columns=list(df_x.columns)
columns.remove('sets')
df_concat=pd.concat([df_x,df_y]).drop_duplicates(subset=columns,keep=False).reset_index(drop=True)
df_set1=df_concat[df_concat['sets']=='df_x']
df_set2=df_concat[df_concat['sets']=='df_y']
df_merged=pd.merge(df_set1, df_set2, on=key)
nonkey=set(columns)- set(key)
list_diff=[]
for i in range(len(df_merged)):
for col in nonkey:
if df_merged.iloc[i][col + '_x'] != df_merged.iloc[i][col + '_y']:
list_diff.append(list(df_merged.iloc[i][key + [col + '_x',col + '_y']]) + [col])
df_diff=pd.DataFrame(list_diff,columns=key + ['value' + '_x','value' + '_y' ,'column_name'] )
df_additional =
|
pd.concat([df_x,df_y])
|
pandas.concat
|
import unittest
import os
import subprocess
import tempfile
import shutil
import pprint
from pathlib import Path
import numpy as np
class Test_pipeline(unittest.TestCase):
def test_feature_extractor_w_norm(self):
from npc_radiomics.feature_extractor import FeatureExtractor
from mnts.mnts_logger import MNTSLogger
globber = "^[0-9]+"
p_im = Path('../samples/images_not_normalized/')
p_seg = Path('../samples/segment/')
p_setting = Path('../samples/sample_pyrad_settings.yml')
with MNTSLogger('./', keep_file=False, verbose=True, log_level='debug') as logger, \
tempfile.TemporaryDirectory() as f:
# Create feature extractor
logger.info("{:-^50s}".format(" Testing feature extraction "))
fe = FeatureExtractor(id_globber=globber)
fe.param_file = p_setting
df = fe.extract_features_with_norm(p_im, p_seg, param_file = p_setting)
fe.save_features(p_setting.with_name('sample_features.xlsx'))
logger.info("\n" + df.to_string())
self.assertTrue(len(df) > 0)
self.assertTrue(p_setting.with_name('sample_features.xlsx').is_file())
logger.info("Feature extraction pass...")
# test save state
logger.info("{:-^50s}".format(" Testing save state "))
fe.save(Path('../samples/fe_saved_state.fe'))
fe.save(Path(f))
self.assertTrue(Path(f).joinpath('saved_state.fe').is_file())
# test load state
logger.info("{:-^50s}".format(" Testing load state "))
_fe = FeatureExtractor(id_globber=globber)
_fe.load(Path(f).joinpath('saved_state.fe'))
_df = fe.extract_features_with_norm(p_im, p_seg)
logger.info(f"Left:\n {_df.to_string()}")
logger.info(f"Right:\n {df.to_string()}")
def test_feature_extractor(self):
from npc_radiomics.feature_extractor import FeatureExtractor
from mnts.mnts_logger import MNTSLogger
globber = "^[0-9]+"
p_im = Path('../samples/images/')
p_seg = Path('../samples/segment')
p_setting = Path('../samples/sample_pyrad_settings.yml')
logger = MNTSLogger('./', keep_file=False, verbose=True, log_level='debug')
with tempfile.TemporaryDirectory() as f:
# Create feature extractor
logger.info("{:-^50s}".format(" Testing feature extraction "))
fe = FeatureExtractor(id_globber=globber, idlist=['1130', '1131'])
fe.param_file = p_setting
df = fe.extract_features(p_im, p_seg, param_file=p_setting)
fe.save_features(p_setting.with_name('sample_features.xlsx'))
logger.info("\n" + df.to_string())
self.assertTrue(len(df) > 0)
self.assertTrue(p_setting.with_name('sample_features.xlsx').is_file())
logger.info("Feature extraction pass...")
# test save state
logger.info("{:-^50s}".format(" Testing save state "))
fe.save(Path(f))
self.assertTrue(Path(f).joinpath('saved_state.fe').is_file())
# test load state
logger.info("{:-^50s}".format(" Testing load state "))
_fe = FeatureExtractor(id_globber=globber)
_fe.load(Path(f).joinpath('saved_state.fe'))
logger.debug(f"Loaded state: {_fe.saved_state}")
_df = fe.extract_features(p_im, p_seg)
# Display tested itmes
logger.info(f"Left:\n {_df.to_string()}")
logger.info(f"Right:\n {df.to_string()}")
def test_feature_extractor_w_aug(self):
from npc_radiomics.feature_extractor import FeatureExtractor
from mnts.mnts_logger import MNTSLogger
import torchio as tio
globber = "^[0-9]+"
p_im = Path('../samples/images/')
p_seg_A = Path('../samples/segment')
p_seg_B = Path('../samples/segment')
p_setting = Path('../samples/sample_pyrad_settings.yml')
transform = tio.Compose([
tio.ToCanonical(),
tio.RandomAffine(scales=[0.95, 1.05],
degrees=10),
tio.RandomFlip(axes='lr'),
tio.RandomNoise(mean=0, std=[0, 1])
])
logger = MNTSLogger('./', keep_file=False, verbose=True, log_level='debug')
with tempfile.TemporaryDirectory() as f:
# Create feature extractor
logger.info("{:-^50s}".format(" Testing feature extraction "))
fe = FeatureExtractor(id_globber=globber)
fe.param_file = p_setting
df = fe.extract_features(p_im, p_seg_A, p_seg_B, param_file=p_setting, augmentor=transform)
fe.save_features(p_setting.with_name('sample_features.xlsx'))
logger.info("\n" + df.to_string())
self.assertTrue(len(df) > 0)
self.assertTrue(p_setting.with_name('sample_features.xlsx').is_file())
logger.info("Feature extraction pass...")
def test_feature_extractor_param_file_load(self):
from npc_radiomics.feature_extractor import FeatureExtractor
from mnts.mnts_logger import MNTSLogger
globber = "^[0-9]+"
p_im = Path('../samples/images/')
p_seg = Path('../samples/segment')
p_setting = Path('../samples/sample_pyrad_settings.yml')
with MNTSLogger('./', keep_file=False, verbose=True, log_level='debug') as logger:
fe = FeatureExtractor(id_globber="^[0-9]+")
fe.param_file = p_setting
logger.info(f"Processed setting: {fe.param_file}")
logger.info(f"Saved state: {pprint.pformat(fe.saved_state)}")
self.assertFalse(fe.param_file == p_setting)
self.assertTrue(fe.param_file == p_setting.read_text())
def test_get_radiomics_features_w_aug(self):
from npc_radiomics.feature_extractor import FeatureExtractor, get_radiomics_features
from mnts.mnts_logger import MNTSLogger
from mnts.utils import get_unique_IDs, get_fnames_by_IDs
import torchio as tio
import pandas as pd
import os
globber = "^[0-9]+"
p_im = Path('../samples/images/')
p_seg_A = Path('../samples/segment')
p_seg_B = Path('../samples/segment')
p_setting = Path('../samples/sample_pyrad_settings.yml')
transform = tio.Compose([
tio.ToCanonical(),
tio.RandomAffine(scales=[0.95, 1.05],
degrees=10),
tio.RandomFlip(axes='lr'),
tio.RandomNoise(mean=0, std=[0, 1])
])
with MNTSLogger('./', keep_file=False, verbose=True, log_level='debug') as logger:
dfs = []
ids = get_unique_IDs(os.listdir(str(p_im)), "^[0-9]+")
logger.info(f"IDs: {ids}")
im_fs = get_fnames_by_IDs(os.listdir(str(p_im)), ids)
seg_a_fs = get_fnames_by_IDs(os.listdir(str(p_seg_A)), ids)
seg_b_fs = get_fnames_by_IDs(os.listdir(str(p_seg_B)), ids)
for im, seg_a, seg_b in zip(im_fs, seg_a_fs, seg_b_fs):
logger.info(f"Performing on: \n{pprint.pformat([im, seg_a, seg_b])}")
df = get_radiomics_features(p_im.joinpath(im),
p_seg_A.joinpath(seg_a),
p_setting,
p_seg_B.joinpath(seg_b),
id_globber="^(NPC|T1rhoNPC|K|P|RHO)?[0-9]{2,4}",
augmentor=transform)
logger.debug(f"df: {df}")
dfs.append(df)
dfs = pd.concat(dfs, axis=1)
new_index = [o.split('_') for o in dfs.index]
new_index = pd.MultiIndex.from_tuples(new_index, names=('Pre-processing', 'Feature_Group', 'Feature_Name'))
dfs.index = new_index
logger.debug(f"dfs:\n {dfs.drop('diagnostics').to_string()}")
pass
def test_feature_selection(self):
from npc_radiomics.feature_selection import FeatureSelector
from mnts.mnts_logger import MNTSLogger
import pandas as pd
globber = "^[0-9]+"
p_feat_a = Path('../samples/samples_feat_1st.xlsx')
p_feat_b = Path('../samples/samples_feat_2nd.xlsx')
p_gt = Path('../samples/sample_datasheet.csv')
features_a = pd.read_excel(str(p_feat_a), index_col=[0, 1, 2]).T
features_b = pd.read_excel(str(p_feat_b), index_col=[0, 1, 2]).T
gt = pd.read_csv(str(p_gt), index_col=0)
cases = set(features_a.index) & set(gt.index)
gt = gt.loc[cases]
passed = False
with MNTSLogger('./default.log', keep_file=False, verbose=True) as logger,\
tempfile.NamedTemporaryFile('wb', suffix = '.fss') as f:
fs = FeatureSelector(n_trials=20, boot_runs=5,
criteria_threshold=[0.1, 0.1, 0.1],
thres_percentage=0.2,
boosting=True) # Use default criteria, test with boosting
test_result = {x: "Untested" for x in ['Single feature set',
'Two paired feature sets',
'Save/load',
'n_trial = 1',
'n_trial & boot_run = 1']}
# Test one segmentation
logger.info("{:-^50s}".format(" Testing single feature set "))
try:
feats = fs.fit(features_a, gt)
test_result['Single feature set'] = "Passed"
except:
test_result['Single feature set'] = "Failed"
logger.info("Single feature set: Passed")
# Test two segmentation
logger.info("{:-^50s}".format(" Testing pair feature set "))
cases = set(features_a.index) & set(features_b.index) & set(gt.index)
try:
feats = fs.fit(features_a.loc[cases], gt.loc[cases], features_b.loc[cases])
test_result['Two paired feature sets'] = "Passed"
except:
test_result['Two paired feature sets'] = "Failed"
# Testing save and load function
logger.info("{:-^50s}".format(" Testing state save/load "))
try:
fs.save(Path(f.name))
_new_fs = FeatureSelector()
_new_fs.load(Path(f.name))
_feats = _new_fs.predict(features_a)
logger.info(f"Left:\n {_feats.T}")
logger.info(f"Right:\n {feats[0].T}")
logger.info(f"Save/load (Passed)")
test_result['Save/load'] = "Passed"
except:
test_result['Save/load'] = "Failed"
# Test single trial (feature selection using enet with frequency threshold)
logger.info("{:-^50s}".format(" Testing n_trial = 1 "))
try:
fs.setting['n_trials'] = 1
feats = fs.fit(features_a.loc[cases], gt.loc[cases], features_b.loc[cases])
logger.info("n_trial = 1: Passed")
test_result['n_trial = 1'] = "Passed"
except:
test_result['n_trial = 1'] = "Failed"
# Test single boot_run (feature selection using enet without frequency threshold)
logger.info("{:-^50s}".format(" Testing n_trial & boot_run = 1 "))
try:
fs.setting['n_trials'] = 1
fs.setting['boot_runs'] = 1
feats = fs.fit(features_a.loc[cases], gt.loc[cases], features_b.loc[cases])
logger.info(f"Single Enet run features extracted: {feats[0].columns}")
logger.info("n_trial & boot_run: Passed")
test_result['n_trial & boot_run = 1'] = "Passed"
except:
test_result['n_trial & boot_run = 1'] = "Failed"
logger.info(f"Test results: \n{pd.Series(test_result, name='Test results').to_frame().to_string()}")
self.assertFalse(all([x == "Passed" for x in test_result.items()]))
def test_model_building(self):
from npc_radiomics.feature_selection import FeatureSelector
from npc_radiomics.model_building import ModelBuilder
from mnts.mnts_logger import MNTSLogger
from sklearn.model_selection import train_test_split
import pandas as pd
globber = "^[0-9]+"
p_feat_a = Path('../samples/samples_feat_1st.xlsx')
p_gt = Path('../samples/sample_datasheet.csv')
p_fss = Path('../samples/fs_saved_state.fss')
features = pd.read_excel(str(p_feat_a), index_col=[0, 1, 2]).T
gt = pd.read_csv(str(p_gt), index_col=0)
cases = set(features.index) & set(gt.index)
gt = gt.loc[cases]
features = features.loc[cases]
with MNTSLogger('./default.log', keep_file=False, verbose=True, log_level='debug') as logger, \
tempfile.NamedTemporaryFile('wb', suffix='.pkl') as f:
fs = FeatureSelector()
fs.load(p_fss)
features = fs.predict(features)
logger.info(f"Selected features are: {features.T}")
# Random train test split
splitter = train_test_split(features.index, test_size=0.2)
train_feats, test_feats = splitter
logger.info(f"Training group: {train_feats}")
logger.info(f"Testing group: {test_feats}")
logger.info("{:-^50s}".format(" Building model "))
model = ModelBuilder()
# Test model building with testing data
try:
results, predict_table = model.fit(features.loc[train_feats], gt.loc[train_feats],
features.loc[test_feats], gt.loc[test_feats])
except:
logger.warning("Fitting with testing data failed!")
# Test model building without testing data
try:
results, _ = model.fit(features.loc[train_feats], gt.loc[train_feats])
except Exception as e:
logger.warning("Fitting without testing data failed!")
logger.exception(f"{e}")
logger.info(f"Results: {pprint.pformat(results)}")
logger.info(f"Predict_table: {predict_table.to_string()}")
logger.info(f"Best params: {pprint.pformat(model.saved_state)}")
# Test save functionality
logger.info("{:-^50s}".format(" Testing model save/load "))
model.save(Path(f.name))
# Test load functionality
_model = ModelBuilder()
_model.load(Path(f.name))
logger.debug(f"Saved state: {pprint.pformat(_model.saved_state)}")
_predict_table = _model.predict(features.loc[test_feats])
logger.info(f"Left:\n {_predict_table}")
logger.info(f"Right:\n {predict_table}")
pass
def test_controller_extraction(self):
from npc_radiomics.controller import Controller
from mnts.mnts_logger import MNTSLogger
p = Path('../samples/sample_controller_settings.yml')
p_im = Path('../samples/images_not_normalized/')
p_seg = Path('../samples/segment/')
p_gt = Path('../samples/sample_datasheet.csv')
p_pyrad = Path('../samples/sample_pyrad_settings.yml')
p_fe_state = Path('../samples/fe_saved_state.fe')
# extract feature was ported to the controller, test it
with MNTSLogger('./default.log', verbose=True, keep_file=False, log_level='debug') as logger:
ctl = Controller(setting=p, with_norm=True)
ctl.load_norm_settings(fe_state=p_fe_state)
df = ctl.extract_feature(p_im, p_seg, py_rad_param_file=p_pyrad)
logger.info(f"features {df}")
pass
def test_controller_load_norm(self):
from npc_radiomics.controller import Controller
from mnts.mnts_logger import MNTSLogger
p = Path('../samples/sample_controller_settings.yml')
p_norm_state = Path('../assets/t2wfs/')
p_norm_graph = Path('../assets/t2wfs/norm_graph.yml')
p_fe_state = Path('../samples/fe_saved_state.fe')
with MNTSLogger('./default.log', verbose=True, keep_file=False) as logger:
ctl = Controller(setting=p, with_norm=True)
ctl.load_norm_settings(norm_graph=p_norm_graph, norm_state_file=p_norm_state)
logger.info(f"State 1: \n{pprint.pformat(ctl.extractor.saved_state)}")
ctl.load_norm_settings(fe_state=p_fe_state)
logger.info(f"State 2: \n{pprint.pformat(ctl.extractor.saved_state)}")
def test_controller_fit(self):
from npc_radiomics.controller import Controller
from mnts.mnts_logger import MNTSLogger
p = Path('../samples/sample_controller_settings.yml')
p_im = Path('../samples/images_not_normalized/')
p_seg = Path('../samples/segment/')
p_gt = Path('../samples/sample_datasheet.csv')
p_pyrad = Path('../samples/sample_pyrad_settings.yml')
p_fe_state = Path('../samples/fe_saved_state.fe')
# extract feature was ported to the controller, test it
with MNTSLogger('./default.log', verbose=True, keep_file=False, log_level='debug') as logger, \
tempfile.NamedTemporaryFile('wb', suffix='.ctl') as f:
ctl = Controller(setting=p, with_norm=True)
ctl.load_norm_settings(fe_state=p_fe_state)
ctl.fit(p_im, p_seg, p_gt)
ctl.save(f.name)
_ctl = Controller()
_ctl.load(f.name)
logger.info(f"Saved state: {_ctl.saved_state}")
def test_stability_metric(self):
from npc_radiomics.perf_metric import getStability, confidenceIntervals, hypothesisTestT, hypothesisTestV, feat_list_to_binary_mat
import pandas as pd
from mnts.mnts_logger import MNTSLogger
with MNTSLogger('./default.log', verbose=True, keep_file=False) as logger:
test_result = {x: "Untested" for x in ['Binary feature map',
'Stability measure',
'Statistical Test'
]}
p_sel_1 = Path('../samples/sample_selected_feat_1.xlsx')
p_sel_2 = Path('../samples/sample_selected_feat_2.xlsx')
p_feat_list = Path('../samples/samples_feat_1st.xlsx')
sel_1 = pd.read_excel(p_sel_1, index_col=0).fillna('').astype(str)
sel_2 = pd.read_excel(p_sel_2, index_col=0).fillna('').astype(str)
feats = [str(s) for s in
|
pd.read_excel(p_feat_list, index_col=[0, 1, 2])
|
pandas.read_excel
|
"""
Contain codes about parse plate info and generate sample sheet
"""
import pathlib
import re
from collections import OrderedDict
import pandas as pd
import cemba_data
# Load defaults
PACKAGE_DIR = pathlib.Path(cemba_data.__path__[0])
# the Illumina sample sheet header used by Ecker Lab
with open(PACKAGE_DIR / 'files/sample_sheet_header.txt') as _f:
SAMPLESHEET_DEFAULT_HEADER = _f.read()
SECTIONS = ['[CriticalInfo]', '[LibraryInfo]', '[PlateInfo]']
LIMITED_CHOICES = {
'n_random_index': [8, 384, '8', '384'],
'input_plate_size': [384, '384'],
'primer_quarter': ['Set1_Q1', 'Set1_Q2', 'Set1_Q3', 'Set1_Q4',
'SetB_Q1', 'SetB_Q2', 'SetB_Q3', 'SetB_Q4']}
CRITICAL_INFO_KEYS = ['n_random_index', 'input_plate_size',
'pool_id', 'tube_label', 'email']
# key (n_random_index, input_plate_size)
BARCODE_TABLE = {
('8', '384'): PACKAGE_DIR / 'files/V1_i7_i5_index.tsv', # V1 can use both Set1 and SetB i5 i7 primer
('384', '384'): PACKAGE_DIR / 'files/V2_i7_i5_index.tsv' # V2 only use SetB primer
}
def _clean_str_for_path(str_in):
# replace special char with _
str_out = re.sub('[^a-zA-Z0-9]', '_', str_in.strip())
return str_out
def _get_kv_pair(line):
try:
k, v = line.split('=')
if k == 'email':
return k, v
else:
return _clean_str_for_path(k), _clean_str_for_path(v)
except ValueError:
raise ValueError(f'Each key=value line must contain a "=" to separate key and value. Got {line}')
def _read_plate_info(plate_info_path):
"""Parse the plate info file"""
cur_section = ''
cur_section_id = -1
critical_info = {}
library_info = OrderedDict()
plate_header = True
plate_info = []
with open(plate_info_path) as f:
for line in f:
line = line.strip('\n')
if line == '' or line.startswith('#'):
continue
# determine section
if line.startswith('['):
cur_section_id += 1
if line == SECTIONS[cur_section_id]:
cur_section = line
else:
raise ValueError(
f'Section name and order must be [CriticalInfo] [LibraryInfo] [PlateInfo], '
f'got {line} at No.{cur_section_id + 1} section.')
elif cur_section == '[CriticalInfo]':
k, v = _get_kv_pair(line)
if k not in CRITICAL_INFO_KEYS:
raise ValueError(f'Unknown key {k} in [CriticalInfo]')
else:
critical_info[k] = v
elif cur_section == '[LibraryInfo]':
k, v = _get_kv_pair(line)
if (k in critical_info.keys()) or (k in library_info.keys()):
raise ValueError(f'Found duplicated key {k}')
else:
library_info[k] = v
elif cur_section == '[PlateInfo]':
ll = line.split('\t')
if plate_header:
plate_header = False
plate_info.append(ll)
else:
raise ValueError(f'Got a malformed line: {line}')
for k in CRITICAL_INFO_KEYS:
if k not in critical_info:
raise ValueError(f'[CriticalInfo] missing key-value pair "{k}"')
header = plate_info[0]
plate_info =
|
pd.DataFrame(plate_info[1:], columns=plate_info[0])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import matplotlib
#matplotlib.use("Agg")
import seaborn as sns
from matplotlib import pyplot as plt
import sys
colors2 = sns.color_palette("Set1", 12)[6:]
colors = [(0.6, 0.6, 0.6)] * 12
def read_dfs(i, j):
df = pd.read_csv(sys.argv[i], sep="\t", index_col="sample")
df2 = pd.read_csv(sys.argv[j], sep="\t", index_col="sample")
try:
df = df.drop(["RGP_426_3", "RGP_430_3", "RGP_462_3"])
df2 = df2.drop(["RGP_426_3", "RGP_430_3", "RGP_462_3"])
except:
pass
dfo = df.join(df2)
dfo.drop("comphet_side", inplace=True, axis="columns")
ad = dfo.auto_dom.copy()
ad =
|
pd.DataFrame({"number_of_variants": ad})
|
pandas.DataFrame
|
### 공시지가/아파트실거래가/연립주택실거래가 K-NN ###
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, classification_report
import sklearn.neighbors as neg
import matplotlib.pyplot as plt
import json
import sklearn.preprocessing as pp
import warnings
warnings.filterwarnings('ignore')
## 데이터 전처리 ## --> 이상치 제거, 표준화 필요 ##
all_data = pd.read_csv("d:/project_data/house_clean02.csv", dtype=np.str, encoding='euc-kr') # encodig: 'euc-kr'
# 면적 당 공시지가 추가 # --> string type이므로 astype을 통해 타입 변경
all_data['y_price'] = all_data['공시지가'].astype(np.float32) / all_data['면적'].astype(np.float32)
# X: (x, y) / y: (면적 당 공시지가) #
X = all_data.iloc[:, 9:11].astype(np.float32) # shape (28046, 2)
y = all_data['y_price'] # shape (28046, )
## Robust scaling ## --> 이상치를 반영한 정규화(min-max)
rs = pp.RobustScaler()
y_scale = rs.fit_transform(np.array(y).reshape(-1, 1))
## 실거래가 아파트 데이터 전처리 ## --> shape (281684, 7)
all_data_apt = pd.read_csv("d:/project_data/total_Apt.csv", sep=",", encoding='euc-kr')
all_data_apt['price_big'] = all_data_apt['Price'] / all_data_apt['Howbig']
X_apt = all_data_apt.iloc[:, -3:-1] # shape (281684, 2)
y_apt_scale = rs.fit_transform(np.array(all_data_apt['price_big']).reshape(-1, 1)) # shape(281684, 1)
## 실거래가 연립 데이터 전처리 ## --> shape ()
all_data_town = pd.read_csv("d:/project_Data/total_Townhouse01.csv", sep=",", encoding="cp949")
all_data_town['price_big'] = all_data_town['Price'] / all_data_town['Howbig']
X_town = all_data_town.iloc[:, -3:-1] # shape (281684, 2)
y_town_scale = rs.fit_transform(np.array(all_data_town['price_big']).reshape(-1, 1)) # shape(281684, 1)
## 어린이집 데이터 전처리 ##
all_center =
|
pd.read_csv("d:/project_data/all_center9.csv", encoding="euc-kr")
|
pandas.read_csv
|
import pandas as pd
from zipline.gens.brokers.ib_broker import IBBroker
from zipline import run_algorithm
tws_uri = 'localhost:7496:1236'
brokerobj = IBBroker (tws_uri)
start =
|
pd.to_datetime('2020-06-25')
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""Road-Friction-Forecasting.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1W15eOQbeHp9wbJWRaE0f7ZfYv_jAj14O
# Authors:
**<NAME>**
* LinkedIn: https://www.linkedin.com/in/md-abrar-jahin-9a026018b
* Facebook: https://www.facebook.com/
* Github: https://github.com/Abrar2652
* email: <EMAIL>
**<NAME>**
* Website: https://krutsylo.neocities.org
* email: <EMAIL>
# Import Libraries and Packages
After importing libraries and packages, we start off by defining a function `transform_to_supervised` that creates desired **lag** *(24 hours in this case)* and **forecasting** features *(1 hour)* of our independent variables concatening with the dataframe and returns the final dataframe.
"""
import os
import optuna
import pickle
import pandas as pd
from optuna import Trial
from optuna.samplers import TPESampler
from sklearn.impute import KNNImputer
from sklearn.model_selection import StratifiedKFold, cross_val_score
from xgboost import XGBClassifier, XGBRegressor
from matplotlib import pyplot as plt
from sklearn.metrics import mean_absolute_error, accuracy_score, balanced_accuracy_score
import numpy as np
def transform_to_supervised(df,
previous_steps=1,
forecast_steps=1,
dropnan=False):
"""
https://gist.github.com/monocongo/6e0df19c9dd845f3f465a9a6ccfcef37
Transforms a DataFrame containing time series data into a DataFrame
containing data suitable for use as a supervised learning problem.
Derived from code originally found at
https://machinelearningmastery.com/convert-time-series-supervised-learning-problem-python/
:param df: pandas DataFrame object containing columns of time series values
:param previous_steps: the number of previous steps that will be included in the
output DataFrame corresponding to each input column
:param forecast_steps: the number of forecast steps that will be included in the
output DataFrame corresponding to each input column
:return Pandas DataFrame containing original columns, renamed <orig_name>(t), as well as
columns for previous steps, <orig_name>(t-1) ... <orig_name>(t-n) and columns
for forecast steps, <orig_name>(t+1) ... <orig_name>(t+n)
"""
# original column names
col_names = df.columns
# list of columns and corresponding names we'll build from
# the originals found in the input DataFrame
cols, names = list(), list()
# input sequence (t-n, ... t-1)
# Lag features
for i in range(previous_steps, 0, -1):
cols.append(df.shift(i))
names += [('%s(t-%d)' % (col_name, i)) for col_name in col_names]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, forecast_steps):
cols.append(df.shift(-i))
if i == 0:
names += [('%s(t)' % col_name) for col_name in col_names]
else:
names += [('%s(t+%d)' % (col_name, i)) for col_name in col_names]
# put all the columns together into a single aggregated DataFrame
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
"""# Data Collection and Import data
The dataset has been collected from the **Smart Road - Winter Road Maintenance Challenge 2021** organized by *UiT The Arctic University of Norway* on Devpost.
Dataset download link: https://uitno.app.box.com/s/bch09z27weq0wpcv8dbbc18sxz6cycjt
After downloading the `smart_road_measurements.csv` file from the competition page, wehad added extra columns collecting data from the external resources authorized the organizers. The links of the external datasets are:
[1] Weather data https://pypi.org/project/wwo-hist/
[2] UV Index data https://pyowm.readthedocs.io/en/latest/v3/uv-api-usage-examples.html
After merging these 3 files together based on the same dates, we finalized our main dataset `smart_road_measurements_new_d_weather.csv` on top of which we will build our model after preprocessing.
"""
df = pd.read_csv("/content/smart_road_measurements_new_d_weather.csv", header=0)
df2 = df.copy()
df.head(15)
"""# Exploratory Data Analysis
Our dataset contains 349613 rows and 29 columns
"""
df.shape
df.info()
import numpy as np
np.random.seed(0)
import seaborn as sns
sns.set_theme()
_ = sns.heatmap(df2.iloc[:,2:11].corr())
_ = sns.heatmap(df2.corr())
"""We want to predict Friction of the road by weather conditions. So,
this is a classification task. Every day the car drives on a new route.
This means that all 11 days we receive data on new road sections. So, the
only link between the road sections is the average weather conditions.
This can be achieved by filtering the rows on **Microsoft Excel** for each date and get the total distance covered (the last row on each date because the column is cumulative in nature)
**Max Distance traveled, Date**
42441, 16/02/2021
92311, 17/02/2021
150216, 18/02/2021
39007, 19/02/2021
71358, 22/02/2021
81999, 23/02/2021
55958, 24/02/2021
77315, 25/02/2021
55647, 26/02/2021
61534, 1/03/2021
12409, 2/03/2021
**Therefore, we can see from the above data that for all 11 days the car was driving at different routes**
* We drop the `Distance` because the condition of the road does not depend on how much the car has traveled before. We use this column to get the speed and slope of the road.
* This means that we are using normalized data + lag (time-series
classification with engineered features instead of time-series
classification with deep learning, because we have shallow data).
We won't focus on any complicated models, just XGBClassifier to win.
* Now we need to define at what Friction the road is dangerous (label 0),
requires caution (label-1) and safe (label-2).
Ta, Tsurf, friction are **highly correlated** which has been shown in our pandas profiling
https://krutsylo.neocities.org/SmartRoads/pandas3.html of the smart road dataset.
Yet we'll drop State, Height, Distance, Ta, Tsurf, Water, moon-illumination, uvIndex columns
"""
df = df.drop("Height", axis=1) # contain N/A
df = df.drop("Distance", axis=1)
df = df.drop("State", axis=1)
df = df.drop("Ta", axis=1)
df = df.drop("Tsurf", axis=1)
df = df.drop("Water", axis=1)
df = df.drop("moon_illumination", axis=1)
df = df.drop("uvIndex", axis=1)
df.head()
""" We have grouped the data by calculating the mean of the rows in each hour based on the individual dates. For instance, if there are 8 rows for each hour, we calculated the mean of 8 rows and thus converted into a single row belonging to the distinct dates.
We also avoided duplicates to reduce the noise in the data.
"""
df['Time(+01:00)'] = pd.to_datetime(df['Time(+01:00)'], format='%H:%M:%S').dt.hour
df = df.groupby(['Date','Time(+01:00)']).mean()
df = df.drop_duplicates()
df.head()
"""Now we will work on the target feature that is `Friction` column to accomplish our objective since we want to perform a supervised machine learning model. Here we applied our knowledge of physics and research capabilities.
Icy: These roads typically have the lowest coefficient of friction. For drivers, this is the most dangerous surface to be on. The small coefficient of friction gives the driver the least amount of traction when accelerating, braking, or turning (which has angular acceleration). Icy roads have a frictional coefficient of around 0.1.
Wet: Roads wet with water have a coefficient of friction of around .4. This is around 4 times higher than an icy road. Although these roads are much safer to drive on, there is still the possibility of hydroplaning. Hydroplaning occurs when there is standing or flowing water on the road (typically from rainfall) that causes a tire to lose contact with the road's surface. The treads are designed to allow water to fill the crevices so that contact may be maintained between the road and the tire. However, if there is too much water, this may not be achieved, and hydroplaning will occur. This is precisely the reason that racing slicks have such a high coefficient of friction on dry roads (about .9) and a much lower coefficient on wet roads (as low as .1).
Dry: Roads without precipitation are considered optimal for driving conditions. They have the highest coefficient of friction, around 0.9, which creates the most traction. This allows corners, acceleration, and braking to reach higher values without loss of control. Oftentimes, if roads are not dry, races will be canceled due to the extreme dangers that a less than optimal frictional surface can pose.
So, we'll take (0 <= friction < 0.5) as *dangerous*, and (0.5 < friction <= 1) as *safe*
"""
bins = [0, 0.5, 1]
labels = [0, 1]
df["Friction"] = pd.cut(df["Friction"], bins, labels=labels)
#df = df.drop("Date", axis=1)
#df = df.drop("Time(+01:00)", axis=1)
df.head()
"""Now we'll perform lagging and forecasting feature columns by shifting simply using our pre-defined `transform_to_supervise` function."""
df = transform_to_supervised(df, previous_steps=24, forecast_steps=1, dropnan=True)
Y = df.loc[:, "Friction(t)"].to_numpy()
cols = [c for c in df.columns if '(t)' not in c]
data=df[cols]
data['Friction'] = Y
data.to_csv('/content/test.csv')
data = data.values.tolist()
df[cols].head()
"""**Lag of 1 to 3 days**"""
lag = pd.read_csv('/content/lag(1-3)days.csv')
lag=lag.head(10)
lag
ax = lag.plot(x="Date", y="humidity(t-3)", kind="bar")
lag.plot(x="Date", y="humidity(t-2)", kind="bar", ax=ax, color="C2")
lag.plot(x="Date", y="humidity(t-1)", kind="bar", ax=ax, color="C3")
ax = lag.plot(x="Date", y="windspeedKmph(t-3)", kind="bar")
lag.plot(x="Date", y="windspeedKmph(t-2)", kind="bar", ax=ax, color="C2")
lag.plot(x="Date", y="windspeedKmph(t-1)", kind="bar", ax=ax, color="C3")
"""# Statistical Analysis
**Mean values of each column**
"""
mean =
|
pd.read_csv('/content/Mean.csv')
|
pandas.read_csv
|
# Load the library with the iris dataset
from sklearn.datasets import load_iris
# Load scikit's random forest classifier library
from sklearn.ensemble import RandomForestClassifier
from scipy import interp
# Using Skicit-learn to split data into training and testing sets
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, mean_squared_error, roc_auc_score,roc_curve, auc
from sklearn.ensemble import RandomForestRegressor
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from statistics import mean, stdev
import seaborn as sns
from sklearn.model_selection import StratifiedKFold
# Load pandas
import pandas as pd
# Load numpy
import numpy as np
from sklearn import preprocessing
from numpy import array
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score,cross_val_predict
def average(nums, default=float('nan')):
return sum(nums) / float(len(nums)) if nums else default
def read_csv(csv_file, nrows=None):
df =
|
pd.read_csv(csv_file, nrows=nrows)
|
pandas.read_csv
|
'''
Group enabled ANPNetwork class and supporting classes.
'''
from pyanp.pairwise import Pairwise
from pyanp.prioritizer import Prioritizer, PriorityType
from pyanp.general import islist, unwrap_list, get_matrix, matrix_as_df
from typing import Union
import pandas as pd
from copy import deepcopy
from pyanp.limitmatrix import normalize, calculus, priority_from_limit
import numpy as np
import re
from pyanp.rating import Rating
class ANPNode:
'''
A node inside a cluster, inside a netowrk. The basic building block of
an ANP netowrk.
:param network: An ANPNetwork object that this node lives inside.
:param cluster: An ANPCluster object that this node lives inside.
:param name: The name of this node.
'''
def __init__(self, network, cluster, name:str):
self.name = name
self.cluster = cluster
self.network = network
self.node_prioritizers = {}
self.subnetwork = None
self.invert = False
def is_node_cluster_connection(self, dest_cluster:str)->bool:
'''
Is this node connected to a cluster.
:param dest_cluster: The name of the cluster
:return: True/False
'''
if dest_cluster in self.node_prioritizers:
return True
else:
return False
def node_connect(self, dest_node)->None:
''''
Make a node connection from this node to dest_node
:param dest_node: The destination node as a str, int, or ANPNode. It
can be a list of nodes, and then we will coonect each node from
this node. The dest_node should be in any format accepted by
ANPNetwork._get_node()
'''
if islist(dest_node):
for dn in dest_node:
self.node_connect(dn)
else:
prioritizer = self.get_node_prioritizer(dest_node, create=True)
prioritizer.add_alt(dest_node, ignore_existing=True)
#Make sure parent clusters are connected
src_cluster = self.cluster
dest_cluster = self.network._get_node_cluster(dest_node)
src_cluster.cluster_connect(dest_cluster)
def get_node_prioritizer(self, dest_node, create=False,
create_class=Pairwise, dest_is_cluster=False)->Prioritizer:
'''
Gets the node prioritizer for the other_node
:param dest_node: The node as a int, str, or ANPNode object.
:return: The prioritizer if it exists, or None
'''
if dest_is_cluster:
dest_cluster = self.network.cluster_obj(dest_node)
dest_name = dest_cluster.name
else:
dest_cluster = self.network._get_node_cluster(dest_node)
dest_name = dest_cluster.name
if dest_name not in self.node_prioritizers:
if create:
prioritizer = create_class()
self.node_prioritizers[dest_name] = prioritizer
return prioritizer
else:
return None
else:
return self.node_prioritizers[dest_name]
def is_node_node_connection(self, dest_node)->bool:
'''
Checks if there is a node connection from this node to dest_node
:param dest_node: The node as a int, str, or ANPNode object.
:return:
'''
pri = self.get_node_prioritizer(dest_node)
if pri is None:
return False
elif not pri.is_alt(dest_node):
return False
else:
return True
def get_unscaled_column(self, username=None)->pd.Series:
'''
Returns the column in the unscaled supermatrix for this node.
:param username: The user/users to do this for. Typical Prioritizer
calculation usage, i.e. None means do for all group average.
:return: A pandas series indexed by the node names.
'''
nnodes = self.network.nnodes()
rval = pd.Series(data=[0.0]*nnodes, index=self.network.node_names())
prioritizer:Prioritizer
for prioritizer in self.node_prioritizers.values():
vals = prioritizer.priority(username, PriorityType.NORMALIZE)
for alt, val in vals.iteritems():
rval[alt] = val
return rval
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
pri:Prioritizer
for pri in self.node_prioritizers.values():
pri.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def set_node_prioritizer_type(self, destNode, prioritizer_class):
'''
Sets the node prioritizer type
:param destNode: An ANPNode object, string, or integer location
:param prioritizer_class: The new type
:return: None
'''
pri = self.get_node_prioritizer(destNode, create_class=prioritizer_class)
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
dest_cluster = self.network._get_node_cluster(destNode)
dest_name = dest_cluster.name
self.node_prioritizers[dest_name] = rval
else:
pass
class ANPCluster:
'''
A cluster in an ANP object
:param network: The ANPNetowrk object this cluster is in.
:param name: The name of the cluster to create.
'''
def __init__(self, network, name:str):
self.prioritizer = Pairwise()
self.name = name
self.network = network
# The list of ANP nodes in this cluster
self.nodes = {}
def add_node(self, *nodes)->None:
"""
Adds one or more nodes
:param nodes: A vararg list of node names to add to this cluster.
The names should all be strings.
:return: Nonthing
"""
nodes = unwrap_list(nodes)
if islist(nodes):
for node in nodes:
if isinstance(node, str):
self.add_node(node)
else:
self.nodes[nodes] = ANPNode(self.network, self, nodes)
def nnodes(self)->int:
"""
:return: The number of nodes in this cluster.
"""
return len(self.nodes)
def is_node(self, node_name:str)->bool:
'''
Does a node by that name exist in this cluster
:param node_name: The name of the node to look for
:return: True/False
'''
return node_name in self.nodes
def node_obj(self, node_name):
"""
Get a node in this cluster.
:param node_name: The node as either a string name, integer position, or
simply the ANPObject, in which case there is nothing to do except
return it.
:return: ANPNode object. If it wasn't found, None is returned.
"""
if isinstance(node_name, ANPNode):
return node_name
else:
return get_item(self.nodes, node_name)
def node_names(self)->list:
'''
:return: List of the string names of the nodes in this cluster
'''
return list(self.nodes.keys())
def node_objs(self)->list:
'''
:return: List of the ANPNode objects in this cluster.
'''
return self.nodes.values()
def cluster_connect(self, dest_cluster)->None:
"""
Make a cluster->cluster connection from this node to the destination.
:param dest_cluster: Either the ANPCluster object to connect to, or
the name of the destination cluster.
:return:
"""
if isinstance(dest_cluster, ANPCluster):
dest_cluster_name = dest_cluster.name
else:
dest_cluster_name = dest_cluster
self.prioritizer.add_alt(dest_cluster_name, ignore_existing=True)
def set_prioritizer_type(self, prioritizer_class)->None:
'''
Sets the cluster prioritizer type
:param prioritizer_class: The new type
:return: None
'''
pri = self.prioritizer
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
self.prioritizer = rval
else:
pass
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
if self.prioritizer is not None:
self.prioritizer.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def get_item(tbl:dict, key):
"""
Looks up an item in a dictionary by key first, assuming the key is in the
dictionary. Otherwise, it checks if the key is an integer, and returns
the item in that position.
:param tbl: The dictionary to look in
:param key: The key, or integer position to get the item of
:return: The item, or it not found, None
"""
if key in tbl:
return tbl[key]
elif not isinstance(key, int):
return None
# We have an integer key by this point
if key < 0:
return None
elif key >= len(tbl):
return None
else:
count = 0
for rval in tbl.values():
if count == key:
return rval
count+=1
#Should never make it here
raise ValueError("Shouldn't happen in anp.get_item")
__CLEAN_SPACES_RE = re.compile('\\s+')
def clean_name(name:str)->str:
"""
Cleans up a string for usage by:
1. stripping off begging and ending spaces
2. All spaces convert to one space
3. \t and \n are treated like a space
:param name: The string name to be cleaned
:return: The cleaned name.
"""
rval = name.strip()
return __CLEAN_SPACES_RE.sub(string=rval, repl=' ')
def sum_subnetwork_formula(priorities:pd.Series, dict_of_series:dict):
"""
A function that takes the weighted sum of values. Used for synthesis.
:param priorities: Series whose index are the nodes with subnetworks and
values are their weights.
:param dict_of_series: A dictionary whose keys are the same as the keys of
priorities, i.e. the nodes with subnetworks. The values are Series
whose keys are alternative names and values are the synthesized
alternative scores under that subnetwork.
:return:
"""
subpriorities = priorities[dict_of_series.keys()]
if sum(subpriorities) != 0:
subpriorities /= sum(subpriorities)
rval = pd.Series()
counts = pd.Series(dtype=int)
for subnet_name, vals in dict_of_series.items():
priority = subpriorities[subnet_name]
for alt_name, val in vals.iteritems():
if alt_name in rval:
rval[alt_name] += val * priority
counts[alt_name] += priority
else:
rval[alt_name] = val
counts[alt_name] = priority
# Now let's calculate the averages
for alt_name, val in rval.iteritems():
if counts[alt_name] > 0:
rval[alt_name] /= counts[alt_name]
return rval
class ANPNetwork(Prioritizer):
'''
Represents an ANP prioritizer. Has clusters/nodes, comparisons, etc.
:param create_alts_cluster: If True (which is the default) we start with a
cluster that is the alternatives cluster. Otherwise the model starts
empty.
'''
def __init__(self, create_alts_cluster=True):
self.clusters = {}
if create_alts_cluster:
cl = self.add_cluster("Alternatives")
self.alts_cluster = cl
self.users=[]
self.limitcalc = calculus
self.subnet_formula = sum_subnetwork_formula
self.default_priority_type = None
def add_cluster(self, *args)->ANPCluster:
'''
Adds one or more clusters to a network
:param args: Can be either a single string, or a list of strings
:return: ANPCluster object or list of ANPCluster objects
'''
clusters = unwrap_list(args)
if islist(clusters):
rval = []
for cl in clusters:
rval.append(self.add_cluster(cl))
return rval
else:
#Adding a single cluster
cl = ANPCluster(self, clusters)
self.clusters[clusters] = cl
return cl
def cluster_names(self)->list:
'''
:return: List of string names of the clusters
'''
return list(self.clusters.keys())
def nclusters(self)->int:
'''
:return: The number of clusters in the network.
'''
return len(self.clusters)
def cluster_obj(self, cluster_info:Union[ANPCluster, str])->ANPCluster:
'''
Returns the cluster with given information
:param cluster_info: Either the name of the cluster object to get
or the cluster object, or its int position
:return: The ANPCluster object
'''
if isinstance(cluster_info, ANPCluster):
return cluster_info
else:
return get_item(self.clusters, cluster_info)
def add_node(self, cl, *nodes):
'''
Adds nodes to a cluster
:param cl: The cluster name or object
:param nodes: The name or names of the nodes
:return: Nothing
'''
cluster = self.cluster_obj(cl)
cluster.add_node(nodes)
def nnodes(self, cluster=None)->int:
"""
Returns the number of nodes in the network, or a cluster.
:param cluster: If None, we return the number of nodes in the network.
Otherwise this is the integer position, string name, or ANPCluster
object of the cluster to get the node count within.
:return: The count.
"""
if cluster is None:
rval = pd.Series()
for cname, cluster in self.clusters.items():
rval[cname] = cluster.nnodes()
return sum(rval)
else:
clobj = self.cluster_obj(cluster)
return clobj.nnodes()
def add_alt(self, alt_name:str):
"""
Adds an alternative to the model:
1. Adds the altenrative to alts_cluster if not None
2. For each node with a subnetwork, we add the alternative to that subnetwork.
:param alt_name: The name of the alternative to add
:return: Nothing
"""
if self.alts_cluster is not None:
self.add_node(self.alts_cluster, alt_name)
# We should add this alternative to each subnetwork
for node in self.node_objs_with_subnet():
node.subnetwork.add_alt(alt_name)
def is_user(self, uname)->bool:
'''
Checks if a user exists
:param uname: The name of the user to check for
:return: bool
'''
return uname in self.users
def is_alt(self, altname)->bool:
'''
Checks if an alternative exists
:param altname: The alterantive name to look for
:return: bool
'''
return self.alts_cluster.is_node(altname)
def add_user(self, uname, ignore_dupe=False):
'''
Adds a user to the system
:param uname: The name of the new user
:return: Nothing
:raise ValueError If the user already existed
'''
if islist(uname):
for un in uname:
self.add_user(un, ignore_dupe=ignore_dupe)
return
if self.is_user(uname):
if not ignore_dupe:
raise ValueError("User by the name "+uname+" already existed")
else:
return
self.users.append(uname)
def nusers(self)->int:
'''
:return: The number of users
'''
return len(self.users)
def user_names(self)->list:
'''
:return: List of names of the users
'''
return deepcopy(self.users)
def node_obj(self, node_name)->ANPNode:
'''
Gets the ANPNode object of the node with the given name
:param node_name: The name of the node to get, or it's overall integer
position, or the ANPNode object itself
:return: The ANPNode if it exists, or None
'''
if isinstance(node_name, ANPNode):
return node_name
elif isinstance(node_name, int):
#Reference by integer
node_pos = node_name
node_count = 0
for cluster in self.clusters.values():
rel_pos = node_pos - node_count
if rel_pos < cluster.nnodes():
return cluster.node_obj(rel_pos)
#If we make it here, we were out of bounds
return None
#Okay handle string node name
cluster: ANPCluster
for cname, cluster in self.clusters.items():
rval = cluster.node_obj(node_name)
if rval is not None:
return rval
#Made it here, the node didn't exist
return None
def _get_node_cluster(self, node)->ANPCluster:
'''
Gets the ANPCluster object a node lives in
:param node: The name/integer positions, or ANPNode object itself. See
node_obj() method for more details.
:return: The ANPCluster object this node lives in, or None if it doesn't
exist.
'''
n = self.node_obj(node)
if n is None:
# Could not find the node
return None
return n.cluster
def node_connect(self, src_node, dest_node):
'''
connects 2 nodes
:param src_node: Source node as prescribed by node_object() function
:param dest_node: Destination node as prescribed by node_object() function
:return: Nothing
'''
src = self.node_obj(src_node)
src.node_connect(dest_node)
def node_names(self, cluster=None)->list:
'''
Returns a list of nodes in this network, organized by cluster
:param cluster: If None, we get all nodes in network, else we get nodes
in that cluster, otherwise format as specified by cluster_obj() function.
:return: List of strs of node names
'''
if cluster is not None:
cl = self.cluster_obj(cluster)
return cl.node_names()
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_names()
for name in cnodes:
rval.append(name)
return rval
def node_objs(self)->list:
'''
Returns a list of ANPNodes in this network, organized by cluster
:return: List of strs of node names
'''
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_objs()
for name in cnodes:
rval.append(name)
return rval
def cluster_objs(self)->list:
"""
:return: List of ANPCluster objects in the network
"""
return list(self.clusters.values())
def node_connections(self)->np.ndarray:
"""
Returns the node conneciton matrix for this network.
:return: A numpy array of shape [nnode, nnodes] where item [row, col]
1 means there is a node connection from col -> row, and 0 means
no connection.
"""
nnodes = self.nnodes()
nnames = self.node_names()
rval = np.zeros([nnodes, nnodes])
src_node:ANPNode
for src in range(nnodes):
srcname = nnames[src]
src_node = self.node_obj(srcname)
for dest in range(nnodes):
dest_name = nnames[dest]
if src_node.is_node_node_connection(dest_name):
rval[dest,src]=1
return rval
def unscaled_supermatrix(self, username=None, as_df=False)->np.array:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The unscaled supermatrix as a numpy.array of shape [nnode, nnodes]
'''
nnodes = self.nnodes()
rval = np.zeros([nnodes, nnodes])
nodes = self.node_objs()
col = 0
node:ANPNode
for node in nodes:
rval[:,col] = node.get_unscaled_column(username)
col += 1
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def scaled_supermatrix(self, username=None, as_df=False)->np.ndarray:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The scaled supermatrix
'''
rval = self.unscaled_supermatrix(username)
# Now I need to normalized by cluster weights
clusters = self.cluster_objs()
nclusters = len(clusters)
col = 0
for col_cp in range(nclusters):
col_cluster:ANPCluster = clusters[col_cp]
row_nnodes = col_cluster.nnodes()
cluster_pris = col_cluster.prioritizer.priority(username, PriorityType.NORMALIZE)
row_offset = 0
for col_node in col_cluster.node_objs():
row=0
for row_cp in range(nclusters):
row_cluster:ANPCluster = clusters[row_cp]
row_cluster_name = row_cluster.name
if row_cluster_name in cluster_pris:
priority = cluster_pris[row_cluster_name]
else:
priority = 0
for row_node in row_cluster.node_objs():
rval[row, col] *= priority
row += 1
col += 1
normalize(rval, inplace=True)
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def global_priority(self, username=None)->pd.Series:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:return: The global priorities Series, index by node name
'''
lm = self.limit_matrix(username)
rval = priority_from_limit(lm)
node_names = self.node_names()
return pd.Series(data=rval, index=node_names)
def global_priority_df(self, user_infos=None)->pd.DataFrame:
'''
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: The global priorities dataframe. Rows are the nodes and
columns are the users. The first user/column is the Group Average
'''
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
for user in user_infos:
if user is None:
uname = "Group Average"
else:
uname = user
rval[uname] = self.global_priority(user)
return rval
def limit_matrix(self, username=None, as_df=False):
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The limit supermatrix
'''
sm = self.scaled_supermatrix(username)
rval = self.limitcalc(sm)
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def alt_names(self)->list:
'''
:return: List of alt names in this ANP model
'''
if self.has_subnet():
# We have some v1 subnetworks, we get alternative names by looking
# there.
rval = []
node: ANPNode
for node in self.node_objs_with_subnet():
alts = node.subnetwork.alt_names()
for alt in alts:
if alt not in rval:
rval.append(alt)
return rval
else:
return self.alts_cluster.node_names()
def priority(self, username=None, ptype:PriorityType=None)->pd.Series:
'''
Synthesize and return the alternative scores
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param ptype: The priority type to use
:return: A pandas.Series indexed on alt names, values are the score
'''
if ptype is None:
# Use the default priority type for this network
ptype = self.default_priority_type
if self.has_subnet():
# Need to synthesize using subnetworks
return self.subnet_synthesize(username=username, ptype=ptype)
else:
gp = self.global_priority(username)
alt_names = self.alt_names()
rval = gp[alt_names]
if sum(rval) != 0:
rval /= sum(rval)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def data_names(self):
'''
Returns the column headers needed to fill in the data for this model
:return: A list of strings that would be usable in excel for parsing
headers
'''
node:ANPNode
rval = []
cluster: ANPCluster
for cluster in self.cluster_objs():
cluster.data_names(rval)
for node in self.node_objs():
node.data_names(rval)
return rval
def node_connection_matrix(self, new_mat:np.ndarray=None):
'''
Returns the current node conneciton matrix if new_mat is None.
Otherwise, for each item [row, col] in the matrix with a value of 1
we connect from node[row] to node[col].
:param new_mat: The new node connection matrix. If None, we return
the current one.
:return: Current connection matrix.
'''
src_node:ANPNode
nnodes = self.nnodes()
nodes = self.node_objs()
node_names = self.node_names()
if new_mat is not None:
for src_node_pos in range(nnodes):
src_node = nodes[src_node_pos]
for dest_node_pos in range(nnodes):
if new_mat[dest_node_pos, src_node_pos] != 0:
src_node.node_connect(node_names[dest_node_pos])
rval = np.zeros([nnodes, nnodes])
for src_node_pos in range(nnodes):
src_node = nodes[src_node_pos]
for dest_node_pos in range(nnodes):
if src_node.is_node_node_connection(node_names[dest_node_pos]):
rval[dest_node_pos, src_node_pos] = 1
return rval
def import_pw_series(self, series:pd.Series)->None:
'''
Takes in a well titled series of data, and pushes it into the right
node's prioritizer (or cluster).
The name should be A vs B wrt C, where A, B, C are node or cluster names.
:param series: The series of data for each user. Index is usernames.
Values are the votes.
:return: Nothing
'''
name = series.name
name = clean_name(name)
info = name.split(' wrt ')
if len(info) < 2:
# We cannot do anything with this, we need a wrt
raise ValueError("No wrt in "+name)
wrt = info[1].strip()
wrtNode:ANPNode
wrtNode = self.node_obj(wrt)
info = info[0].split( ' vs ')
if len(info) < 2:
raise ValueError(" vs was not present in "+name)
row, col = info
rowNode = self.node_obj(row)
colNode = self.node_obj(col)
npri: Pairwise
if (wrtNode is not None) and (rowNode is not None) and (colNode is not None):
# Node pairwise
npri = wrtNode.get_node_prioritizer(rowNode, create=True)
#print("Node comparison "+name)
if not isinstance(npri, Pairwise):
raise ValueError("Node prioritizer was not pairwise")
npri.vote_series(series, row, col, createUnknownUser=True)
self.add_user(series.index, ignore_dupe=True)
else:
# Try cluster pairwise
wrtcluster = self.cluster_obj(wrt)
rowcluster = self.cluster_obj(row)
colcluster = self.cluster_obj(col)
if wrtcluster is None:
raise ValueError("wrt="+wrt+" was not a cluster, and the group was not a node comparison")
if rowcluster is None:
raise ValueError("row="+row+" was not a cluster, and the group was not a node comparison")
if colcluster is None:
raise ValueError("col="+col+" was not a cluster, and the group was not a node comparison")
npri = self.cluster_prioritizer(wrtcluster)
npri.vote_series(series, row, col, createUnknownUser=True)
self.add_user(series.index, ignore_dupe=True)
#print("Cluster comparison "+name)
def set_alts_cluster(self, new_cluster):
'''
Sets the new alternatives cluster
:param new_cluster: Cluster specified as cluster_obj() expects.
:return: Nothing
'''
cl = self.cluster_obj(new_cluster)
self.alts_cluster = cl
def import_rating_series(self, series:pd.Series):
'''
Takes in a well titled series of data, and pushes it into the right
node's prioritizer as ratings (or cluster).
Title should be A wrt B, where A and B are either both node names or
both column names.
:param series: The series of data for each user. Index is usernames.
Values are the votes.
:return: Nothing
'''
name = series.name
name = clean_name(name)
info = name.split(' wrt ')
if len(info) < 2:
# We cannot do anything with this, we need a wrt
raise ValueError("No wrt in "+name)
wrt = info[1].strip()
dest = info[0].strip()
wrtNode:ANPNode
destNode:ANPNode
wrtNode = self.node_obj(wrt)
destNode = self.node_obj(dest)
npri:Rating
if (wrtNode is not None) and (destNode is not None):
# Node ratings
npri = wrtNode.get_node_prioritizer(destNode, create=True, create_class=Rating)
if not isinstance(npri, Rating):
wrtNode.set_node_prioritizer_type(destNode, Rating)
npri = wrtNode.get_node_prioritizer(destNode, create=True)
npri.vote_column(votes=series, alt_name=dest, createUnknownUsers=True)
else:
# Trying cluster ratings
wrtcluster = self.cluster_obj(wrt)
destcluster = self.cluster_obj(dest)
if wrtcluster is None:
raise ValueError("Ratings: wrt is not a cluster wrt="+wrt+" and wasn't a node either")
if destcluster is None:
raise ValueError("Ratings: dest is not a cluster dest="+dest+" and wasn't a node either")
npri = wrtcluster.prioritizer
if not isinstance(npri, Rating):
wrtcluster.set_prioritizer_type(Rating)
npri = wrtcluster.prioritizer
npri.vote_column(votes=series, alt_name=dest, createUnknownUsers=True)
def node_prioritizer(self, wrtnode=None, cluster=None):
'''
Gets the prioritizer for node->cluster connection
:param wrtnode: The node as understood by node_obj() function.
:param cluster: Cluster as understood by cluster_obj() function.
:return: If both wrtnode and cluster are specified, a single node prioritizer
is returned for that comparison (or None if there was nothing there).
Otherwise it returns a dictionary indexed by [wrtnode, cluster] and
whose values are the prioritizers for that (only the non-None ones).
'''
if wrtnode is not None and cluster is not None:
node = self.node_obj(wrtnode)
cl_obj = self.cluster_obj(cluster)
cluster_name = cl_obj.name
return node.get_node_prioritizer(dest_node=cluster_name, dest_is_cluster=True)
elif wrtnode is not None:
# Have wrtnode, do not have cluster
rval = {}
for cluster in self.cluster_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
elif cluster is not None:
# Have cluster, but not wrtnode
rval = {}
for wrtnode in self.node_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
else:
# Both wrtnode and cluster are none, want all
rval = {}
for wrtnode in self.node_names():
for cluster in self.cluster_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
def subnet(self, wrtnode):
'''
Makes wrtnode have a subnetwork if it did not already.
:param wrtnode: The node to give a subnetwork to, or get the subnetwork
of. Node specified as node_obj() function expects.
:return: The ANPNetwork that is the subnet of this node
'''
node = self.node_obj(wrtnode)
if node.subnetwork is not None:
return node.subnetwork
else:
rval = ANPNetwork(create_alts_cluster=False)
node.subnetwork = rval
rval.default_priority_type = PriorityType.IDEALIZE
return rval
def node_invert(self, node, value=None):
'''
Either sets, or tells if a node is inverted
:param node: The node to do this on, as expected by node_obj() function
:param value: If None, we return the boolean about if this node is
inverted. Otherwise specifies the new value.
:return: T/F if value=None, telling if the node is inverted. Otherwise
returns nothing.
'''
node = self.node_obj(node)
if value is None:
return node.invert
else:
node.invert = value
def has_subnet(self)->bool:
'''
:return: True/False telling if some node had a subentwork
'''
for node in self.node_objs():
if node.subnetwork is not None:
return True
return False
def subnet_synthesize(self, username=None, ptype:PriorityType=None):
'''
Does the standard V1 subnetowrk synthesis.
:param username: The user/users to synthesize for. If None, we group
synthesize across all. If a single user, we sythesize for that user
across all. If it is a list, we synthesize for the group that is that
list of users.
:return: Nothing
'''
# First we need our global priorities
pris = self.global_priority(username)
# Next we need the alternative priorities from each subnetwork
subnets = {}
node:ANPNode
for node in self.node_objs_with_subnet():
p = node.subnetwork.priority(username, ptype)
if node.invert:
p = self.invert_priority(p)
subnets[node.name]=p
rval = self.synthesize_combine(pris, subnets)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def node_objs_with_subnet(self):
"""
:return: List of ANPNode objects in this network that have v1 subnets
"""
return [node for node in self.node_objs() if node.subnetwork is not None]
def invert_priority(self, p):
"""
Makes a copy of the list like element p, and inverts. The current
standard inversion is 1-p. There could be others implemented later.
:param p: The list like to invert
:return: New list-like of same type as p, with inverted priorities
"""
rval = deepcopy(p)
for i in range(len(p)):
rval[i] = 1 - rval[i]
return rval
def synthesize_combine(self, priorities:pd.Series, alt_scores:dict):
"""
Performs the actual sythesis step from anp v1 synthesis.
:param priorities: Priorities of the subnetworks
:param alt_scores: Alt scores as dictionary, keys are subnetwork names
values are Series whose keys are alt names.
:return: Series whose keys are alt names, and whose values are the
synthesized scores.
"""
return self.subnet_formula(priorities, alt_scores)
def cluster_prioritizer(self, wrtcluster=None):
"""
Gets the prioritizer for the clusters wrt a given cluster.
:param wrtcluster: WRT cluster identifier as expected by cluster_obj() function.
If None, then we return a dictionary indexed by cluster names and values
are the prioritizers
:return: THe prioritizer for that cluster, or a dictionary of all cluster
prioritizers
"""
if wrtcluster is not None:
cluster = self.cluster_obj(wrtcluster)
return cluster.prioritizer
else:
rval = {}
for cluster in self.cluster_objs():
rval[cluster.name] = cluster.prioritizer
return rval
def to_excel(self, fname):
struct = pd.DataFrame()
cluster:ANPCluster
writer = pd.ExcelWriter(fname, engine='openpyxl')
for cluster in self.cluster_objs():
cluster_name = cluster.name
if cluster == self.alts_cluster:
cluster_name = "*"+str(cluster_name)
struct[cluster_name] = cluster.node_names()
struct.to_excel(writer, sheet_name="struct", index=False)
# Now the node connections
mat = self.node_connection_matrix()
pd.DataFrame(mat).to_excel(writer, sheet_name="connection", index=False, header=False)
# Lastly let's write just the comparison structure
cmp = self.data_names()
|
pd.DataFrame({"":cmp})
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2章 1次元データの整理
# +
import numpy as np
import pandas as pd
pd.set_option("precision", 3)
# -
df = pd.read_csv("../python_stat_sample/data/ch2_scores_em.csv", index_col="生徒番号")
df.head()
# +
# 英語の点数の最初の10個を取得
scores = np.array(df["英語"])[:10]
scores
# +
index = [chr(i+ord("A")) for i in range(10)]
scores_df = pd.DataFrame({"点数":scores},
index=pd.Index(index, name="生徒"))
scores_df
# -
# ## 平均値
#
# \begin{align*}
# \bar{x} = \frac{1}{N} \sum_{i=0}^{N} x_i
# \end{align*}
#
# - $\bar{x}: \text{average}$
# - $N: \text{length of data}$
# - $x_i: \text{each data in }x$
sum(scores)/len(scores)
# numpyを使った方法
np.mean(scores)
# pandasを使った方法
scores_df.mean()
# ## 中央値
# 中央値を導出するためにデータを順番に置き直す
scores_sorted = np.sort(scores)
scores_sorted
# +
n = len(scores_sorted)
if n%2 == 0:
median = (scores_sorted[n//2 - 1] + scores_sorted[n//2])/2
else:
median = scores_sorted[n//2+1]
median
# -
# numpy
np.median(scores)
# pandas
scores_df.median()
# ## 最頻値
tmp_list = [1, 1, 1, 2, 2, 3]
pd.Series(tmp_list).mode()
# multiple modes in list
tmp_list = [i+1 for i in range(5)]
pd.Series(tmp_list).mode()
# ## 偏差
mean = np.mean(scores)
deviation = scores - mean
deviation
# keep copy of scores_df
summary_df = scores_df.copy()
summary_df["偏差"] = deviation
summary_df
scores_ = [50, 60, 58, 54, 51, 56, 57, 53, 52, 59]
mean_ = np.mean(scores_)
deviation_ = scores_ - mean_
deviation_
# +
# mean of deviation_
mean_deviation = np.mean(deviation_)
mean_deviation
# -
# ### 偏差の平均が0になる理由
#
# \begin{align*}
# \frac{1}{n} \sum_{i=1}^{n}(x_i - \bar{x}) = \frac{1}{n} \sum_{i=1}^{n}x_i - \frac{1}{n} \sum_{i=1}^{n} \bar{x}\\
# = \bar{x} - \bar{x}\\
# = 0
# \end{align*}
# ## 分散
var = np.mean(deviation ** 2)
var_np = np.var(scores) # defaults to sample variance
var_pd = scores_df.var() # defaults to unbiased variance
print(f"variance: {var}")
print(f"variance thru numpy: {var_np}")
print(f"variance thru pandas: {var_pd}")
summary_df["偏差二乗"] = np.square(deviation)
summary_df
# ### 標本分散
#
# \begin{align*}
# S^2 = \frac{1}{n} \sum_{i=1}^{n}(x_i - \bar{x})^2 \\
# (n > 0)
# \end{align*}
#
# ### 不偏分散
#
# \begin{align*}
# \sigma^2 = \frac{1}{n-1} \sum_{i=1}^{n}(x_i - \bar{x})^2 \\
# (n > 1)
# \end{align*}
#
# よって標準偏差は以下のようになる
# \begin{align*}
# S = \sqrt{S^2} = \sqrt{\frac{1}{n} \sum_{i=1}^{n}(x_i - \bar{x})^2} \\
# (n > 0)
# \end{align*}
# 標準偏差
np.sqrt(np.var(scores, ddof=0)) # 標本分散を使用
np.std(scores, ddof=0) # 上と同様
# ## 範囲
#
# \begin{align*}
# \it{Rg} = x_{max} - x_{min}
# \end{align*}
# 範囲
np.max(scores) - np.min(scores)
# ただし, これだと一つでも大きい値または, 小さい値があると範囲が極端になってしまう
# そのため, データの上位数%と下位数%の範囲を用いる場合がある
# これを<b>四分位範囲</b> (interquartile range) という
#
# \begin{align*}
# IQR = Q3 - Q1
# \end{align*}
# 四分位範囲
scores_Q1 = np.percentile(scores, 25)
scores_Q3 = np.percentile(scores, 75)
scores_IQR = scores_Q3 - scores_Q1
scores_IQR
# pandas
|
pd.Series(scores)
|
pandas.Series
|
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionBitwiseXorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_bitwise_xor_scalar(self):
self.assertEqual(dnp.bitwise_xor(1, 4), np.bitwise_xor(1, 4))
self.assertEqual(dnp.bitwise_xor(1, -5), np.bitwise_xor(1, -5))
self.assertEqual(dnp.bitwise_xor(0, 9), np.bitwise_xor(0, 9))
def test_function_math_binary_bitwise_xor_list(self):
lst1 = [0, 1, 2]
lst2 = [4, 6, 9]
assert_array_equal(dnp.bitwise_xor(lst1, lst2), np.bitwise_xor(lst1, lst2))
def test_function_math_binary_bitwise_xor_array_with_scalar(self):
npa = np.array([0, 1, 2])
dnpa = dnp.array([0, 1, 2])
assert_array_equal(dnp.bitwise_xor(dnpa, 1), np.bitwise_xor(npa, 1))
assert_array_equal(dnp.bitwise_xor(1, dnpa), np.bitwise_xor(1, npa))
def test_function_math_binary_bitwise_xor_array_with_array(self):
npa1 = np.array([0, 1, 2])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([0, 1, 2])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.bitwise_xor(dnpa1, dnpa2), np.bitwise_xor(npa1, npa2))
def test_function_math_binary_bitwise_xor_array_with_array_param_out(self):
npa1 = np.array([0, 1, 2])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([0, 1, 2])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.bitwise_xor(npa1, npa2, out=npa)
dnp.bitwise_xor(dnpa1, dnpa2, out=dnpa)
assert_array_equal(dnpa, npa)
def test_function_math_binary_bitwise_xor_array_with_series(self):
npa = np.array([0, 1, 2])
dnpa = dnp.array([0, 1, 2])
ps = pd.Series([4, 6, 9])
os = orca.Series([4, 6, 9])
assert_series_equal(dnp.bitwise_xor(dnpa, os).to_pandas(), np.bitwise_xor(npa, ps))
assert_series_equal(dnp.bitwise_xor(os, dnpa).to_pandas(), np.bitwise_xor(ps, npa))
pser = pd.Series([1, 2, 4])
oser = orca.Series([1, 2, 4])
assert_series_equal(dnp.bitwise_xor(os, oser).to_pandas(), np.bitwise_xor(ps, pser))
def test_function_math_binary_bitwise_xor_array_with_dataframe(self):
npa = np.array([1])
dnpa = dnp.array([1])
pdf =
|
pd.DataFrame({'A': [4, 6, 9]})
|
pandas.DataFrame
|
import pandas as pd
import pytest
from bach import DataFrame
from tests.functional.bach.test_data_and_utils import assert_equals_data
def test_basic_get_dummies(engine) -> None:
pdf = pd.DataFrame(
{'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], 'C': [1, 2, 3]},
)
df = DataFrame.from_pandas(engine=engine, df=pdf, convert_objects=True)
expected = pd.get_dummies(pdf, dtype='int')
expected.index.name = '_index_0'
expected_columns = ['C', 'A_a', 'A_b', 'B_a', 'B_b', 'B_c']
result = df.get_dummies().sort_index()
assert set(expected_columns) == set(result.data_columns)
result = result[expected_columns]
assert_equals_data(
result[expected_columns],
expected_columns=['_index_0'] + expected_columns,
expected_data=[
[0, 1, 1, 0, 0, 1, 0],
[1, 2, 0, 1, 1, 0, 0],
[2, 3, 1, 0, 0, 0, 1]
],
)
pd.testing.assert_frame_equal(
expected,
result.to_pandas(),
)
def test_get_dummies_dtype(engine) -> None:
pdf = pd.DataFrame(
{'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], 'C': [1, 2, 3]},
)
df = DataFrame.from_pandas(engine=engine, df=pdf, convert_objects=True)
expected_columns = ['C', 'A_a', 'A_b', 'B_a', 'B_b', 'B_c']
# comparison with pandas is different, pandas will return empty space instead of 0.
result = df.get_dummies(dtype='string').sort_index()
assert set(expected_columns) == set(result.data_columns)
result = result[expected_columns]
assert_equals_data(
result[expected_columns],
expected_columns=['_index_0'] + expected_columns,
expected_data=[
[0, 1, '1', '0', '0', '1', '0'],
[1, 2, '0', '1', '1', '0', '0'],
[2, 3, '1', '0', '0', '0', '1']
],
)
def test_get_dummies_prefix(engine) -> None:
pdf = pd.DataFrame(
{'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], 'C': [1, 2, 3]},
)
df = DataFrame.from_pandas(engine=engine, df=pdf, convert_objects=True)
prefix = ['col1', 'col2']
expected =
|
pd.get_dummies(pdf, prefix=prefix, prefix_sep='__', dtype='int')
|
pandas.get_dummies
|
# http://www.vdh.virginia.gov/coronavirus/
from bs4 import BeautifulSoup
import csv
from datetime import datetime
from io import StringIO
import os
import requests
import pandas as pd
# Remove empty rows
def filtered(rows):
return [x for x in rows if "".join([(x[y] or "").strip() for y in x]) != ""]
def run_VA(args):
# Parameters
raw_name = '../VA/raw'
data_name = '../VA/data/data_%s.csv'
now = datetime.now()
links = [("locality", "https://data.virginia.gov/resource/bre9-aqqr.csv"),
("conf", "https://data.virginia.gov/resource/uqs3-x7zh.csv"),
("dist", "https://data.virginia.gov/resource/v5a8-4ahw.csv"),
("age", "https://data.virginia.gov/resource/uktn-mwig.csv"),
("sex", "https://data.virginia.gov/resource/tdt3-q47w.csv"),
("race_ethnicity", "https://data.virginia.gov/resource/9sba-m86n.csv")]
for link in links:
most_recent = ""
exists = os.path.exists(data_name % link[0])
out = []
# If current data file does not exist
if not exists:
version = 0
v_exists = True
while v_exists:
version += 1
v_exists = os.path.exists((data_name % (link[0] + "_V" + str(version))))
version = version - 1
v_df = pd.read_csv((data_name % (link[0] + "_V" + str(version))))
date_col = ""
for col in v_df.columns:
if "date" in col.lower() and "report" in col.lower():
date_col = col
break
# Getting most recent date
dates = (pd.to_datetime(v_df[date_col])).to_list()
most_recent = max(dt for dt in dates if dt < now)
# Getting new dates
new_df =
|
pd.read_csv(link[1])
|
pandas.read_csv
|
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-29 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-30 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df, trade_days=False)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
class TestPortfolio(DataFrameTest):
def test_portfolio_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 2),
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
p = dero.pandas.portfolio(self.df, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
def test_portfolio_with_nan_and_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', nan, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1), #changed from 2 to 1 when updated nan handling
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
indf = self.df.copy()
indf.loc[0, 'RET'] = nan
p = dero.pandas.portfolio(indf, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
class TestConvertSASDateToPandasDate:
df_sasdate = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
df_sasdate_nan = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', nan),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
def test_convert(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate['datadate']))
assert_frame_equal(expect_df, converted)
def test_convert_nan(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('NaT'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate_nan['datadate']))
assert_frame_equal(expect_df, converted)
class TestMapWindows(DataFrameTest):
times = [
[-4, -2, 0],
[-3, 1, 2],
[4, 5, 6],
[0, 1, 2],
[-1, 0, 1]
]
df_period_str = pd.DataFrame([
(10516, '1/1/2000', 1.01),
(10516, '1/2/2000', 1.02),
(10516, '1/3/2000', 1.03),
(10516, '1/4/2000', 1.04),
(10516, '1/5/2000', 1.05),
(10516, '1/6/2000', 1.06),
(10516, '1/7/2000', 1.07),
(10516, '1/8/2000', 1.08),
(10517, '1/1/2000', 1.09),
(10517, '1/2/2000', 1.10),
(10517, '1/3/2000', 1.11),
(10517, '1/4/2000', 1.12),
(10517, '1/5/2000', 1.05),
(10517, '1/6/2000', 1.06),
(10517, '1/7/2000', 1.07),
(10517, '1/8/2000', 1.08),
], columns = ['PERMNO','Date', 'RET'])
df_period = df_period_str.copy()
df_period['Date'] = pd.to_datetime(df_period['Date'])
expect_dfs = [
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 2),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 2),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__'])
]
expect_df_first = pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 1),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 1),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 1),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 1),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 1),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 1),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__'])
def run_for_each_time(func):
"""
Decorator that can be applied to any function whose args are (self, time, expect_df) which runs the function
for each time in self.times and picks the appropriate matching expect_df
"""
def run(self):
for t, time in enumerate(self.times):
func(self, time, self.expect_dfs[t])
return run
def test_method_first(self):
result = dero.pandas._map_windows(self.df_period, self.times[0], method='first',
periodvar='Date', byvars=['PERMNO'])
assert_frame_equal(result, self.expect_df_first)
@run_for_each_time
def test_method_between(self, time, expect_df):
result = dero.pandas._map_windows(self.df_period, time, method='between',
periodvar='Date', byvars=['PERMNO'])
assert_frame_equal(result, expect_df)
class TestLeftMergeLatest(DataFrameTest):
def test_left_merge_latest(self):
expect_df = pd.DataFrame(data = [
('001076', Timestamp('1995-03-01 00:00:00'), Timestamp('1995-02-01 00:00:00')),
('001076', Timestamp('1995-04-01 00:00:00'), Timestamp('1995-03-02 00:00:00')),
('001722', Timestamp('2012-01-01 00:00:00'), Timestamp('2011-11-01 00:00:00')),
('001722', Timestamp('2012-07-01 00:00:00'), Timestamp('2011-11-01 00:00:00')),
('001722', numpy.timedelta64('NaT','ns'), numpy.timedelta64('NaT','ns')),
(numpy.datetime64('NaT'), numpy.datetime64('2012-01-01T00:00:00.000000000'), numpy.datetime64('NaT')),
], columns = ['GVKEY', 'Date', 'Date_y'])
lm = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2, on='GVKEY')
lm_low_mem = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2, on='GVKEY', low_memory=True)
lm_sql = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2,
on='GVKEY', backend='sql')
assert_frame_equal(expect_df, lm, check_dtype=False)
assert_frame_equal(expect_df.iloc[:-1], lm_low_mem, check_dtype=False)
assert_frame_equal(expect_df, lm_sql, check_dtype=False)
class TestVarChangeByGroups(DataFrameTest):
def test_multi_byvar_single_var(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, nan),
(10516, 'a', '1/2/2000', 1.02, 0.010000000000000009),
(10516, 'a', '1/3/2000', 1.03, 0.010000000000000009),
(10516, 'a', '1/4/2000', 1.04, 0.010000000000000009),
(10516, 'b', '1/1/2000', 1.05, nan),
(10516, 'b', '1/2/2000', 1.06, 0.010000000000000009),
(10516, 'b', '1/3/2000', 1.07, 0.010000000000000009),
(10516, 'b', '1/4/2000', 1.08, 0.010000000000000009),
(10517, 'a', '1/1/2000', 1.09, nan),
(10517, 'a', '1/2/2000', 1.1, 0.010000000000000009),
(10517, 'a', '1/3/2000', 1.11, 0.010000000000000009),
(10517, 'a', '1/4/2000', 1.12, 0.010000000000000009),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'RET_change'])
vc = dero.pandas.var_change_by_groups(self.df, 'RET', ['PERMNO','byvar'])
assert_frame_equal(expect_df, vc)
def test_multi_byvar_multi_var(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, nan, nan),
(10516, 'a', '1/2/2000', 1.02, 1, 0.010000000000000009, 1.0),
(10516, 'a', '1/3/2000', 1.03, 1, 0.010000000000000009, 0.0),
(10516, 'a', '1/4/2000', 1.04, 0, 0.010000000000000009, -1.0),
(10516, 'b', '1/1/2000', 1.05, 1, nan, nan),
(10516, 'b', '1/2/2000', 1.06, 1, 0.010000000000000009, 0.0),
(10516, 'b', '1/3/2000', 1.07, 1, 0.010000000000000009, 0.0),
(10516, 'b', '1/4/2000', 1.08, 1, 0.010000000000000009, 0.0),
(10517, 'a', '1/1/2000', 1.09, 0, nan, nan),
(10517, 'a', '1/2/2000', 1.1, 0, 0.010000000000000009, 0.0),
(10517, 'a', '1/3/2000', 1.11, 0, 0.010000000000000009, 0.0),
(10517, 'a', '1/4/2000', 1.12, 1, 0.010000000000000009, 1.0),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight',
'RET_change', 'weight_change'])
vc = dero.pandas.var_change_by_groups(self.df_weight, ['RET','weight'], ['PERMNO','byvar'])
assert_frame_equal(expect_df, vc)
class TestFillExcludedRows(DataFrameTest):
expect_df_nofill = pd.DataFrame(data = [
('001076', Timestamp('1995-03-01 00:00:00')),
('001076', Timestamp('1995-04-01 00:00:00')),
('001076', Timestamp('2012-01-01 00:00:00')),
('001076', Timestamp('2012-07-01 00:00:00')),
('001722', Timestamp('1995-03-01 00:00:00')),
('001722',
|
Timestamp('1995-04-01 00:00:00')
|
pandas.Timestamp
|
import logging
from datetime import datetime, timedelta
from typing import List, Union, Optional, Generator, Tuple, Dict
from dataclasses import dataclass
from urllib.parse import urljoin
import pandas as pd
from pandas._libs.tslibs.timestamps import Timestamp
from requests import HTTPError
from wetterdienst.core.data import WDDataCore
from wetterdienst.core.sites import WDSitesCore
from wetterdienst.dwd.forecasts.metadata.column_types import (
DATE_FIELDS_REGULAR,
INTEGER_FIELDS,
)
from wetterdienst.dwd.forecasts.metadata import (
DWDForecastDate,
DWDForecastParameter,
DWDMosmixType,
)
from wetterdienst.dwd.forecasts.stations import metadata_for_forecasts
from wetterdienst.dwd.metadata.column_names import DWDMetaColumns
from wetterdienst.dwd.metadata.constants import (
DWD_SERVER,
DWD_MOSMIX_S_PATH,
DWD_MOSMIX_L_SINGLE_PATH,
)
from wetterdienst.dwd.forecasts.access import KMLReader
from wetterdienst.dwd.metadata.datetime import DatetimeFormat
from wetterdienst.util.enumeration import parse_enumeration_from_template
from wetterdienst.exceptions import StartDateEndDateError
from wetterdienst.util.network import list_remote_files
log = logging.getLogger(__name__)
@dataclass
class DWDMosmixResult:
"""
Result object encapsulating metadata, station information and forecast data.
"""
metadata: pd.DataFrame
forecast: pd.DataFrame
class DWDMosmixData(WDDataCore):
"""
Fetch weather forecast data (KML/MOSMIX_S dataset).
Parameters
----------
station_ids : List
- If None, data for all stations is returned.
- If not None, station_ids are a list of station ids for which data is desired.
parameters: List
- If None, data for all parameters is returned.
- If not None, list of parameters, per MOSMIX definition, see
https://www.dwd.de/DE/leistungen/opendata/help/schluessel_datenformate/kml/mosmix_elemente_pdf.pdf?__blob=publicationFile&v=2 # noqa:E501,B950
"""
def __init__(
self,
mosmix_type: DWDMosmixType,
station_ids: List[str],
parameters: Optional[List[Union[str, DWDForecastParameter]]] = None,
start_date: Optional[
Union[str, datetime, DWDForecastDate]
] = DWDForecastDate.LATEST,
end_date: Optional[Union[str, datetime, timedelta]] = None,
tidy_data: bool = True,
humanize_column_names: bool = False,
) -> None:
"""
Args:
mosmix_type: type of forecast, either small (MOSMIX-S) or large
(MOSMIX-L), as string or enumeration
station_ids: station ids which are being queried from the MOSMIX foreacst
parameters: optional parameters for which the forecasts are filtered
start_date: start date of the MOSMIX forecast, can be used in combination
with end date to query multiple MOSMIX forecasts, or instead used with
enumeration to only query LATEST MOSMIX forecast
end_date: end date of MOSMIX forecast, can be used to query multiple MOSMIX
forecasts available on the server
tidy_data: boolean if pandas.DataFrame shall be tidied and values put in
rows
humanize_column_names: boolean if parameters shall be renamed to human
readable names
"""
if mosmix_type not in DWDMosmixType:
raise ValueError(
"period_type should be one of FORECAST_SHORT or FORECAST_LONG"
)
if station_ids:
station_ids = pd.Series(station_ids).astype(str).tolist()
if parameters:
parameters = (
pd.Series(parameters)
.apply(
parse_enumeration_from_template,
args=(DWDForecastParameter,),
)
.tolist()
)
if not start_date and not end_date:
start_date = DWDForecastDate.LATEST
elif not end_date:
end_date = start_date
elif not start_date:
start_date = end_date
if start_date is not DWDForecastDate.LATEST:
start_date = pd.to_datetime(start_date, infer_datetime_format=True).floor(
"1H"
)
end_date = pd.to_datetime(end_date, infer_datetime_format=True).floor("1H")
if not start_date <= end_date:
raise StartDateEndDateError(
"end_date should be same or later then start_date"
)
# Shift dates to 3, 9, 15, 21 hour format
if mosmix_type == DWDMosmixType.LARGE:
start_date = self.adjust_datetime(start_date)
end_date = self.adjust_datetime(end_date)
self.forecast_type = mosmix_type
self.station_ids = station_ids
self.parameters = parameters
self.start_date = start_date
self.end_date = end_date
self.tidy_data = tidy_data
self.humanize_column_names = humanize_column_names
if mosmix_type == DWDMosmixType.SMALL:
self.freq = "1H" # short forecasts released every hour
else:
self.freq = "6H"
self.kml = KMLReader(station_ids=self.station_ids, parameters=self.parameters)
@property
def metadata(self):
""" Wrapper for forecast metadata """
return metadata_for_forecasts()
@staticmethod
def adjust_datetime(datetime_: datetime) -> datetime:
"""
Adjust datetime to MOSMIX release frequency, which is required for MOSMIX-L
that is only released very 6 hours (3, 9, 15, 21). Datetime is floored
to closest release time e.g. if hour is 14, it will be rounded to 9
Args:
datetime_: datetime that is adjusted
Returns:
adjusted datetime with floored hour
"""
regular_date = datetime_ + pd.offsets.DateOffset(hour=3)
if regular_date > datetime_:
regular_date -= pd.Timedelta(hours=6)
delta_hours = (datetime_.hour - regular_date.hour) % 6
datetime_adjusted = datetime_ -
|
pd.Timedelta(hours=delta_hours)
|
pandas.Timedelta
|
#!env python
import urllib.request
import re
import os
import tempfile
import numpy as np
import pandas as pd
import pdfminer
from pdfminer.high_level import extract_pages
def _extract_text(fn):
found_elements = []
for page_layout in extract_pages(fn, maxpages=1):
for element in page_layout:
if isinstance(element, pdfminer.layout.LTTextBoxHorizontal):
found_elements.append(element)
found_elements.sort(key=lambda e: e.bbox[0])
found_elements.sort(key=lambda e: e.bbox[1], reverse=True)
return [e.get_text().strip() for e in found_elements]
def _parse_numbers(idx, entries):
return [float(e.strip('*').replace(',', '.').strip()) if ',' in e else int(e.strip('*').strip()) for e in entries[idx+1:idx+3]]
def _calc_rolling_average(df):
mask = df['Daily new'] == 0
df.loc[mask, 'Daily new'] = df["Total"].diff()[mask]
df['Daily new 7-day average'] = df['Daily new'].rolling(window=7).mean()
return df
def fetch_stats(out_dir, pdf_dir="", verbose=True):
urls = [
"https://www.rhein-neckar-kreis.de/start/landratsamt/coronavirus+fallzahlen+03-07.html",
"https://www.rhein-neckar-kreis.de/start/landratsamt/coronavirus+fallzahlen+08-09.html",
"https://www.rhein-neckar-kreis.de/start/landratsamt/coronavirus+fallzahlen.html"
]
p = re.compile('a href="(.+?Faktenblatt_Corona_RNK\.pdf)" title="" target="_blank">')
if verbose:
print("Checking updates...")
pdf_urls = []
for url in urls:
with urllib.request.urlopen(url) as f:
t = f.read().decode("utf-8")
pdf_urls += list(p.findall(t))
tmp_obj = None
if pdf_dir == "":
tmp_obj = tempfile.TemporaryDirectory()
pdf_dir = tmp_obj.name
elif not os.path.exists(pdf_dir):
os.mkdir(pdf_dir)
df_headers = ["Total", "Recovered", "Deaths", "Quarantined", "7 Day Incidents", "Daily new"]
if os.path.exists(os.path.join(out_dir, 'hd_stats.json')):
hd_stats = pd.read_json(os.path.join(out_dir, 'hd_stats.json'), orient="split").T
rnk_stats = pd.read_json(os.path.join(out_dir, 'rnk_stats.json'), orient="split").T
else:
hd_stats = pd.DataFrame(columns=df_headers)
rnk_stats = pd.DataFrame(columns=df_headers)
url_root = "https://www.rhein-neckar-kreis.de/"
for pdf_url in pdf_urls:
pdf_fn = pdf_url.split('/')[-1]
date = pd.Timestamp("20%s-%s-%s"%(pdf_fn[:2], pdf_fn[2:4], pdf_fn[4:6]))
if date in rnk_stats.index:
if verbose:
print(f"Found data on {date.strftime('%Y-%m-%d')}, skipping...")
continue
if not os.path.exists(os.path.join(pdf_dir, pdf_fn)):
print("Downloading %s..."%pdf_fn)
with urllib.request.urlopen(url_root + pdf_url) as f, open(os.path.join(pdf_dir, pdf_fn), "wb") as fo:
fo.write(f.read())
print("Parsing %s..."%pdf_fn)
covid_numbers = np.zeros([2, 6], dtype=float) # Rows: RNK, HD
# Cols: positive, recovered, deaths, quarantined, 7-day-incidences, difference yesterday
covid_numbers[:, 4] = np.NaN
entries = _extract_text(os.path.join(pdf_dir, pdf_fn))
flags = np.zeros(5, dtype=bool)
for idx, e in enumerate(entries):
# RNK: idx == 0
# HD: idx == 1
if not flags[0] and ("Positive" in e or "Gesamtzahl" in e): # Positive
covid_numbers[:, 0] = _parse_numbers(idx, entries)
flags[0] = True
if not flags[1] and "Genesene" in e: # Recovered
if 'Datenbank-Fehlers' in entries[-3] and 'Genesene Personen' in entries[-3]:
# Some numbers are not avilable due to database failure
covid_numbers[:, 1] = np.nan
else:
covid_numbers[:, 1] = _parse_numbers(idx, entries)
flags[1] = True
if not flags[2] and "Verstorbene" in e: # Deaths
covid_numbers[:, 2] = _parse_numbers(idx, entries)
flags[2] = True
if not flags[3] and "7-Tage-Inzidenz" in e: # 7-day-incidences
covid_numbers[:, 4] = _parse_numbers(idx, entries)
flags[3] = True
if not flags[4] and e == "Veränderung zum Vortag": # Daily new
covid_numbers[:, 5] = _parse_numbers(idx, entries)
flags[4] = True
if all(flags):
# found all numbers
break
covid_numbers[:, 3] = covid_numbers[:, 0] - covid_numbers[:, 1] - covid_numbers[:, 2] # Calculate quarantined
rnk_stats = rnk_stats.append(pd.DataFrame([covid_numbers[0]], columns=df_headers, index=[date]))
hd_stats = hd_stats.append(
|
pd.DataFrame([covid_numbers[1]], columns=df_headers, index=[date])
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# Sangam2019 High Accuracy Traffic Prediction via PCA
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from datetime import datetime
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,classification_report,mean_absolute_error,r2_score
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn import decomposition
get_ipython().run_line_magic('matplotlib', 'inline')
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
le=LabelEncoder()
lr=LinearRegression()
pca=decomposition.PCA()
traffic=pd.read_csv("Train.csv")
traffic
print(traffic.shape)
traffic["is_holiday"].value_counts()
traffic.weather_type=le.fit_transform(traffic.weather_type)
traffic.head()
list(le.classes_)
traffic.loc[traffic['is_holiday']!='None','is_holiday']=1
traffic.loc[traffic['is_holiday']=='None','is_holiday']=0
traffic
traffic['is_holiday'].value_counts()
traffic['date_time'].describe()
traffic[['date','time']]=traffic.date_time.str.split(expand=True)
traffic.head()
col=traffic.columns.tolist()
col=col[-1:]+col[:-1]
col=col[-1:]+col[:-1]
col
traffic=traffic[col]
traffic=traffic.drop('date_time',axis=1)
traffic.head()
traffic['date']= pd.to_datetime(traffic['date'])
traffic.info()
traffic1=traffic
traffic1.head()
traffic1['date']=traffic1['date'].dt.weekday
traffic1['date']=traffic1['date'].astype(int)
traffic1.head()
traffic1['time']=traffic1['time'].str.replace(':00','')
traffic1['time']=traffic1['time'].astype(int)
traffic1.head()
traffic1.info()
traffic1.loc[traffic1['date']=='6','is_holiday']=1
traffic2=traffic1
traffic2=traffic2.drop(['weather_description'],axis=1)
traffic2.head()
traffic2[['temperature','rain_p_h','snow_p_h']]=traffic2[['temperature','rain_p_h','snow_p_h']].astype(int)
traffic2.info()
x_train,x_test,y_train,y_test=train_test_split(traffic2.drop('traffic_volume',axis=1),traffic2['traffic_volume'],test_size=0.2,random_state=5)
lr.fit(x_train,y_train)
prd=lr.predict(x_test)
lr.score(x_test,y_test)
r_sq=r2_score(y_test,prd)
r_sq
pca.n_components=2
pca_data=pca.fit_transform(traffic2)
print('Shape of pca_reduced.shape: ',pca_data.shape)
data=np.vstack((pca_data.T,traffic2['traffic_volume'])).T #T for tranpose, without which h-stack needs to be used
trafficpca=pd.DataFrame(data,columns=('Col1','Col2','Labels'))
trafficpca.head()
x_train1,x_test1,y_train1,y_test1=train_test_split(trafficpca.drop('Labels',axis=1),trafficpca['Labels'],test_size=0.1,random_state=5)
lr.fit(x_train1,y_train1)
prdpca=lr.predict(x_test1)
prdpca
prdpca.shape
lr.score(x_test1,y_test1)
r_sq1=r2_score(y_test1,prdpca)
r_sq1
accuracy_score(y_test1,prdpca.round())
custom_x=[4,9,0,121,89,2,329,1,1,288,0,0,40,1]
traffic_test=
|
pd.read_csv("Test.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""model_bnb_h.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1LubfQy8-34FekTlgdarShQ5MUnXa328i
"""
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import math
import seaborn as sns
sns.set()
import warnings
warnings.filterwarnings('ignore')
data_path = '/content/Binance_' + 'BNB' + 'USDT_1h.csv'
data_path
df = pd.read_csv(data_path)
df['Volume'] = df['Volume '+'BNB']
def moving_average(df, n):
"""Calculate the moving average for the given data.
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean(), name='MA_' + str(n))
df = df.join(MA)
return df
def exponential_moving_average(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
EMA = pd.Series(df['Close'].ewm(span=n, min_periods=n).mean(), name='EMA_' + str(n))
df = df.join(EMA)
return df
def momentum(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(n), name='Momentum_' + str(n))
df = df.join(M)
return df
def rate_of_change(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name='ROC_' + str(n))
df = df.join(ROC)
return df
def average_true_range(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean(), name='ATR_' + str(n))
df = df.join(ATR)
return df
def bollinger_bands(df, n):
"""
:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean())
MSD = pd.Series(df['Close'].rolling(n, min_periods=n).std())
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
df = df.join(B1)
b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
df = df.join(B2)
return df
def ppsr(df):
"""Calculate Pivot Points, Supports and Resistances for given data
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 =
|
pd.Series(2 * PP - df['Low'])
|
pandas.Series
|
"""Tools for Loop-detection analysis."""
from multiprocessing import Pool
from typing import Tuple, Sequence, Iterator
from dataclasses import dataclass
import numpy as np
import pandas as pd
from scipy import ndimage, stats, sparse
from sklearn.cluster import DBSCAN
from statsmodels.stats import multitest
from .utils.utils import CPU_CORE, suppress_warning
from .utils.numtools import mask_array, index_array, Toeplitz
from .chrommatrix import ChromMatrix, Array
HKernels = Tuple[Sequence[np.ndarray], Tuple[int, int]]
@dataclass
class HiccupsPeaksFinder(object):
chrom_ma: ChromMatrix
inner_radius: int = 2
outer_radius: int = 5
band_width: int = 600
fdrs: Tuple[float, float, float, float] = (0.1, 0.1, 0.1, 0.1)
sigs: Tuple[float, float, float, float] = (0.1, 0.1, 0.1, 0.1)
fold_changes: Tuple[float, float, float, float] = (1.5, 1.5, 1.5, 1.5)
num_cpus: int = max(1, CPU_CORE - 2)
def __post_init__(self):
self.kernels: HKernels = self.fetch_kernels(self.inner_radius, self.outer_radius)
def __call__(self) -> pd.DataFrame:
observed = sparse.csr_matrix(self.chrom_ma.ob(sparse=True))
decay = self.chrom_ma.decay()
weights = self.chrom_ma.weights
# fetch chunk slices
chunks: Iterator[Tuple[slice, slice]] = self.get_chunk_slices(
length=self.chrom_ma.shape[0],
band_width=self.band_width,
height=self.band_width,
ov_length=2 * self.outer_radius
)
# fetching backgrounds model for nonzero pixles for each chunk for 4 kernels
with Pool(processes=self.num_cpus) as pool:
params = (
(observed[s1, s2], (decay[s1], decay[s2]), (1 / weights[s1], 1 / weights[s2]),
self.kernels, self.band_width)
for s1, s2 in chunks
)
backgounds = pool.starmap(self.calculate_chunk, params)
# indices are 0-based, plus onto the start index in the original matrix
for (indices, *_), chunk in zip(backgounds, chunks):
x_st, y_st = chunk[0].start, chunk[1].start
indices += np.array([[x_st], [y_st]])
# 1. gathering backgrounds info of all nonzero pixels
indices = np.concatenate([b[0] for b in backgounds], axis=1)
contacts_array = np.concatenate([b[1] for b in backgounds])
lambda_array = np.concatenate([b[2] for b in backgounds], axis=1)
enrich_ratio = np.concatenate([b[3] for b in backgounds])
# print(f'Before multiple test: {indices[0].size}')
# 2. Multiple test. Filtering insignificant point after calculating padj using fdr_bh multiple test method.
pvals, padjs, rejects = self.multiple_test(contacts_array, lambda_array, fdrs=self.fdrs, sigs=self.sigs)
peaks = (indices, contacts_array, lambda_array, enrich_ratio, pvals, padjs)
peaks = tuple(mask_array(np.all(rejects, axis=0), *peaks))
# print(f'After multiple test: {peaks[0][0].size}')
# 3. Apply greedy clustering to merge points into confidant peaks.
peak_indexs, shapes = self.cluster(peaks[0], peaks[1], peaks[2])
peaks = (*tuple(index_array(peak_indexs, *peaks)), shapes)
# print(f'After cluster: {peaks[0][0].size}')
# 4. Filter by gap_region, fold changes(enrichment) and singlet peak's sum-qvalue.
valid_mask = self.filter(peaks, gap_mask=~self.chrom_ma.mask, fold_changes=self.fold_changes)
peaks = tuple(mask_array(valid_mask, *peaks))
# indices, contacts_array, lambda_array, enrich_ratio, pvals, padjs, shape = peaks
# print(f'After filter: {peaks[0][0].size}')
peask_df = self.build_results(peaks, binsize=self.chrom_ma.binsize)
return peask_df
@staticmethod
def fetch_kernels(p: int, w: int) -> HKernels:
"""Return kernels of four regions: donut region, vertical, horizontal, lower_left region.
"""
def region_to_kernel(*regions) -> np.ndarray:
for region in regions:
kernel = np.full((2 * w + 1, 2 * w + 1), 0, dtype=np.int)
for i, j in region:
kernel[i + w, j + w] = 1
yield kernel
def rect(x_start, x_len, y_start, y_len):
return {
(i, j)
for i in range(x_start, x_start + x_len)
for j in range(y_start, y_start + y_len)
}
length = 2 * w + 1
center = rect(-p, 2 * p + 1, -p, 2 * p + 1)
strips = rect(-w, length, 0, 1) | rect(0, 1, -w, length)
donut = rect(-w, length, -w, length) - (center | strips)
vertical = rect(-w, length, -1, 3) - center
horizontal = rect(-1, 3, -w, length) - center
lower_left = rect(1, w, -w, w) - center
return tuple(region_to_kernel(donut, vertical, horizontal, lower_left)), (p, w)
@staticmethod
def get_chunk_slices(length: int,
band_width: int,
height: int,
ov_length: int) -> Iterator[Tuple[slice, slice]]:
"""Return slices of all chunks along the digonal that ensure the band region with specified width is fully covered.\n
Band region's left border is the main diagonal.
"""
band_width *= 2
start = 0
while 1:
y_end = start + band_width
x_end = start + height
if (y_end < length) and (x_end < length):
yield slice(start, x_end), slice(start, y_end)
start += height - ov_length
else:
yield slice(start, length), slice(start, length)
break
@staticmethod
@suppress_warning
def calculate_chunk(observed: Array,
exps: Tuple[np.ndarray, np.ndarray],
factors: Tuple[np.ndarray, np.ndarray],
kernels: HKernels,
band_width: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""For a given chunk, calculate lambda values and contact(true counts) values of each pixel in regions specified in kernels.
"""
ks, (r1, r2) = kernels
num_kernels = len(ks)
try:
if isinstance(observed, sparse.spmatrix):
observed = observed.toarray()
expected = Toeplitz(*exps)[:]
observed[np.isnan(observed)] = 0
zero_region = observed == 0
expected[zero_region] = 0
# calculate lambda array for all nonzero pixels in valid region under each kernel
x, y = observed.nonzero()
dis = y - x
mask = ((dis <= (band_width - 2 * r2))
& (x < (observed.shape[0] - r2))
& (dis >= r2)
& (x >= r2))
x, y = x[mask], y[mask]
if x.size == 0:
return np.empty((2, 0)), np.empty(0), np.empty((num_kernels, 0)), np.empty(0)
ratio_array = np.full((num_kernels, x.size), 0, dtype=np.float)
oe_matrix = observed / expected
for index, kernel in enumerate(ks):
# ob_sum = ndimage.convolve(observed, kernel)
# ex_sum = ndimage.convolve(expected, kernel)
# ratio_array[index] = (ob_sum / ex_sum)[(x, y)]
# Another option
# counts = ndimage.convolve(valid_mat, kernel)
ratio = ndimage.convolve(oe_matrix, kernel) / kernel.sum()
ratio_array[index] = ratio[x, y]
lambda_array = (ratio_array
* expected[x, y]
* factors[0][x]
* factors[1][y])
inner_len = 2 * r1 + 1
outer_len = 2 * r2 + 1
inner_num = inner_len ** 2
percentage = (inner_num / outer_len ** 2)
plateau_ma = oe_matrix - ndimage.percentile_filter(
oe_matrix,
int((1 - percentage) * 100),
(outer_len, outer_len)
)
plateau_region = (plateau_ma > 0).astype(np.int16)
enrich_ratio = ndimage.convolve(
plateau_region,
np.ones((inner_len, inner_len))
)[x, y] / inner_num
nan_mask = np.isnan(lambda_array)
lambda_array[nan_mask] = 0
contacts_array = observed[x, y] * factors[0][x] * factors[1][y]
non_nan_mask = ~(np.any(nan_mask, axis=0) | np.isnan(contacts_array))
indices = np.vstack((x, y))
# Another option is to prefilter by fold changes
return (indices[:, non_nan_mask],
contacts_array[non_nan_mask],
lambda_array[:, non_nan_mask],
enrich_ratio[non_nan_mask])
except Exception as e:
return np.empty((2, 0)), np.empty(0), np.empty((num_kernels, 0)), np.empty(0)
@staticmethod
def multiple_test(contact_array: np.ndarray,
lambda_array: np.ndarray,
fdrs: Tuple[float, float, float, float],
sigs: Tuple[float, float, float, float],
method: str = "fdr_bh") -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Conduct poisson test on each pixel and multiple test correction for all tests.
"""
def lambda_chunks(lambda_array: np.ndarray,
full: bool = False,
base: float = 2,
exponent: float = 1 / 3) -> Iterator[Tuple[float, float, np.ndarray]]:
"""Assign values in lambda_array to logarithmically spaced chunks of every base**exponent range.
"""
min_value = np.min(lambda_array)
num = int(np.ceil(np.log2(np.max(lambda_array)) / exponent) + 1)
lambda_values = np.logspace(
start=0,
stop=(num - 1) * exponent,
num=num,
base=base
)
for start, end in zip(lambda_values[:-1], lambda_values[1:]):
if not full and min_value > end:
continue
mask = (start < lambda_array) & (lambda_array <= end)
yield start, end, mask
num_test, len_test = lambda_array.shape
pvals = np.full((num_test, len_test), 1, np.float)
padjs = np.full((num_test, len_test), 1, np.float)
rejects = np.full((num_test, len_test), False, np.bool)
for test_i in range(num_test):
for _, end, lambda_mask in lambda_chunks(lambda_array[test_i]):
chunk_size = lambda_mask.sum()
if chunk_size == 0:
continue
# poisson_model = stats.poisson(np.ones(chunk_size) * end)
poisson_model = stats.poisson(lambda_array[test_i, lambda_mask])
_pvals = 1 - poisson_model.cdf(contact_array[lambda_mask])
reject, _padjs, _, _ = multitest.multipletests(
pvals=_pvals,
alpha=fdrs[test_i],
method=method
)
rejects[test_i][lambda_mask] = reject
padjs[test_i][lambda_mask] = _padjs
pvals[test_i][lambda_mask] = _pvals
rejects = rejects & (padjs < np.array(sigs)[:, None])
return pvals, padjs, rejects
@staticmethod
def cluster(indices: np.ndarray,
contacts: np.ndarray,
lambda_array: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
dbscan = DBSCAN(2)
dbscan.fit(indices.T)
peak_indexs, shapes = [], []
for cluster_id in set(dbscan.labels_) - {-1}:
point_indexs = np.where(dbscan.labels_ == cluster_id)[0]
points = indices[:, point_indexs]
center_index = np.argmax(
(contacts[point_indexs] / lambda_array[:, point_indexs]).sum(axis=0)
)
center = points[:, center_index]
width = np.abs(points[1] - center[1]).max() * 2 + 1
height = np.abs(points[0] - center[0]).max() * 2 + 1
peak_indexs.append(point_indexs[center_index])
if height >= 2 * width:
height = width
elif width >= 2 * height:
width = height
shapes.append([width, height])
for singlet_index in np.where(dbscan.labels_ == -1)[0]:
peak_indexs.append(singlet_index)
shapes.append([1, 1])
return np.array(peak_indexs), np.array(shapes).T
@staticmethod
def filter(peaks: tuple,
gap_mask: np.ndarray,
fold_changes: Tuple[float, float, float, float] = (2, 1.5, 1.5, 2)) -> np.ndarray:
"""Post-filtering peaks after filtered by mulitple test and megred by clustering:\n
1. Remove peaks close to gap region(bad bins).\n
3. Retain peaks with fold changes over a given threshold in four regions.\n
"""
def enrich_mask(contact_array: np.ndarray,
lambda_array: np.ndarray,
enrich_ratio: np.ndarray) -> np.ndarray:
"""Return mask of valid peaks passed the enrichment fold changes filtering."""
fc_mask = np.all(contact_array
>= lambda_array * np.array(fold_changes)[:, None], axis=0)
ec_mask = enrich_ratio > 0.4
return fc_mask & ec_mask
def away_gap_mask(indices, gap_mask, extend_width) -> np.ndarray:
"""Return mask of valid peaks away from gap regions."""
for _ in range(extend_width):
gap_mask |= np.r_[gap_mask[1:], [False]]
gap_mask |= np.r_[[False], gap_mask[: -1]]
gap_region = set(np.where(gap_mask)[0])
return ~np.array([i in gap_region or j in gap_region
for i, j in zip(*indices)])
indices, contact_array, lambda_array, enrich_ratio, pvals, padjs, shapes = peaks
return away_gap_mask(indices, gap_mask, 1) & enrich_mask(contact_array, lambda_array, enrich_ratio)
@staticmethod
def build_results(peaks_tuple: tuple, binsize: int = 1) -> pd.DataFrame:
"""Aggregate peak-infos into a pd.DataFrame object.
"""
region_names = ['donut', 'horizontal', 'vertical', 'lower_left']
col_names = (['i', 'j', 'ob']
+ ['ex_' + region for region in region_names]
+ ['pval_' + region for region in region_names]
+ ['padj_' + region for region in region_names]
+ ['enrich_ratio', 'width', 'height'])
dtypes = [np.int] * 3 + [np.float] * (len(col_names) - 3)
if not peaks_tuple:
return
|
pd.DataFrame(columns=col_names)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
# not indexing, but place here for consisntency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz='Asia/Tokyo'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + int => datetime64tz
# ToDo: must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + object => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
'x',
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_timedelta64(self):
pass
def test_fillna_series_period(self):
pass
def test_fillna_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01',
'2011-01-03', '2011-01-04'])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
|
pd.Timestamp('2011-01-03')
|
pandas.Timestamp
|
import pickle
import pandas as pd
import numpy as np
n_components = 20
def get_dataset(path ='./dataset_df', n_sensors = 3, **kwargs):
assert n_sensors < 6
df = pickle.load( open( path, 'rb' ) )
df = df.loc[df['lable'].isin([1,2])]
sensors = [f'sensor_{i}' for i in range(n_sensors)]
feature_names = [f'PC_{i}' for i in range(n_components)]
samples = df.groupby(['time', 'file_index'])
feature_data = None
lables = []
for name, sample in samples:
lables.append(sample.iloc[0]['lable'])
sensor_data = sample.loc[sample['radar'].isin(sensors)]
if feature_data is None:
feature_data = sensor_data[feature_names].to_numpy().flatten()
else:
feature_data = np.vstack((feature_data, sensor_data[feature_names].to_numpy().flatten()))
dataset = np.hstack((feature_data, np.array(lables)[:,np.newaxis]))
feature_names = [f'feature_{i}' for i in range(n_components*n_sensors)]
columns = [*feature_names, 'lable']
dataset_df =
|
pd.DataFrame(dataset, columns=columns)
|
pandas.DataFrame
|
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize("func, values", [
("idxmin", {'c_int': [0, 2], 'c_float': [1, 3], 'c_date': [1, 2]}),
("idxmax", {'c_int': [1, 3], 'c_float': [0, 2], 'c_date': [0, 3]})
])
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame({'name': ['A', 'A', 'B', 'B'],
'c_int': [1, 2, 3, 4],
'c_float': [4.02, 3.03, 2.04, 1.05],
'c_date': ['2019', '2018', '2016', '2017']})
df['c_date'] = pd.to_datetime(df['c_date'])
result = getattr(df.groupby('name'), func)()
expected = pd.DataFrame(values, index=Index(['A', 'B'], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general():
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r['File'].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
@pytest.mark.parametrize("func", [
'mean', 'var', 'std', 'cumprod', 'cumsum'
])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
g = df.groupby('A')
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin_cummax():
# GH 15048
num_types = [np.int32, np.int64, np.float32, np.float64]
num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
np.finfo(np.float32).min, np.finfo(np.float64).min]
num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
np.finfo(np.float32).max, np.finfo(np.float64).max]
base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
'B': [3, 4, 3, 2, 2, 3, 2, 1]})
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
df = base_df.astype(dtype)
# cummin
expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummin w/ min value for dtype
df.loc[[2, 6], 'B'] = min_val
expected.loc[[2, 3, 6, 7], 'B'] = min_val
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# cummax
expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummax w/ max value for dtype
df.loc[[2, 6], 'B'] = max_val
expected.loc[[2, 3, 6, 7], 'B'] = max_val
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
np.nan, 3, np.nan, 1]})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummin())
.to_frame())
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
np.nan, 3, np.nan, 3]})
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummax())
.to_frame())
tm.assert_frame_equal(result, expected)
# Test nan in entire column
base_df['B'] = np.nan
expected = pd.DataFrame({'B': [np.nan] * 8})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
for method in ['cummax', 'cummin']:
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby('a').b.cummax()
expected = pd.Series([2, 1, 2], name='b')
tm.assert_series_equal(result, expected)
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby('a').b.cummin()
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
[True, False, False, True]),
# Test with inf vals
([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_increasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = (
df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
[True, False, False, True]),
# Test with inf vals
([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_decreasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result['mean'], grouped.mean(),
check_names=False)
tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ['C']].groupby(df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby('k').describe()
with pytest.raises(ValueError, match=msg):
df2.groupby('key').describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
@pytest.mark.parametrize('n', 10 ** np.arange(2, 6))
@pytest.mark.parametrize('m', [10, 100, 1000])
@pytest.mark.parametrize('sort', [False, True])
@pytest.mark.parametrize('dropna', [False, True])
def test_series_groupby_nunique(n, m, sort, dropna):
def check_nunique(df, keys, as_index=True):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
tm.assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique():
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object():
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series():
# GH 12553
data = pd.Series(name='name')
result = data.groupby(level=0).nunique()
expected = pd.Series(name='name', dtype='int64')
tm.assert_series_equal(result, expected)
def test_nunique_with_timegrouper():
# GH 13453
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
|
Timestamp('2016-06-28 16:09:30')
|
pandas.Timestamp
|
import json
import pandas as pd
import pymysql
from sqlalchemy import create_engine
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import pickle
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
pd.set_option('display.max_columns', 500)
with open("credentials.json") as f:
credentials = json.loads(f.read())
host = credentials["db_host"]
user = credentials["db_user"]
password = credentials["db_pass"]
db = credentials["db_name"]
# Automate this query for every route for every direction
engine = create_engine(f"mysql+pymysql://{user}:{password}@{host}:3306/{db}")
routes = pd.read_sql_query('SELECT DISTINCT lineid, direction FROM trips_2017', engine)
for index, row in routes.iterrows():
df = pd.read_sql_query(f'SELECT * FROM trips_2017 WHERE lineid = "{row[0]}" AND direction = {row[1]}', engine)
last_stop_on_route = pd.read_sql_query(f'SELECT max(stop_on_route) as end from combined_2017 WHERE line_id = "{row[0]}" AND direction = {row[1]}', engine)
last_stop = last_stop_on_route['end'].iloc[0]
# Replace missing actual time departure values with planned values
df.actualtime_dep.fillna(df.plannedtime_dep, inplace=True)
# Remove rows with missing values for actual time arrival as we cannot safely assume these are as planned
df = df[pd.notnull(df['actualtime_arr'])]
# Create a new column for trip duration
df['trip_duration'] = df['actualtime_arr'] - df['actualtime_dep']
# Create a new column with the hour of the day the trip took place
df['actualtime_dep_H'] = round(df['actualtime_dep']/3600)
# Hour of day of actual time arrival
df['actualtime_arr_H'] = round(df['actualtime_arr']/3600)
# Average hour of the day of the journey
df['avg_H'] = (df['actualtime_dep_H'] + df['actualtime_arr_H']) / 2
# Convert this to an integer
df['avg_H'] = df['avg_H'].astype(int)
# Creating column solely for the dates to correlate with the dates column on the historical weather data table
df['time'] = df['timestamp'] + df['avg_H'] * 3600
# Removing suppressed rows where suppressed = 1.0
df = df.query('suppressed != 1.0')
# Remove <NAME>, because the times are going to be weird:
df = df.query("timestamp != 1489708800")
# Creating columns from timestamp for further processing
df['dayofweek'] = df['timestamp']
df['monthofyear'] = df['timestamp']
# Converting the unix time to datetime format
df.dayofweek = pd.to_datetime(df['dayofweek'], unit='s')
df.monthofyear = pd.to_datetime(df['monthofyear'], unit='s')
# Converting datetime to name of weekday, and to name of month (in separate columns)
df['dayofweek'] = df['dayofweek'].dt.weekday_name
df['monthofyear'] = df['monthofyear'].dt.month
# Creating dummy variables for weekday names and name of month
df_dayofweek_dummies = pd.get_dummies(df['dayofweek'])
# Using data from the end of the Feb mid-term break till the start of the Easter break
# April was on Sunday the 16th April and the timestamps is Monday April 10th, which is the first Monday of Easter Break
df = df.query('monthofyear == 2 or monthofyear == 3 or monthofyear == 4 and time >= 1487548800 and time < 1491782400')
# Add day of week columns for each day
df1 = pd.concat([df, df_dayofweek_dummies], axis=1, join_axes=[df.index])
# Pull weather data from database
df2 =
|
pd.read_sql_query('SELECT * FROM DarkSky_historical_weather_data WHERE year = 2017 AND month = 3', engine)
|
pandas.read_sql_query
|
import requests
from lxml import etree
import numpy as np
import time
import pandas as pd
import eng_to_ipa as etipa
import os
from tqdm import tqdm
__version__ = '0.0.1.2'
class Pyrics:
def __init__(self,path=None):
if path is None:
self.path = os.path.join(os.getcwd(), 'lyrics')
if not os.path.exists(self.path):
os.makedirs(os.path.join(self.path, 'rhymes'), exist_ok=True)
else:
self.path = path
self.artists = []
if os.path.exists(os.path.join(self.path, 'rhymes')):
for data in os.scandir(os.path.join(self.path, 'rhymes')):
if data.name[-4:] == '.csv':
self.artists.append(self.__process_artist_name(data.name[7:-4]))
self.target = 'https://www.azlyrics.com'
self.__vowel = ['æ', 'ə', 'ɑ', 'ɔ', 'o', 'a', 'e', 'ɛ', 'i', 'ɪ', 'ʊ', 'u', 'ʌ']
#print(f'Data Path: {self.path}')
def download_lyrics(self, artists, iters_num = 1e20, delay_time=10, fluctuate_rate=5):
#search
target = self.target
artist_search = artists
search_url = requests.get(f"https://search.azlyrics.com/search.php?q={artist_search.replace(' ', '+')}")
search_content = etree.HTML(search_url.content)
artist_url = search_content.xpath('//b[contains(string(), "Artist results:")]/../..//a/@href')[0]
artist = self.__find_artist(artists, search_content)
if artist is None:
return
tree = etree.HTML(requests.get(artist_url).content)
#album_names = tree.xpath('//div[@class="album"]/b/text()')
song_names = tree.xpath('//div[@class="listalbum-item"]/a/text()')
song_urls = tree.xpath('//div[@class="listalbum-item"]/a/@href')
for content in zip(song_names[0:5], song_urls[0:5]):
print(f'example songs: {content[0]}: {target + content[1]}')
#download songs
lyrics_dict = dict()
lyrics = np.array([])
songs = np.array([])
bands = []
#delay_time = 10 # delay time to get rid of being baned
total_iters = np.min((iters_num, len(song_urls)))
iters = 1
reconnect_time = 0
with tqdm(total=total_iters) as pbar:
for content in zip(song_names, song_urls):
song_name = content[0]
url = content[1]
fluctuate = np.abs(np.random.randn())* fluctuate_rate
url = requests.get(target + url)
tree = etree.HTML(url.content)
lyric = np.array([l.strip() for l in tree.xpath('//div[5]/text()') if l.strip()!=''])
lyrics = np.hstack([lyrics, lyric])
songs = np.hstack([songs, np.array([song_name for i in range(len(lyric))])])
#comment = [c.strip() for c in tree.xpath('//div[@class="panel album-panel noprint"]/text()') if c.strip()!='']
#print(song_name)
pbar.set_description_str(f'{song_name}: delay time: {delay_time + fluctuate} ')
if len(lyric) == 0:
reconnect_time += 1
if reconnect_time <= 3:
continue
else:
print('You may be banned')
break
pbar.update(1)
iters += 1
if (iters > iters_num or iters == len(song_urls)):
break
#print(f'delay: {delay_time + fluctuate}')
sleep_time = delay_time + fluctuate
if iters % 200 == 0:
sleep_time = 60
time.sleep(sleep_time)
#save file
bands = [artist for i in range(len(lyrics))]
lyrics = lyrics.tolist()
lyrics_dict = {'bands': bands, 'songs': songs, 'lyrics': lyrics}
df =
|
pd.DataFrame(lyrics_dict)
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.core.accessor import AccessorProperty
import pandas.plotting._core as gfx
from .client import WattbikeHubClient
from .tools import polar_force_column_labels, flatten
class WattbikeFramePlotMethods(gfx.FramePlotMethods):
polar_angles = np.arange(90, 451) / (180 / np.pi)
polar_force_columns = polar_force_column_labels()
def _plot_single_polar(self, ax, polar_forces, mean, *args, **kwargs):
if 'linewidth' in kwargs:
linewidth = kwargs.pop('linewidth')
elif mean:
linewidth = 3
else:
linewidth = 0.5
ax.plot(self.polar_angles, polar_forces, linewidth=linewidth, *args, **kwargs)
def polar(self, full=False, mean=True, *args, **kwargs):
ax = plt.subplot(111, projection='polar')
if full:
for i in range(0, len(self._data) - 50, 50):
forces = self._data.iloc[i:i + 50, self._data.columns.get_indexer(self.polar_force_columns)].mean()
self._plot_single_polar(ax, forces, mean=False, *args, **kwargs)
if mean:
forces = self._data[self.polar_force_columns].mean()
self._plot_single_polar(ax, forces, mean=True, *args, **kwargs)
xticks_num = 8
xticks = np.arange(0, xticks_num, 2 * np.pi / xticks_num)
ax.set_xticks(xticks)
rad_to_label = lambda i: '{}°'.format(int(i / (2 * np.pi) * 360 - 90) % 180)
ax.set_xticklabels([rad_to_label(i) for i in xticks])
ax.set_yticklabels([])
return ax
class WattbikeDataFrame(pd.DataFrame):
@property
def _constructor(self):
return WattbikeDataFrame
def load(self, session_id):
client = WattbikeHubClient()
if not isinstance(session_id, list):
session_id = [session_id]
for session in session_id:
session_data, ride_session = client.get_session(session)
wdf = self._raw_session_to_wdf(session_data, ride_session)
self = self.append(wdf)
return self
def load_for_user(self, user_id, before=None, after=None):
client = WattbikeHubClient()
if not isinstance(user_id, list):
user_id = [user_id]
for ID in user_id:
sessions = client.get_sessions_for_user(
user_id=ID, before=before, after=after
)
for session_data, ride_session in sessions:
wdf = self._raw_session_to_wdf(session_data, ride_session)
self = self.append(wdf)
return self
def _raw_session_to_wdf(self, session_data, ride_session):
wdf = WattbikeDataFrame(
[flatten(rev) for lap in session_data['laps'] for rev in lap['data']])
wdf['time'] = wdf.time.cumsum()
wdf['user_id'] = ride_session.get_user_id()
wdf['session_id'] = ride_session.get_session_id()
self._process(wdf)
return wdf
def _process(self, wdf):
wdf = wdf._columns_to_numeric()
wdf = wdf._add_polar_forces()
wdf = wdf._add_min_max_angles()
wdf = self._enrich_with_athlete_performance_state(wdf)
return wdf
def _columns_to_numeric(self):
for col in self.columns:
try:
self.iloc[:, self.columns.get_loc(col)] = pd.to_numeric(self.iloc[:, self.columns.get_loc(col)])
except ValueError:
continue
return self
def _add_polar_forces(self):
_df = pd.DataFrame()
new_angles = np.arange(0.0, 361.0)
column_labels = polar_force_column_labels()
if not '_0' in self.columns:
for label in column_labels:
self[label] = np.nan
for index, pf in self.polar_force.iteritems():
if not isinstance(pf, str):
continue
forces = [int(i) for i in pf.split(',')]
forces = np.array(forces + [forces[0]])
forces = forces/np.mean(forces)
angle_dx = 360.0 / (len(forces)-1)
forces_interp = np.interp(
x=new_angles,
xp=np.arange(0, 360.01, angle_dx),
fp=forces)
_df[index] = forces_interp
_df['angle'] = column_labels
_df.set_index('angle', inplace=True)
_df = _df.transpose()
for angle in column_labels:
self[angle] = _df[angle]
return self
def _add_min_max_angles(self):
# @TODO this method is quite memory inefficient. Row by row calculation is better
pf_columns = polar_force_column_labels()
pf_T = self.loc[:, pf_columns].transpose().reset_index(drop=True)
left_max_angle = pf_T.iloc[:180].idxmax()
right_max_angle = pf_T.iloc[180:].idxmax() - 180
left_min_angle = pd.concat([pf_T.iloc[:135], pf_T.iloc[315:]]).idxmin()
right_min_angle = pf_T.iloc[135:315].idxmin() - 180
self['left_max_angle'] = pd.DataFrame(left_max_angle)
self['right_max_angle'] =
|
pd.DataFrame(right_max_angle)
|
pandas.DataFrame
|
#SPDX-License-Identifier: MIT
""" Helper methods constant across all workers """
import requests
import datetime
import time
import traceback
import json
import os
import sys
import math
import logging
import numpy
import copy
import concurrent
import multiprocessing
import psycopg2
import psycopg2.extensions
import csv
import io
from logging import FileHandler, Formatter, StreamHandler
from multiprocessing import Process, Queue, Pool, Value
from os import getpid
import sqlalchemy as s
import pandas as pd
from pathlib import Path
from urllib.parse import urlparse, quote
from sqlalchemy.ext.automap import automap_base
from augur.config import AugurConfig
from augur.logging import AugurLogging
from sqlalchemy.sql.expression import bindparam
from concurrent import futures
import dask.dataframe as dd
class Persistant():
ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, worker_type, data_tables=[],operations_tables=[]):
self.db_schema = None
self.helper_schema = None
self.worker_type = worker_type
#For database functionality
self.data_tables = data_tables
self.operations_tables = operations_tables
self._root_augur_dir = Persistant.ROOT_AUGUR_DIR
# count of tuples inserted in the database ( to store stats for each task in op tables)
self.update_counter = 0
self.insert_counter = 0
self._results_counter = 0
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
#TODO: consider taking parts of this out for the base class and then overriding it in WorkerGitInterfaceable
self.config = {
'worker_type': self.worker_type,
'host': self.augur_config.get_value('Server', 'host')
}
self.config.update(self.augur_config.get_section("Logging"))
try:
worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']]
self.config.update(worker_defaults)
except KeyError as e:
logging.warn('Could not get default configuration for {}'.format(self.config['worker_type']))
worker_info = self.augur_config.get_value('Workers', self.config['worker_type'])
self.config.update(worker_info)
worker_port = self.config['port']
while True:
try:
r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format(
self.config['host'], worker_port)).json()
if 'status' in r:
if r['status'] == 'alive':
worker_port += 1
except:
break
#add credentials to db config. Goes to databaseable
self.config.update({
'port': worker_port,
'id': "workers.{}.{}".format(self.worker_type, worker_port),
'capture_output': False,
'location': 'http://{}:{}'.format(self.config['host'], worker_port),
'port_broker': self.augur_config.get_value('Server', 'port'),
'host_broker': self.augur_config.get_value('Server', 'host'),
'host_database': self.augur_config.get_value('Database', 'host'),
'port_database': self.augur_config.get_value('Database', 'port'),
'user_database': self.augur_config.get_value('Database', 'user'),
'name_database': self.augur_config.get_value('Database', 'name'),
'password_database': self.augur_config.get_value('Database', 'password')
})
# Initialize logging in the main process
self.initialize_logging()
# Clear log contents from previous runs
open(self.config["server_logfile"], "w").close()
open(self.config["collection_logfile"], "w").close()
# Get configured collection logger
self.logger = logging.getLogger(self.config["id"])
self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))
#Return string representation of an object with all information needed to recreate the object (Think of it like a pickle made out of text)
#Called using repr(*object*). eval(repr(*object*)) == *object*
def __repr__(self):
return f"{self.config['id']}"
def initialize_logging(self):
#Get the log level in upper case from the augur config's logging section.
self.config['log_level'] = self.config['log_level'].upper()
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
if self.config['verbose']:
format_string = AugurLogging.verbose_format_string
else:
format_string = AugurLogging.simple_format_string
#Use stock python formatter for stdout
formatter = Formatter(fmt=format_string)
#User custom for stderr, Gives more info than verbose_format_string
error_formatter = Formatter(fmt=AugurLogging.error_format_string)
worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/"
Path(worker_dir).mkdir(exist_ok=True)
logfile_dir = worker_dir + f"/{self.worker_type}/"
Path(logfile_dir).mkdir(exist_ok=True)
#Create more complex sublogs in the logfile directory determined by the AugurLogging class
server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"])
collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"])
collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"])
self.config.update({
'logfile_dir': logfile_dir,
'server_logfile': server_logfile,
'collection_logfile': collection_logfile,
'collection_errorfile': collection_errorfile
})
collection_file_handler = FileHandler(filename=self.config['collection_logfile'], mode="a")
collection_file_handler.setFormatter(formatter)
collection_file_handler.setLevel(self.config['log_level'])
collection_errorfile_handler = FileHandler(filename=self.config['collection_errorfile'], mode="a")
collection_errorfile_handler.setFormatter(error_formatter)
collection_errorfile_handler.setLevel(logging.WARNING)
logger = logging.getLogger(self.config['id'])
logger.handlers = []
logger.addHandler(collection_file_handler)
logger.addHandler(collection_errorfile_handler)
logger.setLevel(self.config['log_level'])
logger.propagate = False
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
console_handler = StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(self.config['log_level'])
logger.addHandler(console_handler)
if self.config['quiet']:
logger.disabled = True
self.logger = logger
#database interface, the git interfaceable adds additional function to the super method.
def initialize_database_connections(self):
DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database']
)
# Create an sqlalchemy engine for both database schemas
self.logger.info("Making database connections")
self.db_schema = 'augur_data'
self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(self.db_schema)})
# , 'client_encoding': 'utf8'
self.helper_schema = 'augur_operations'
self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(self.helper_schema)})
metadata = s.MetaData()
helper_metadata = s.MetaData()
# Reflect only the tables we will use for each schema's metadata object
metadata.reflect(self.db, only=self.data_tables)
helper_metadata.reflect(self.helper_db, only=self.operations_tables)
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
# So we can access all our tables when inserting, updating, etc
for table in self.data_tables:
setattr(self, '{}_table'.format(table), Base.classes[table].__table__)
try:
self.logger.info(HelperBase.classes.keys())
except:
pass
for table in self.operations_tables:
try:
setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__)
except Exception as e:
self.logger.error("Error setting attribute for table: {} : {}".format(table, e))
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.logger.info("Trying to find max id of table...")
try:
self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1
except Exception as e:
self.logger.info(f"Could not find max id. ERROR: {e}")
#25151
#self.logger.info(f"Good, passed the max id getter. Max id: {self.history_id}")
#Make sure the type used to store date is synced with the worker?
def sync_df_types(self, subject, source, subject_columns, source_columns):
type_dict = {}
## Getting rid of nan's and NoneTypes across the dataframe to start:
subject = subject.fillna(value=numpy.nan)
source = source.fillna(value=numpy.nan)
for index in range(len(source_columns)):
if type(source[source_columns[index]].values[0]) == numpy.datetime64:
subject[subject_columns[index]] = pd.to_datetime(
subject[subject_columns[index]], utc=True
)
source[source_columns[index]] = pd.to_datetime(
source[source_columns[index]], utc=True
)
continue
## Dealing with an error coming from paginate endpoint and the GitHub issue worker
### For a release in mid september, 2021. #SPG This did not work on Ints or Floats
# if type(source[source_columns[index]].values[0]).isnull():
# subject[subject_columns[index]] = pd.fillna(value=np.nan)
# source[source_columns[index]] = pd.fillna(value=np.nan)
# continue
source_index = source_columns[index]
try:
source_index = source_columns[index]
type_dict[subject_columns[index]] = type(source[source_index].values[0])
#self.logger.info(f"Source data column is {source[source_index].values[0]}")
#self.logger.info(f"Type dict at {subject_columns[index]} is : {type(source[source_index].values[0])}")
except Exception as e:
self.logger.info(f"Source data registered exception: {source[source_index]}")
self.print_traceback("", e, True)
subject = subject.astype(type_dict)
return subject, source
#Convert safely from sql type to python type?
def get_sqlalchemy_type(self, data, column_name=None):
if type(data) == str:
try:
time.strptime(data, "%Y-%m-%dT%H:%M:%SZ")
return s.types.TIMESTAMP
except ValueError:
return s.types.String
elif (
isinstance(data, (int, numpy.integer))
or (isinstance(data, float) and column_name and 'id' in column_name)
):
return s.types.BigInteger
elif isinstance(data, float):
return s.types.Float
elif type(data) in [numpy.datetime64, pd._libs.tslibs.timestamps.Timestamp]:
return s.types.TIMESTAMP
elif column_name and 'id' in column_name:
return s.types.BigInteger
return s.types.String
def _convert_float_nan_to_int(self, df):
for column in df.columns:
if (
df[column].dtype == float
and ((df[column] % 1 == 0) | (df[column].isnull())).all()
):
df[column] = df[column].astype("Int64").astype(object).where(
pd.notnull(df[column]), None
)
return df
def _setup_postgres_merge(self, data_sets, sort=False):
metadata = s.MetaData()
data_tables = []
# Setup/create tables
for index, data in enumerate(data_sets):
data_table = s.schema.Table(f"merge_data_{index}_{os.getpid()}", metadata)
df = pd.DataFrame(data)
columns = sorted(list(df.columns)) if sort else df.columns
df = self._convert_float_nan_to_int(df)
for column in columns:
data_table.append_column(
s.schema.Column(
column, self.get_sqlalchemy_type(
df.fillna(method='bfill').iloc[0][column], column_name=column
)
)
)
data_tables.append(data_table)
metadata.create_all(self.db, checkfirst=True)
# Insert data to tables
for data_table, data in zip(data_tables, data_sets):
self.bulk_insert(
data_table, insert=data, increment_counter=False, convert_float_int=True
)
session = s.orm.Session(self.db)
self.logger.info("Session created for merge tables")
return data_tables, metadata, session
def _close_postgres_merge(self, metadata, session):
session.close()
self.logger.info("Session closed")
# metadata.reflect(self.db, only=[new_data_table.name, table_values_table.name])
metadata.drop_all(self.db, checkfirst=True)
self.logger.info("Merge tables dropped")
def _get_data_set_columns(self, data, columns):
if not len(data):
return []
self.logger.info("Getting data set columns")
df = pd.DataFrame(data, columns=data[0].keys())
final_columns = copy.deepcopy(columns)
for column in columns:
if '.' not in column:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
if column not in expanded_column.columns:
expanded_column[column] = None
final_columns += list(expanded_column.columns)
try:
df = df.join(expanded_column)
except ValueError:
# columns already added (happens if trying to expand the same column twice)
# TODO: Catch this before by only looping unique prefixs?
self.logger.info("Columns have already been added, moving on...")
pass
self.logger.info(final_columns)
self.logger.info(list(set(final_columns)))
self.logger.info("Finished getting data set columns")
return df[list(set(final_columns))].to_dict(orient='records')
def organize_needed_data(
self, new_data, table_values, action_map={}, in_memory=True
):
"""
This method determines which rows need to be inserted into the database (ensures data ins't inserted more than once)
and determines which rows have data that needs to be updated
:param new_data: list of dictionaries - needs to be compared with data in database to see if any updates are
needed or if the data needs to be inserted
:param table_values: list of SQLAlchemy tuples - data that is currently in the database
:param action_map: dict with two keys (insert and update) and each key's value contains a list of the fields
that are needed to determine if a row is unique or if a row needs to be updated
:param in_memory: boolean - determines whether the method is done is memory or database
(currently everything keeps the default of in_memory=True)
:return: list of dictionaries that contain data that needs to be inserted into the database
:return: list of dictionaries that contain data that needs to be updated in the database
"""
if len(table_values) == 0:
return new_data, []
if len(new_data) == 0:
return [], []
need_insertion = pd.DataFrame()
need_updates = pd.DataFrame()
if not in_memory:
new_data_columns = action_map['insert']['source']
table_value_columns = action_map['insert']['augur']
if 'update' in action_map:
new_data_columns += action_map['update']['source']
table_value_columns += action_map['update']['augur']
(new_data_table, table_values_table), metadata, session = self._setup_postgres_merge(
[
self._get_data_set_columns(new_data, new_data_columns),
self._get_data_set_columns(table_values, table_value_columns)
]
)
need_insertion = pd.DataFrame(session.query(new_data_table).join(table_values_table,
eval(
' and '.join([
f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" \
for table_column, source_column in zip(action_map['insert']['augur'],
action_map['insert']['source'])
])
), isouter=True).filter(
table_values_table.c[action_map['insert']['augur'][0]] == None
).all(), columns=table_value_columns)
self.logger.info("need_insertion calculated successfully")
need_updates = pd.DataFrame(columns=table_value_columns)
if 'update' in action_map:
need_updates = pd.DataFrame(session.query(new_data_table).join(table_values_table,
s.and_(
eval(' and '.join([f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['insert']['augur'], action_map['insert']['source'])])),
eval(' and '.join([f"table_values_table.c.{table_column} != new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['update']['augur'], action_map['update']['source'])]))
) ).all(), columns=table_value_columns)
self.logger.info("need_updates calculated successfully")
self._close_postgres_merge(metadata, session)
new_data_df =
|
pd.DataFrame(new_data)
|
pandas.DataFrame
|
from app.test.base import BaseTestCase
from app.main.service.CreateDocumentHandler import getCreatorDocumentHandler
from app.main.service.languageBuilder import LanguageBuilder
from app.test.fileVariables import pathTexts,pathTables,pathWeb,pathTimes
from app.main.service.personalDataSearchByEntities import PersonalDataSearchByEntities
from app.main.service.personalDataSearchByRules import PersonalDataSearchByRules
from nltk.tokenize import word_tokenize
import unittest
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re,string
import itertools
import time
class ConfidenceMatrixBuilder:
def __init__(self):
self.hits = 0
self.falsePositives = 0
self.falseNegatives = 0
self.nlp = LanguageBuilder().getlanguage()
self.listOfFalseNegatives = []
self.listOfFalsePositives = []
def countHinst(self, listNames:list, data:list, filename:str):
listNames = list(map(lambda name: name.replace('\n',''),listNames))
for name in list(set(data)):
countNameInModel = listNames.count(name)
realCountName = data.count(name)
if countNameInModel == realCountName:
self.hits += countNameInModel
elif countNameInModel == 0:
self.falseNegatives += (realCountName-countNameInModel)
self.listOfFalseNegatives.append((name,countNameInModel,realCountName,filename))
elif countNameInModel < realCountName:
self.hits += realCountName
self.listOfFalseNegatives.append((name,countNameInModel,realCountName,filename))
else:
self.hits += realCountName
self.falsePositives += (countNameInModel-realCountName)
self.listOfFalsePositives.append((name,countNameInModel,realCountName,filename))
for name in list(set(listNames)):
countNameInModel = listNames.count(name)
realCountName = data.count(name)
if realCountName == 0:
self.falsePositives += countNameInModel
self.listOfFalsePositives.append((name,countNameInModel,realCountName, filename))
def getData(self) -> dict:
return {
"hits" :self.hits,
"False Positives":self.falsePositives,
"False Negatives":self.falseNegatives
}
def _buildGarph(self, df:pd.DataFrame, yname, filename, nameTest):
bfn = {nameTest+str(index):0 for index in range(1,11)}
bfp = bfn.copy()
block = df[(df["TYPE"] == 'False Negative') & (df["MATCHES"] == 0)].groupby('FILE').groups
for k,v in block.items():
bfn[k] = len(v)
print(bfn)
plt.subplots_adjust(hspace=0.5)
plt.subplot(211)
plt.bar(bfn.keys(), bfn.values(), align='center', alpha=0.5)
plt.ylabel(yname)
plt.title('False Negative')
block = df[df["TYPE"] == 'False Positive'].groupby('FILE').groups
for k,v in block.items():
bfp[k] = len(v)
print(bfp)
plt.subplot(212)
plt.bar(bfp.keys(), bfp.values(), align='center', alpha=0.5)
plt.ylabel(yname)
plt.title('False Positive')
plt.savefig(filename)
plt.close()
def saveReport(self,csvfile, imgfile, nameTest):
table = {"NAMES":[], "MATCHES":[], "REAL MARCHES": [], "TYPE": [], "FILE":[]}
for name,matches,realMatches,file in self.listOfFalsePositives:
table["NAMES"].append(name)
table["MATCHES"].append(matches)
table["REAL MARCHES"].append(realMatches)
table["TYPE"].append("False Positive")
table["FILE"].append(file)
for name,matches,realMatches,file in self.listOfFalseNegatives:
table["NAMES"].append(name)
table["MATCHES"].append(matches)
table["REAL MARCHES"].append(realMatches)
table["TYPE"].append("False Negative")
table["FILE"].append(file)
df = pd.DataFrame(table, columns=table.keys())
df.to_csv(csvfile, index=False)
self._buildGarph(df,'count',imgfile, nameTest)
def getListOfFalseNegatives(self):
return self.listOfFalseNegatives
def getListOfFalsePositive(self):
return self.listOfFalsePositives
class TestPerfomanceTables(BaseTestCase):
def test_tables(self):
iteration = 11
builder = ConfidenceMatrixBuilder()
print("\n")
for index in range(1,iteration):
with open(pathTables + "%s.json" %(index), encoding='utf-8') as file:
data = json.load(file)
creator = getCreatorDocumentHandler(pathTables + "%s.xls" %(index),'xls')
dh = creator.create()
listNames,_ = dh.extractData()
builder.countHinst(listNames,data['names'],"tables%s" %(index))
#print(pathTables + "%s.xls" %(index), ":", len(listNames), "names")
print(builder.getData())
builder.saveReport('app/test/result/tables_report.csv', 'app/test/result/tables_report.jpg', 'tables')
class TestPerfomanceTexts(BaseTestCase):
def test_text(self):
iteration = 11
builder = ConfidenceMatrixBuilder()
print("\n")
for index in range(1,iteration):
with open(pathTexts + "%s.json" %(index)) as file:
data = json.load(file)
creator = getCreatorDocumentHandler(pathTexts + "%s.txt" %(index),'txt')
dh = creator.create()
listNames,_ = dh.extractData()
#print(pathTexts + "%s.txt" %(index), ":", len(listNames), "names")
builder.countHinst(listNames,data['names'],"text%s" %(index))
print(builder.getData())
builder.saveReport('app/test/result/text_report.csv','app/test/result/text_report.jpg', 'text')
@unittest.skip
class TestPerfomanceWeb(BaseTestCase):
def test_web(self):
iteration = 11
builder = ConfidenceMatrixBuilder()
print("\n")
for index in range(1,iteration):
with open(pathWeb + "%s.json" %(index), encoding='utf-8') as file:
data = json.load(file)
creator = getCreatorDocumentHandler(pathWeb + "%s.html" %(index),'html')
dh = creator.create()
listNames,_ = dh.extractData()
builder.countHinst(listNames,data['names'], "web%s" %(index))
#print(pathWeb + "%s.html" %(index), ":", len(listNames), "names")
print(builder.getData())
builder.saveReport('app/test/result/web_report.csv','app/test/result/web_report.jpg', 'web')
def test_time_of_Model():
entModel = PersonalDataSearchByEntities()
rulesModel = PersonalDataSearchByRules()
def getMesures(text:str) -> list:
st = time.time()
data = entModel.searchPersonalData(text)
ent_times = time.time()-st
ent_len = len(data[0]) + len(data[1])
st = time.time()
data = rulesModel.searchPersonalData(text)
rules_times = time.time()-st
rules_len = len(data[0]) + len(data[1])
return [ent_times,ent_len,rules_times,rules_len, len(word_tokenize(text))]
with open(pathTimes, "r", encoding='latin-1') as file:
texts = file.read()
texts = re.sub('<.*>','lineSplit',texts)
texts = re.sub('ENDOFARTICLE.','',texts)
punctuationNoPeriod = "[" + re.sub("\.","",string.punctuation) + "]"
texts = re.sub(punctuationNoPeriod, "", texts)
list_texts = texts.split('lineSplit')
mesures = np.array(
list(filter(lambda row: row[1]*row[3]*row[4] != 0,
map(lambda text: getMesures(text),
list_texts
)
)
)
)
df =
|
pd.DataFrame(mesures)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# this definition exposes all python module imports that should be available in all subsequent commands
import json
import datetime
import numpy as np
import pandas as pd
import spacy
import en_core_web_sm
from spacytextblob.spacytextblob import SpacyTextBlob
# global constants
MODEL_DIRECTORY = "/srv/app/model/data/"
# In[5]:
# this cell is not executed from MLTK and should only be used for staging data into the notebook environment
def stage(name):
with open("data/"+name+".csv", 'r') as f:
df = pd.read_csv(f)
with open("data/"+name+".json", 'r') as f:
param = json.load(f)
return df, param
# In[13]:
# initialize the model
# params: data and parameters
# returns the model object which will be used as a reference to call fit, apply and summary subsequently
def init(df,param):
# Load English parser and text blob (for sentiment analysis)
model = spacy.load('en_core_web_sm')
spacy_text_blob = SpacyTextBlob()
model.add_pipe(spacy_text_blob)
return model
# In[15]:
# returns a fit info json object
def fit(model,df,param):
returns = {}
return returns
# In[36]:
def apply(model,df,param):
X = df[param['feature_variables']].values.tolist()
temp_data=list()
for i in range(len(X)):
doc = model(str(X[i]))
polarity=doc._.sentiment.polarity
subjectivity=doc._.sentiment.subjectivity
assessments=doc._.sentiment.assessments
temp_data.append([polarity,subjectivity,assessments])
column_names=["polarity","subjectivity","assessments"]
returns=
|
pd.DataFrame(temp_data, columns=column_names)
|
pandas.DataFrame
|
import argparse
from pathlib import Path
import torch
import pandas as pd
from PIL import Image
from tqdm import tqdm
import yolov3.utils.utils as utils
import yolov3.utils.vis_utils as vis_utils
from yolov3.datasets.imagefolder import ImageFolder
from yolov3.models.yolov3 import YOLOv3
from yolov3.utils.parse_yolo_weights import parse_yolo_weights
def parse_args():
"""Parse command line arguments.
"""
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument("--input_path", type=Path, required=True,
help="image path or directory path which contains images to infer")
parser.add_argument("--output_dir", type=Path, default="infer_output",
help="directory path to output detection results")
parser.add_argument("--weights_path", type=Path, required=True,
help="path to weights file")
parser.add_argument("--config_path", type=Path, default="config/yolov3_coco.yaml",
help="path to config file")
parser.add_argument('--gpu_id', type=int, default=-1,
help="GPU id to use")
# fmt: on
args = parser.parse_args()
return args
def output_to_dataframe(output, class_names):
detection = []
for x1, y1, x2, y2, obj_conf, class_conf, label in output:
bbox = {
"confidence": float(obj_conf * class_conf),
"class_id": int(label),
"class_name": class_names[int(label)],
"x1": int(x1),
"y1": int(y1),
"x2": int(x2),
"y2": int(y2),
}
detection.append(bbox)
detection =
|
pd.DataFrame(detection)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import joblib
from utils import normalize_MPU9250_data, split_df, get_intervals_from_moments, EventIntervals
from GeneralAnalyser import GeneralAnalyser, plot_measurements
# plt.interactive(True)
pd.options.display.max_columns = 15
pic_prefix = 'pic/'
# data_path = 'data/CSV'
# data_path = 'Anonimised Data/Data'
# sessions_dict = joblib.load('data/sessions_dict')
sessions_dict = joblib.load('data/sessions_dict')
gamedata_dict = joblib.load('data/gamedata_dict')
sensors_columns_dict = {
'hrm': ['hrm'],
'envibox': ['als', 'mic', 'humidity', 'temperature', 'co2'],
'datalog': ['hrm2', 'resistance', 'muscle_activity']
}
sensors_list = list(sensors_columns_dict.keys())
sensors_columns_list = []
for session_id, session_data_dict in sessions_dict.items():
df_dict = {}
if not set(sensors_list).issubset(set(session_data_dict.keys())):
continue
if session_id not in gamedata_dict:
continue
df_discretized_list = []
for sensor_name in sensors_columns_dict:
df = session_data_dict[sensor_name]
df = df.set_index(pd.DatetimeIndex(pd.to_datetime(df['time'], unit='s')))
df_discretized = df.resample('100ms').mean().ffill() # Forward fill is better
df_discretized_list.append(df_discretized)
moments_kills = gamedata_dict[session_id]['times_kills']
moments_death = gamedata_dict[session_id]['times_is_killed']
duration = 1
intervals_shootout = gamedata_dict[session_id]['shootout_times_start_end']
intervals_kills = get_intervals_from_moments(moments_kills, interval_start=-duration, interval_end=duration)
intervals_death = get_intervals_from_moments(moments_death, interval_start=-duration, interval_end=duration)
event_intervals_shootout = EventIntervals(intervals_list=intervals_shootout, label='shootouts', color='blue')
event_intervals_kills = EventIntervals(intervals_list=intervals_kills, label='kills', color='green')
event_intervals_death = EventIntervals(intervals_list=intervals_death, label='deaths', color='red')
def discretize_time_column(time_column, discretization=0.1):
time_column_discretized = time_column - time_column % discretization
return time_column_discretized
def auxilary_discretization_table(time_column, discretization):
time_column_discretized = discretize_time_column(df['time'], discretization)
timesteps = np.arange(0, time_column_discretized.max() + discretization, discretization)
'''
If there are several records to one timestep => select the earliest
If there isn't any records to one timestep => select the latest available
'''
|
pd.PeriodIndex(df['time'])
|
pandas.PeriodIndex
|
#coding=utf8
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import torch
from torch import nn, optim
import numpy as np
import sys
import matplotlib.pyplot as plt
from torchsummary import summary
import matplotlib.pyplot as plt
import pandas as pd
import os
import torch.nn.functional as F
import time
from utils import *
import argparse
class HSICBottleneck:
def __init__(self, args):
self.model = MLP(args)
self.model.to(device)
self.batch_size = args.batchsize
self.lambda_0 = args.lambda_
self.sigma = args.sigma_
self.extractor = 'hsic'
self.last_linear = "output_layer"
self.HSIC = compute_HSIC(args.HSIC)
self.kernel = compute_kernel()
self.kernel_x = args.kernel_x
self.kernel_h = args.kernel_h
self.kernel_y = args.kernel_y
self.forward = args.forward
self.opt = optim.AdamW(self.model.parameters(), lr=0.001)
self.iter_loss1, self.iter_loss2, self.iter_loss3 = [], [], []
self.track_loss1, self.track_loss2, self.track_loss3 = [], [], []
self.loss = args.loss
if self.loss == "mse": self.output_criterion = nn.MSELoss()
elif self.loss == "CE": self.output_criterion = nn.CrossEntropyLoss()
def step(self, input_data, labels):
labels_float = F.one_hot(labels, num_classes=10).float()
if self.forward == "x": Kx = self.kernel(input_data, self.sigma, self.kernel_x)
Ky = self.kernel(labels_float, self.sigma, self.kernel_y)
kernel_list = list()
y_pred, hidden_zs = self.model(input_data)
for num, feature in enumerate(hidden_zs): kernel_list.append(self.kernel(feature, self.sigma, self.kernel_h))
total_loss1, total_loss2, total_loss3 = 0., 0., 0.
for num, feature in enumerate(kernel_list):
if num == (len(hidden_zs)-1):
if self.forward == "h": total_loss1 += self.HSIC(feature, kernel_list[num-1], self.batch_size, device)
elif self.forward == "x": total_loss1 += self.HSIC(feature, Kx, self.batch_size, device)
if self.loss == "mse": total_loss3 += self.output_criterion(hidden_zs[-1], labels_float)
elif self.loss == "CE": total_loss3 += self.output_criterion(hidden_zs[-1], labels)
elif num == 0:
if self.forward == "x": total_loss1 += self.HSIC(feature, Kx, self.batch_size, device)
total_loss2 += - self.lambda_0*self.HSIC(feature, Ky, self.batch_size, device)
else:
if self.forward == "h": total_loss1 += self.HSIC(feature, kernel_list[num-1], self.batch_size, device)
elif self.forward == "x": total_loss1 += self.HSIC(feature, Kx, self.batch_size, device)
total_loss2 += - self.lambda_0*self.HSIC(feature, Ky, self.batch_size, device)
if self.forward == "h" or self.forward == "x":
total_loss = total_loss1 + total_loss2 + total_loss3
self.iter_loss1.append(total_loss1.item())
if self.forward == "n":
total_loss = total_loss2 + total_loss3
self.iter_loss1.append(-1)
self.opt.zero_grad()
total_loss.backward()
self.opt.step()
self.iter_loss2.append(total_loss2.item())
self.iter_loss3.append(total_loss3.item())
def update_loss(self):
self.track_loss1.append(np.mean(self.iter_loss1))
self.track_loss2.append(np.mean(self.iter_loss2))
self.track_loss3.append(np.mean(self.iter_loss3))
self.iter_loss1, self.iter_loss2, self.iter_loss3 = [], [], []
def tune_output(self, input_data, labels):
self.model.train()
if self.loss == "mse":
one_hot_labels = F.one_hot(labels, num_classes=10)
labels = F.one_hot(labels, num_classes=10).float()
y_pred, hidden_zs = self.model(input_data)
total_loss = self.output_criterion(hidden_zs[-1], labels)
self.opt.zero_grad()
total_loss.backward()
self.opt.step()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default="mnist")
parser.add_argument('--loss', type=str, default="CE")
parser.add_argument('--HSIC', type=str, default="nHSIC")
parser.add_argument('--kernel_x', type=str, default="rbf", choices=["rbf", "student"])
parser.add_argument('--kernel_h', type=str, default="rbf", choices=["rbf", "student"])
parser.add_argument('--kernel_y', type=str, default="rbf", choices=["rbf", "student"])
parser.add_argument('--sigma_', type=int, default=10)
parser.add_argument('--lambda_', type=int, default=1000)
parser.add_argument('--batchsize', type=int, default=256)
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--bn_affine', type=int, default=0)
parser.add_argument('--forward', type=str, default="n", choices=["x", "h", "n"])
args, _ = parser.parse_known_args()
filename = get_filename(args)
print(filename)
torch.manual_seed(1)
device = "cuda:{}".format(args.device)
batch_size = args.batchsize
train_loader, test_loader = load_data(batch_size=args.batchsize)
logs = list()
hsic = HSICBottleneck(args)
start = time.time()
for epoch in range(100):
hsic.model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data = data.view(args.batchsize, -1)
hsic.step(data.view(args.batchsize, -1).to(device), target.to(device))
hsic.tune_output(data.view(args.batchsize, -1).to(device), target.to(device))
if epoch in range(0, 100, 10):
show_result(hsic, train_loader, test_loader, epoch, logs, device)
print("{:.2f}".format(time.time()-start))
start = time.time()
txt_path = os.path.join("./results", filename+".csv")
df =
|
pd.DataFrame(logs)
|
pandas.DataFrame
|
# Same as SBM_vs_uncorrelated.py, but for lambda_k distances. Can be run for
# various numberes of communities, designated below by variable l.
import netcomp as nc
from joblib import Parallel, delayed
import multiprocessing
import os
import networkx as nx
import pandas as pd
import time
import pickle
import numpy as np
from tqdm import tqdm
####################################
### SET PARAMETERS
####################################
data_dir = "../pickled_data"
num_cores = multiprocessing.cpu_count()
# size of ensemble
ensemble_len = 500
n = 1000
p = 0.02
l = 2 # number of communities
n = n - n % l # ensures that n/l is an integer
t = 0.05
# in the main body of the paper we use l=2 and t = 1/20
def pp(n, p, l, t):
"""calculate pp,qq for SBM given p from ER
p : p from G(n,p)
l : # of communities
t : ratio of pp/qq
"""
pp = p * n * (n - 1) / (n ** 2 / l - n + t * n ** 2 * (l - 1) / l)
qq = t * pp
return pp, qq
pp, qq = pp(n, p, l, t)
####################################
## DEFINE IMPORTANT FUNCTIONS
####################################
def distance(dist_func, A, B):
return dist_func(A, B)
k_list = [1, 2, 10, 100, 300, 999]
def flatten(l):
return [item for sublist in l for item in sublist]
distances = []
labels = []
def return_lambdas(k):
return [
lambda A1, A2: nc.lambda_dist(A1, A2, kind="adjacency", k=k),
lambda A1, A2: nc.lambda_dist(A1, A2, kind="laplacian", k=k),
lambda A1, A2: nc.lambda_dist(A1, A2, kind="laplacian_norm", k=k),
]
# can't make this work without using map
distances = list(map(return_lambdas, k_list))
distances = flatten(distances)
for k in k_list:
labels_k = [
"Lambda (Adjacency, k={})".format(k),
"Lambda (Laplacian, k={})".format(k),
"Lambda (Normalized Laplacian, k={})".format(k),
]
labels += labels_k
def grab_data(i, null=True):
G1 = nx.erdos_renyi_graph(n, p)
if null:
G2 = nx.erdos_renyi_graph(n, p)
else:
G2 = nx.planted_partition_graph(l, n // l, pp, qq)
A1, A2 = [nx.adjacency_matrix(G).todense() for G in [G1, G2]]
adj_distances = pd.Series(
[distance(dfun, A1, A2) for dfun in distances],
index=labels,
name="Adjacency Distances",
)
data = pd.concat([adj_distances], axis=1)
return data
####################################
## TAKE DATA
####################################
print("Running on {} cores.".format(num_cores))
print("ER/SBM Lambda K Distance Comparison.")
start = time.time()
results_null = Parallel(n_jobs=num_cores)(
delayed(grab_data)(i) for i in tqdm(range(ensemble_len))
)
end = time.time()
print("Null data complete. Total time elapsed: {} seconds.".format(end - start))
results_df_null = pd.concat(results_null, axis=1)
start = time.time()
results_not_null = Parallel(n_jobs=num_cores)(
delayed(grab_data)(i, null=False) for i in tqdm(range(ensemble_len))
)
end = time.time()
print("Alternative data complete. Total time elapsed: {} seconds.".format(end - start))
results_df_not_null =
|
pd.concat(results_not_null, axis=1)
|
pandas.concat
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from singa_auto.model import TabularClfModel, FloatKnob, CategoricalKnob, FixedKnob, IntegerKnob, utils
from singa_auto.constants import ModelDependency
from singa_auto.model.dev import test_model_class
from sklearn.metrics import roc_auc_score, roc_curve
from lightgbm import LGBMClassifier
import lightgbm as lgb
from sklearn.model_selection import KFold, StratifiedKFold
import json
import os
import tempfile
import numpy as np
import base64
import pandas as pd
import abc
import gc
import pickle
from urllib.parse import urlparse, parse_qs
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
'''
This model is desigined for Home Credit Default Risk `https://www.kaggle
.com/c/home-credit-default-risk` and only uses the main table
'application_{train|test}.csv' of this competition as dataset.
'''
class LightGBM(TabularClfModel):
@staticmethod
def get_knob_config():
return {
'learning_rate': FloatKnob(1e-2, 1e-1, is_exp=True),
'num_leaves': IntegerKnob(20, 60),
'colsample_bytree': FloatKnob(1e-1, 1),
'subsample': FloatKnob(1e-1, 1),
'max_depth': IntegerKnob(1, 10),
}
def __init__(self, **knobs):
self._knobs = knobs
self.__dict__.update(knobs)
def train(self,
dataset_url,
features=None,
target=None,
exclude=None,
**kwargs):
utils.logger.define_plot('Loss Over Epochs',
['loss', 'early_stop_val_loss'],
x_axis='epoch')
self._features = features
self._target = target
df = pd.read_csv(dataset_url, index_col=0)
if exclude and set(df.columns.tolist()).intersection(
set(exclude)) == set(exclude):
df = df.drop(exclude, axis=1)
# Optional: Remove 4 applications with XNA CODE_GENDER (train set)
df = df[df['CODE_GENDER'] != 'XNA']
# Extract X & y from dataframe
(X, y) = self._extract_xy(df)
# Encode categorical features
X = self._encoding_categorical_type(X)
# other preprocessing
df_train = self._preprocessing(X)
# Cross validation model
folds = KFold(n_splits=10, shuffle=True)
flag = 0
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, y)):
lgb_train = lgb.Dataset(
X.iloc[train_idx],
y.iloc[train_idx],
)
lgb_valid = lgb.Dataset(
X.iloc[valid_idx],
y.iloc[valid_idx],
)
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'cross_entropy',
'nthread': 4,
'n_estimators': 10,
'learning_rate': self._knobs.get("learning_rate"),
'num_leaves': self._knobs.get("num_leaves"),
'colsample_bytree': self._knobs.get("colsample_bytree"),
'subsample': self._knobs.get("subsample"),
'max_depth': self._knobs.get("max_depth"),
'verbose': -1,
}
abc = {}
self._model = lgb.train(params,
lgb_train,
num_boost_round=1000,
valid_sets=[lgb_train, lgb_valid],
verbose_eval=100,
callbacks=[lgb.record_evaluation(abc)])
utils.logger.log(
loss=abc['training']['cross_entropy'][-1],
early_stop_val_loss=abc['valid_1']['cross_entropy'][-1],
epoch=flag)
flag += 1
def evaluate(self, dataset_url, **kwargs):
df = pd.read_csv(dataset_url, index_col=0)
# Optional: Remove 4 applications with XNA CODE_GENDER (train set)
df = df[df['CODE_GENDER'] != 'XNA']
# Extract X & y from dataframe
(X, y) = self._extract_xy(df)
# Encode categorical features, no need mapping features for this model
X = self._encoding_categorical_type(X)
# other preprocessing
df_train = self._preprocessing(X)
# oof_preds = np.zeros(df.shape[0])
oof_preds = self._model.predict(X)
return roc_auc_score(y, oof_preds)
def predict(self, queries):
df = pd.DataFrame(queries)
# Extract X & y from dataframe
(X, y) = self._extract_xy(df)
# Encode categorical features
X = self._encoding_categorical_type(X)
# other preprocessing
df_train = self._preprocessing(X)
predictions = self._model.predict(X)
predicts = []
for prediction in predictions:
predicts.append(prediction)
return predicts
def destroy(self):
pass
def dump_parameters(self):
params = {}
# Save model parameters
with tempfile.NamedTemporaryFile() as tmp:
# Save whole model to temp h5 file
self._model.save_model(tmp.name)
# Read from temp h5 file & encode it to base64 string
with open(tmp.name, 'rb') as f:
h5_model_bytes = f.read()
data_config_bytes = pickle.dumps([self._features, self._target])
params['h5_model_base64'] = base64.b64encode(h5_model_bytes).decode(
'utf-8')
params['data_config_base64'] = base64.b64encode(
data_config_bytes).decode('utf-8')
return params
def load_parameters(self, params):
# Load model parameters
h5_model_base64 = params.get('h5_model_base64', None)
data_config_base64 = params.get('data_config_base64', None)
data_config_bytes = base64.b64decode(data_config_base64.encode('utf-8'))
self._features, self._target = pickle.loads(data_config_bytes)
with tempfile.NamedTemporaryFile() as tmp:
# Convert back to bytes & write to temp file
h5_model_bytes = base64.b64decode(h5_model_base64.encode('utf-8'))
with open(tmp.name, 'wb') as f:
f.write(h5_model_bytes)
# Load model from temp file
self._model = lgb.Booster(model_file=tmp.name)
def _extract_xy(self, data):
if self._target is None:
self._target = 'TARGET'
y = data[self._target] if self._target in data else None
if self._features is None:
X = data.drop(self._target, axis=1)
self._features = list(X.columns)
else:
X = data[self._features]
return (X, y)
def _encoding_categorical_type(self, df):
# Categorical features with Binary encode (0 or 1; two categories)
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
for col in df.columns:
if df[col].dtype == 'object':
df[col] = df[col].astype('category')
return df
def _preprocessing(self, df):
# NaN values for DAYS_EMPLOYED: 365.243 -> nan
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
# Some simple new features (percentages)
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
return df
def _features_mapping(self, df):
pass
def _build_model(self):
pass
if __name__ == '__main__':
curpath = os.path.join(os.environ['HOME'], 'singa_auto')
os.environ.setdefault('WORKDIR_PATH', curpath)
os.environ.setdefault('PARAMS_DIR_PATH', os.path.join(curpath, 'params'))
train_set_url = os.path.join(curpath, 'data', 'application_train_index.csv')
valid_set_url = train_set_url
test_set_url = os.path.join(curpath, 'data', 'application_test_index.csv')
test_queries =
|
pd.read_csv(test_set_url, index_col=0)
|
pandas.read_csv
|
from navbar import create_navbar, create_navbar2
import dash
import dash_bootstrap_components as dbc
# from dash import html
# from dash import dcc
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from dash_extensions import Download
from dash_extensions.snippets import send_data_frame
import pandas as pd
import plotly.express as px
app = dash.Dash(__name__)
nav = create_navbar2()
header = html.H3('Welcome to page 2!')
#data
df = pd.read_csv('./Data/my_backup.csv') #data frame
df['Date'] =
|
pd.to_datetime(df['Date'])
|
pandas.to_datetime
|
def calculateAnyProfile(profileType, df_labs, df_meds, df_procedures, df_diagnoses, df_phenotypes):
"""Calculate a single profile based on the type provided and data cleaned from getSubdemographicsTables
Arguments:
profileType -- which individual profile type you would like generated, this will be the category with the header information
(Options: 'labs', 'medications', 'procedures', 'diagnoses', 'phenotypes')
Keywords:
df_labs -- labs dataframe returned from getSubdemographicsTables
df_medications -- medications dataframe returned from getSubdemographicsTables
df_procedures -- procedures dataframe returned from getSubdemographicsTables
df_diagnoses -- diagnoses dataframe returned from getSubdemographicsTables
df_phenotypes -- phenotypes dataframe returned from getSubdemographicsTables
Returns Pythonic structures needed to generate profile in JSON format using the corresponding write profile function
"""
import os
import sys
import sqlalchemy
import urllib.parse
import pandas as pd
import numpy as np
import getpass
from dataclasses import dataclass
from SciServer import Authentication
from datetime import datetime
import pymssql
try:
# Make Labs Profile
if profileType == 'labs':
# High Level Info, Scalar Distribution
labs_counts = df_labs.LAB_LOINC.value_counts()
grouped_labs = df_labs.groupby(['LAB_LOINC', 'resultYear'])
labs_frequencyPerYear = (df_labs.groupby(['LAB_LOINC','PATID','resultYear']).PATID.size()
.groupby(['LAB_LOINC','resultYear']).aggregate(np.mean))
labs_fractionOfSubjects = (np.divide(df_labs.groupby(['LAB_LOINC']).PATID.nunique(),
df_labs.PATID.nunique()))
labs_units = df_labs.groupby(['LAB_LOINC']).LOINC_UNIT.unique()
labs_names = df_labs.groupby(['LAB_LOINC']).LOINC_SHORTNAME.unique()
def percentile(n):
def percentile_(x):
return x.quantile(n*0.01)
percentile_.__name__ = '%s' % n
return percentile_
labs_stats = (grouped_labs
.RESULT_NUM.agg(['min','max', 'mean','median','std',
percentile(10), percentile(20), percentile(30),
percentile(40), percentile(50), percentile(60),
percentile(70), percentile(80), percentile(90)]))
def fracsAboveBelowNormal(x):
try:
aboveNorm = np.divide(np.sum(x.RESULT_NUM > x.range_high), x.RESULT_NUM.size)
belowNorm = np.divide(np.sum(x.RESULT_NUM < x.range_low), x.RESULT_NUM.size)
return pd.Series({'aboveNorm':aboveNorm, 'belowNorm':belowNorm})
except:
return pd.Series({'aboveNorm':np.nan, 'belowNorm':np.nan})
labs_aboveBelowNorm = (grouped_labs.apply(fracsAboveBelowNormal))
labs_correlatedLabsCoefficients = (df_labs.groupby(['LAB_LOINC','resultYear','PATID'])
.RESULT_NUM.mean())
labs_abscorrelation = 0
## LABS TO MEDICATIONS
def patientsAboveBelowNormalLabsMeds(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to meds table
abnormalPatientsMeds = df_meds[df_meds.PATID.isin(patientsAboveBelowNorm) &
(df_meds.startYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'medsAboveBelowNorm': abnormalPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': abnormalPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedMedsCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedMedsCoefficients.index:
thisLabYear = labs_correlatedMedsCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for medInd in range(len(labs_correlatedMedsCoefficients.loc[lab].medsAboveBelowNorm.values)):
mytups.append((thisLabYear.medsAboveBelowNorm.values[medInd], thisLabYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## LABS TO PROCEDURES
def patientsAboveBelowNormalLabsProcs(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsAboveBelowNorm) &
(df_procedures.encounterYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'procsAboveBelowNorm': abnormalPatientsProcs.RAW_PX.value_counts().index,
'counts': abnormalPatientsProcs.RAW_PX.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedProceduresCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedProceduresCoefficients.index:
thisLabYear = labs_correlatedProceduresCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for procInd in range(len(labs_correlatedProceduresCoefficients.loc[lab].procsAboveBelowNorm.values)):
mytups.append((thisLabYear.procsAboveBelowNorm.values[procInd], thisLabYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
## LABS TO DIAGNOSES
def patientsAboveBelowNormalLabsDiags(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsDiags = df_diagnoses[df_diagnoses.PATID.isin(patientsAboveBelowNorm) &
(df_diagnoses.admitYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'diagsAboveBelowNorm': abnormalPatientsDiags.DX.value_counts().index,
'counts': abnormalPatientsDiags.DX.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedDiagnosisCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedDiagnosisCoefficients.index:
thisLabYear = labs_correlatedDiagnosisCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for diagInd in range(len(labs_correlatedDiagnosisCoefficients.loc[lab].diagsAboveBelowNorm.values)):
mytups.append((thisLabYear.diagsAboveBelowNorm.values[diagInd], thisLabYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedDiagnosisCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
## LABS TO PHENOTYPES
def patientsAboveBelowNormalLabsHPOs(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsHPOs = df_phenotypes[df_phenotypes.PATID.isin(patientsAboveBelowNorm) &
(df_phenotypes.admitYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'hposAboveBelowNorm': abnormalPatientsHPOs.HPO.value_counts().index,
'counts': abnormalPatientsHPOs.HPO.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedPhenotypesCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsHPOs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedPhenotypesCoefficients.index:
thisLabYear = labs_correlatedPhenotypesCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for hpoInd in range(len(labs_correlatedPhenotypesCoefficients.loc[lab].hposAboveBelowNorm.values)):
mytups.append((thisLabYear.hposAboveBelowNorm.values[hpoInd], thisLabYear.counts[hpoInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedPhenotypesCoefficients = (pd.DataFrame.from_records(mytups, columns=['HPO','Relative_Counts'],
index=index))
return (labs_counts, labs_frequencyPerYear, labs_fractionOfSubjects, labs_units, labs_names,
labs_stats, labs_aboveBelowNorm, labs_correlatedLabsCoefficients, labs_abscorrelation,
labs_correlatedMedsCoefficients, labs_correlatedProceduresCoefficients, labs_correlatedDiagnosisCoefficients,
labs_correlatedPhenotypesCoefficients)
# Make Medication Profile
elif profileType == 'medications':
meds_medication = df_meds.JH_INGREDIENT_RXNORM_CODE.unique()
meds_dosageInfo = df_meds.groupby('JH_INGREDIENT_RXNORM_CODE').RX_DOSE_ORDERED.mean()
meds_frequencyPerYear = (df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE','startYear','PATID']).PATID
.count().groupby(['JH_INGREDIENT_RXNORM_CODE','startYear']).mean())
meds_fractionOfSubjects = (np.divide(df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE']).PATID.nunique(),
df_meds.PATID.nunique()))
grouped_meds = df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE', 'startYear'])
#meds_correlatedLabsCoefficients
def patientsAboveBelowNormalMedsLabs(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to labs table
abnormalPatientsLabs = df_labs[(df_labs.PATID.isin(patientsWithThisRX)) &
((df_labs.RESULT_NUM > df_labs.range_high) |
(df_labs.RESULT_NUM < df_labs.range_low)) &
(df_labs.resultYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'labsAboveBelowNorm': abnormalPatientsLabs.LAB_LOINC.value_counts().index,
'counts': abnormalPatientsLabs.LAB_LOINC.value_counts().values})
meds_correlatedLabsCoefficients = (grouped_meds.apply(patientsAboveBelowNormalMedsLabs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedLabsCoefficients.index:
thisMedYear = meds_correlatedLabsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for labInd in range(len(meds_correlatedLabsCoefficients.loc[med].labsAboveBelowNorm.values)):
mytups.append((thisMedYear.labsAboveBelowNorm.values[labInd], thisMedYear.counts[labInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedLabsCoefficients = (pd.DataFrame.from_records(mytups, columns=['LAB_LOINC','Relative_Counts'],
index=index))
#meds_correlatedDiagsCoefficients
def patientsCrossFreqMedsDiags(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsDXs = df_diagnoses[(df_diagnoses.PATID.isin(patientsWithThisRX)) &
(df_diagnoses.admitYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'diagsCrossFreq': commonPatientsDXs.DX.value_counts().index,
'counts': commonPatientsDXs.DX.value_counts().values})
meds_correlatedDiagsCoefficients = (grouped_meds.apply(patientsCrossFreqMedsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedDiagsCoefficients.index:
thisMedYear = meds_correlatedDiagsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for diagInd in range(len(meds_correlatedDiagsCoefficients.loc[med].diagsCrossFreq.values)):
mytups.append((thisMedYear.diagsCrossFreq.values[diagInd], thisMedYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedDiagsCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
#meds_correlatedMedsCoefficients
def patientsCrossFreqMedsMeds(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to labs table
commonPatientsMeds = df_meds[(df_meds.PATID.isin(patientsWithThisRX)) &
(pd.to_datetime(df_meds.RX_START_DATE).dt.year ==
pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'medsCrossFreq': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
meds_correlatedMedsCoefficients = (grouped_meds.apply(patientsCrossFreqMedsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedMedsCoefficients.index:
thisMedYear = meds_correlatedMedsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for medInd in range(len(meds_correlatedMedsCoefficients.loc[med].medsCrossFreq.values)):
mytups.append((thisMedYear.medsCrossFreq.values[medInd], thisMedYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index =
|
pd.MultiIndex.from_tuples(multiIndex)
|
pandas.MultiIndex.from_tuples
|
from os.path import dirname, join
import pandas as pd
from sstcam_sandbox import get_data, get_astri_2019
from CHECLabPy.core.io import HDF5Writer, TIOReader
from tqdm import tqdm
import psutil
def process(paths, output_path):
with HDF5Writer(output_path) as writer:
for ipath, path in enumerate(paths):
print(f"File: {ipath+1}/{len(paths)}")
reader = TIOReader(path)
n_events = reader.n_events
for wf in tqdm(reader, total=n_events):
data = dict(
ipath=ipath,
iev=wf.iev,
tack=wf.t_tack,
stale=wf.stale[0],
fci=wf.first_cell_id[0],
)
writer.append(
|
pd.DataFrame(data, index=[0])
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 16:28:08 2020
@author: sawleen
"""
import yfinance as yf
import pandas as pd
from datetime import date as _date
from dateutil.relativedelta import relativedelta as _relativedelta
import math as _math
#pd.set_option('display.max_columns', 200)
class Data():
def __init__ (self, sgx_symbol):
self.sgx_symbol = sgx_symbol
self.stock = yf.Ticker("{}".format(sgx_symbol))
def get_name_disp(self):
stock = self.stock
try:
return stock.info['longName'] # for display
except:
print('! Warning: No name fetched for {}'.format(self.sgx_symbol))
return self.sgx_symbol
def get_name_short(self):
stock = self.stock
try:
short_name = stock.info['shortName'] # for feeding into SG Investor URL
short_name = short_name.lower()
short_name = short_name.replace(' ','-')
return short_name
except:
print('! Warning: Short name cannot be fetched for {}'.format(self.sgx_symbol))
return None
def get_sector(self):
stock = self.stock
try:
return stock.info['sector']
except:
print('Stock {} has no sector info'.format(self.sgx_symbol))
return None
# Mainly for REITS
def get_industry(self):
stock = self.stock
try:
return stock.info['industry']
except:
print('Stock {} has no industry info'.format(self.sgx_symbol))
return None
# Get basic stats
def get_basic_stats(self):
stock = self.stock
# Financial ratios
market_cap = _math.nan # Set to nan if value not in stock info
pb_ratio = _math.nan
pe_ratio = _math.nan
payout_ratio = _math.nan
roe = _math.nan
percentage_insider_share = _math.nan
try:
if 'marketCap' in stock.info.keys() and stock.info['marketCap']!=None:
market_cap = round(stock.info['marketCap']/(10**9),2)
if 'priceToBook' in stock.info.keys():
pb_ratio = round(stock.info['priceToBook'],2)
if 'trailingPE' in stock.info.keys():
pe_ratio = round(stock.info['trailingPE'],2)
if 'payoutRatio' in stock.info.keys():
payout_ratio = stock.info['payoutRatio']
if payout_ratio == None:
payout_ratio = _math.nan
else:
payout_ratio = payout_ratio*100
if 'returnOnEquity' in stock.info.keys():
roe = stock.info['returnOnEquity']
if roe:
roe = round(roe*100,2)
except Exception as e:
print(e)
try: #sometimes the data can't be loaded for some reason :/
percentage_insider_share = stock.major_holders.iloc[0,0]
percentage_insider_share = percentage_insider_share.replace('%','')
percentage_insider_share = float(percentage_insider_share)
percentage_insider_share = round(percentage_insider_share,2)
except:
print('! Warning: Percentage insider share for {} cannot be loaded.. '.format(self.sgx_symbol))
print('Data fetched instead:')
print('{}'.format(percentage_insider_share))
pass
stats={'Market Cap (bil)':market_cap,
'PB Ratio':pb_ratio,
'PE Ratio':pe_ratio,
'Dividend Payout Ratio':payout_ratio,
'% Return on Equity': roe,
'% insider shares':percentage_insider_share}
#'PEG Ratio':peg_ratio
stats_df = pd.DataFrame.from_dict(stats, orient='index')
stats_df.columns = ['Values']
stats_df = stats_df.T
return stats_df
# Get dividends
def get_dividends(self):
stock = self.stock
# Dividends
try:
div_yield = stock.info['dividendYield']
div_yield_trail = stock.info['trailingAnnualDividendYield']
div_yield_5yr = stock.info['fiveYearAvgDividendYield']
dividends = {'5-yr average':div_yield_5yr,
'Trailing':div_yield_trail,
'Forward':div_yield}
for div_type in dividends:
if dividends[div_type] != None:
if dividends[div_type] >1:
dividends[div_type] = round(dividends[div_type],2)
else:
dividends[div_type] = round(dividends[div_type]*100,2) # Convert % into figure
dividends_df = pd.DataFrame.from_dict(dividends, orient='index')
dividends_df=dividends_df.reset_index() # Set index(dividend type) to a column
dividends_df.columns=['Dividend Type','Values']
except:
print('! Warning: Dividend data cannot be fetched for {}'.format(self.sgx_symbol))
dividends_df = pd.DataFrame(index = ['5-yr average','Trailing','Forward'], columns=['Dividend Type','Values'])
return dividends_df
# Current stock price
def get_askprice(self):
stock=self.stock
try:
return stock.info['ask'] # Current price
except:
print('! Warning: Ask price cannot be fetched for {}'.format(self.sgx_symbol))
return None
# Return average growth over 3 years for both income and revenue
def process_inc_statement(self):
inc_statement = self.get_inc_statement()
inc_yoy_avg_growth={}
inc_yrly_growth={}
for fig_type in inc_statement:
figures = inc_statement[fig_type]
yrly_figures = self.get_yrly_figures(figures)
all_years = self.get_years(yrly_figures)
yrly_growth = self.calc_yrly_growth(yrly_figures, all_years, fig_type)
inc_yrly_growth[fig_type] = yrly_growth
growth_yoy_avg = self.calc_yoy_avg_growth(yrly_figures, all_years, fig_type)
inc_yoy_avg_growth[fig_type] = growth_yoy_avg
# Average yoy growth
inc_yoy_avg_growth_df =
|
pd.DataFrame.from_dict(inc_yoy_avg_growth,orient='index')
|
pandas.DataFrame.from_dict
|
# -*- coding: utf-8 -*-.
"""
Created on Tue Jan 21 13:04:58 2020
@author: xavier.mouy
"""
import pandas as pd
import xarray as xr
import os
import uuid
import warnings
import ecosound.core.tools
import ecosound.core.decorators
from ecosound.core.metadata import DeploymentInfo
import copy
class Annotation():
"""
A class used for manipulating annotation data.
The Annotation object stores from both manual analysis annotations
collected with software like PAMlab and Raven, and outputs from
automated detectors and classifiers.
Attributes
----------
data : pandas DataFrame
Annotation DataFranme.
Methods
-------
check_integrity(verbose=False, time_duplicates_only=False)
Check integrity of Annotation object.
from_raven(files, class_header='Sound type',subclass_header=None,
verbose=False)
Import annotation data from 1 or several Raven files.
to_raven(outdir, single_file=False)
Write annotation data to one or several Raven files.
from_pamlab(files, verbose=False)
Import annotation data from 1 or several PAMlab files.
to_pamlab(outdir, single_file=False)
Write annotation data to one or several Raven files.
from_parquet(file)
Import annotation data from a Parquet file.
to_parquet(file)
Write annotation data to a Parquet file.
from_netcdf(file)
Import annotation data from a netCDF4 file.
to_netcdf(file)
Write annotation data to a netCDF4 file.
insert_values(**kwargs)
Manually insert values for given Annotation fields.
insert_metadata(deployment_info_file)
Insert metadata information to the annotation from a
deployment_info_file.
filter_overlap_with(annot, freq_ovp=True, dur_factor_max=None,
dur_factor_min=None,ovlp_ratio_min=None,
remove_duplicates=False,inherit_metadata=False,
filter_deploymentID=True, inplace=False)
Filter annotations overalaping with another set of annotations.
get_labels_class()
Return all unique class labels.
get_labels_subclass()
Return all unique subclass labels.
get_fields()
Return list with all annotations fields.
summary(rows='deployment_ID',columns='label_class')
Produce a summary pivot table with the number of annotations for two
given annotation fields.
__add__()
Concatenate data from annotation objects uisng the + sign.
__len__()
Return number of annotations.
"""
def __init__(self):
"""
Initialize Annotation object.
Sets all the annotation fields.:
-'uuid': UUID,
Unique identifier code
-'from_detector': bool,
True if data comes from an automatic process.
-'software_name': str,
Software name. Can be Raven or PAMlab for manual analysis.
-'software_version': str,
Version of the software used to create the annotations.
-'operator_name': str,
Name of the person responsible for the creation of the
annotations.
-'UTC_offset': float,
Offset hours to UTC.
-'entry_date': datetime,
Date when the annotation was created.
-'audio_channel': int,
Channel number.
-'audio_file_name': str,
Name of the audio file.
-'audio_file_dir': str,
Directory where the audio file is.
-'audio_file_extension': str,
Extension of teh audio file.
-'audio_file_start_date': datetime,
Date of the audio file start time.
-'audio_sampling_frequency': int,
Sampling frequecy of the audio data.
-'audio_bit_depth': int,
Bit depth of the audio data.
-'mooring_platform_name': str,
Name of the moorig platform (e.g. 'glider','Base plate').
-'recorder_type': str,
Name of the recorder type (e.g., 'AMAR'), 'SoundTrap'.
-'recorder_SN': str,
Serial number of the recorder.
-'hydrophone_model': str,
Model of the hydrophone.
-'hydrophone_SN': str,
Serial number of the hydrophone.
-'hydrophone_depth': float,
Depth of the hydrophone in meters.
-'location_name': str,
Name of the deploymnet location.
-'location_lat': float,
latitude of the deployment location in decimal degrees.
-'location_lon': float,
longitude of the deployment location in decimal degrees.
-'location_water_depth': float,
Water depth at the deployment location in meters.
-'deployment_ID': str,
Unique ID of the deployment.
-'frequency_min': float,
Minimum frequency of the annotaion in Hz.
-'frequency_max': float,
Maximum frequency of the annotaion in Hz.
-'time_min_offset': float,
Start time of the annotaion, in seconds relative to the
begining of the audio file.
-'time_max_offset': float,
Stop time of the annotaion, in seconds relative to the
begining of the audio file.
-'time_min_date': datetime,
Date of the annotation start time.
-'time_max_date': datetime,
Date of the annotation stop time.
-'duration': float,
Duration of the annotation in seconds.
-'label_class': str,
label of the annotation class (e.g. 'fish').
-'label_subclass': str,
label of the annotation subclass (e.g. 'grunt')
'confidence': float,
Confidence of the classification.
Returns
-------
Annotation object.
"""
self.data = pd.DataFrame({
'uuid': [],
'from_detector': [], # True, False
'software_name': [],
'software_version': [],
'operator_name': [],
'UTC_offset': [],
'entry_date': [],
'audio_channel': [],
'audio_file_name': [],
'audio_file_dir': [],
'audio_file_extension': [],
'audio_file_start_date': [],
'audio_sampling_frequency': [],
'audio_bit_depth': [],
'mooring_platform_name': [],
'recorder_type': [],
'recorder_SN': [],
'hydrophone_model': [],
'hydrophone_SN': [],
'hydrophone_depth': [],
'location_name': [],
'location_lat': [],
'location_lon': [],
'location_water_depth': [],
'deployment_ID': [],
'frequency_min': [],
'frequency_max': [],
'time_min_offset': [],
'time_max_offset': [],
'time_min_date': [],
'time_max_date': [],
'duration': [],
'label_class': [],
'label_subclass': [],
'confidence': []
})
self._enforce_dtypes()
def check_integrity(self, verbose=False, ignore_frequency_duplicates=False):
"""
Check integrity of Annotation object.
Tasks performed:
1- Check that start time < stop time
2- Check that min frequency < max frequency
3- Remove duplicate entries based on time and frequency, filename,
labels and filenames
Parameters
----------
verbose : bool, optional
Print summary of the duplicate entries deleted.
The default is False.
ignore_frequency_duplicates : bool, optional
If set to True, doesn't consider frequency values when deleting
duplicates. It is useful when data are imported from Raven.
The default is False.
Raises
------
ValueError
If annotations have a start time > stop time
If annotations have a min frequency > max frequency
Returns
-------
None.
"""
# Drop all duplicates
count_start = len(self.data)
if ignore_frequency_duplicates: # doesn't use frequency boundaries
self.data = self.data.drop_duplicates(
subset=['time_min_offset',
'time_max_offset',
'label_class',
'label_subclass',
'audio_file_name',
], keep="first",).reset_index(drop=True)
else: # remove annot with exact same time AND frequency boundaries
self.data = self.data.drop_duplicates(
subset=['time_min_offset',
'time_max_offset',
'frequency_min',
'frequency_max',
'label_class',
'label_subclass',
'audio_file_name',
], keep="first",).reset_index(drop=True)
count_stop = len(self.data)
if verbose:
print('Duplicate entries removed:', str(count_start-count_stop))
# Check that start and stop times are coherent (i.e. t2 > t1)
time_check = self.data.index[
self.data['time_max_offset'] <
self.data['time_min_offset']].tolist()
if len(time_check) > 0:
raise ValueError(
'Incoherent annotation times (time_min > time_max). \
Problematic annotations:' + str(time_check))
# Check that min and max frequencies are coherent (i.e. fmin < fmax)
freq_check = self.data.index[
self.data['frequency_max'] < self.data['frequency_min']].tolist()
if len(freq_check) > 0:
raise ValueError(
'Incoherent annotation frequencies (frequency_min > \
frequency_max). Problematic annotations:' + str(freq_check))
if verbose:
print('Integrity test succesfull')
def from_raven(self, files, class_header='Sound type', subclass_header=None, verbose=False):
"""
Import data from 1 or several Raven files.
Load annotation tables from .txt files generated by the software Raven.
Parameters
----------
files : str, list
Path of the txt file(s) to import. Can be a str if importing a single
file. Needs to be a list if importing multiple files. If 'files' is
a folder, all files in that folder ending with '.selections.txt'
will be imported.
class_header : str, optional
Name of the header in the Raven file corresponding to the class
name. The default is 'Sound type'.
subclass_header : str, optional
Name of the header in the Raven file corresponding to the subclass
name. The default is None.
verbose : bool, optional
If set to True, print the summary of the annatation integrity test.
The default is False.
Returns
-------
None.
"""
if os.path.isdir(files):
files = ecosound.core.tools.list_files(files,
'.selections.txt',
recursive=False,
case_sensitive=True,
)
if verbose:
print(len(files), 'annotation files found.')
data = Annotation._import_csv_files(files)
files_timestamp = ecosound.core.tools.filename_to_datetime(
data['Begin Path'].tolist())
self.data['audio_file_start_date'] = files_timestamp
self.data['audio_channel'] = data['Channel']
self.data['audio_file_name'] = data['Begin Path'].apply(
lambda x: os.path.splitext(os.path.basename(x))[0])
self.data['audio_file_dir'] = data['Begin Path'].apply(
lambda x: os.path.dirname(x))
self.data['audio_file_extension'] = data['Begin Path'].apply(
lambda x: os.path.splitext(x)[1])
self.data['time_min_offset'] = data['Begin Time (s)']
self.data['time_max_offset'] = data['End Time (s)']
self.data['time_min_date'] = pd.to_datetime(
self.data['audio_file_start_date'] + pd.to_timedelta(
self.data['time_min_offset'], unit='s'))
self.data['time_max_date'] = pd.to_datetime(
self.data['audio_file_start_date'] +
pd.to_timedelta(self.data['time_max_offset'], unit='s'))
self.data['frequency_min'] = data['Low Freq (Hz)']
self.data['frequency_max'] = data['High Freq (Hz)']
if class_header is not None:
self.data['label_class'] = data[class_header]
if subclass_header is not None:
self.data['label_subclass'] = data[subclass_header]
self.data['from_detector'] = False
self.data['software_name'] = 'raven'
self.data['uuid'] = self.data.apply(lambda _: str(uuid.uuid4()), axis=1)
self.data['duration'] = self.data['time_max_offset'] - self.data['time_min_offset']
self.check_integrity(verbose=verbose, ignore_frequency_duplicates=True)
if verbose:
print(len(self), 'annotations imported.')
def to_raven(self, outdir, outfile='Raven.Table.1.selections.txt', single_file=False):
"""
Write data to 1 or several Raven files.
Write annotations as .txt files readable by the software Raven. Output
files can be written in a single txt file or in several txt files (one
per audio recording). In the latter case, output file names are
automatically generated based on the audio file's name.
Parameters
----------
outdir : str
Path of the output directory where the Raven files are written.
outfile : str
Name of the output file. Only used is single_file is True. The
default is 'Raven.Table.1.selections.txt'.
single_file : bool, optional
If set to True, writes a single output file with all annotations.
The default is False.
Returns
-------
None.
"""
if single_file:
annots = [self.data]
else:
annots = [pd.DataFrame(y) for x, y in self.data.groupby(
'audio_file_name', as_index=False)]
for annot in annots:
annot.reset_index(inplace=True, drop=True)
cols = ['Selection', 'View', 'Channel', 'Begin Time (s)',
'End Time (s)', 'Delta Time (s)', 'Low Freq (Hz)',
'High Freq (Hz)', 'Begin Path', 'File Offset (s)',
'Begin File', 'Class', 'Sound type', 'Software',
'Confidence']
outdf = pd.DataFrame({'Selection': 0, 'View': 0, 'Channel': 0,
'Begin Time (s)': 0, 'End Time (s)': 0,
'Delta Time (s)': 0, 'Low Freq (Hz)': 0,
'High Freq (Hz)': 0, 'Begin Path': 0,
'File Offset (s)': 0, 'Begin File': 0,
'Class': 0, 'Sound type': 0, 'Software': 0,
'Confidence': 0},
index=list(range(annot.shape[0])))
outdf['Selection'] = range(1, annot.shape[0]+1)
outdf['View'] = 'Spectrogram 1'
outdf['Channel'] = annot['audio_channel']
outdf['Begin Time (s)'] = annot['time_min_offset']
outdf['End Time (s)'] = annot['time_max_offset']
outdf['Delta Time (s)'] = annot['duration']
outdf['Low Freq (Hz)'] = annot['frequency_min']
outdf['High Freq (Hz)'] = annot['frequency_max']
outdf['File Offset (s)'] = annot['time_min_offset']
outdf['Class'] = annot['label_class']
outdf['Sound type'] = annot['label_subclass']
outdf['Software'] = annot['software_name']
outdf['Confidence'] = annot['confidence']
outdf['Begin Path'] = [os.path.join(x, y) + z for x, y, z
in zip(annot['audio_file_dir'],
annot['audio_file_name'],
annot['audio_file_extension'])]
outdf['Begin File'] = [x + y for x, y
in zip(annot['audio_file_name'],
annot['audio_file_extension'])]
outdf = outdf.fillna(0)
if single_file:
outfilename = os.path.join(outdir, outfile)
else:
outfilename = os.path.join(
outdir, str(annot['audio_file_name'].iloc[0])
+ str(annot['audio_file_extension'].iloc[0])
+ '.chan' + str(annot['audio_channel'].iloc[0])
+ '.Table.1.selections.txt')
outdf.to_csv(outfilename,
sep='\t',
encoding='utf-8',
header=True,
columns=cols,
index=False)
def from_pamlab(self, files, verbose=False):
"""
Import data from 1 or several PAMlab files.
Load annotation data from .log files generated by the software PAMlab.
Parameters
----------
files : str, list
Path of the txt file to import. Can be a str if importing a single
file or entire folder. Needs to be a list if importing multiple
files. If 'files' is a folder, all files in that folder ending with
'annotations.log' will be imported.
verbose : bool, optional
If set to True, print the summary of the annatation integrity test.
The default is False.
Returns
-------
None.
"""
if type(files) is str:
if os.path.isdir(files):
files = ecosound.core.tools.list_files(files,
' annotations.log',
recursive=False,
case_sensitive=True,
)
if verbose:
print(len(files), 'annotation files found.')
data = Annotation._import_csv_files(files)
files_timestamp = ecosound.core.tools.filename_to_datetime(
data['Soundfile'].tolist())
self.data['audio_file_start_date'] = files_timestamp
self.data['operator_name'] = data['Operator']
self.data['entry_date'] = pd.to_datetime(
data['Annotation date and time (local)'],
format='%Y-%m-%d %H:%M:%S.%f')
self.data['audio_channel'] = data['Channel']
self.data['audio_file_name'] = data['Soundfile'].apply(
lambda x: os.path.splitext(os.path.basename(x))[0])
self.data['audio_file_dir'] = data['Soundfile'].apply(
lambda x: os.path.dirname(x))
self.data['audio_file_extension'] = data['Soundfile'].apply(
lambda x: os.path.splitext(x)[1])
self.data['audio_sampling_frequency'] = data['Sampling freq (Hz)']
self.data['recorder_type'] = data['Recorder type']
self.data['recorder_SN'] = data['Recorder ID']
self.data['hydrophone_depth'] = data['Recorder depth']
self.data['location_name'] = data['Station']
self.data['location_lat'] = data['Latitude (deg)']
self.data['location_lon'] = data['Longitude (deg)']
self.data['time_min_offset'] = data['Left time (sec)']
self.data['time_max_offset'] = data['Right time (sec)']
self.data['time_min_date'] = pd.to_datetime(
self.data['audio_file_start_date']
+ pd.to_timedelta(self.data['time_min_offset'], unit='s'))
self.data['time_max_date'] = pd.to_datetime(
self.data['audio_file_start_date'] +
pd.to_timedelta(self.data['time_max_offset'], unit='s'))
self.data['frequency_min'] = data['Bottom freq (Hz)']
self.data['frequency_max'] = data['Top freq (Hz)']
self.data['label_class'] = data['Species']
self.data['label_subclass'] = data['Call type']
self.data['from_detector'] = False
self.data['software_name'] = 'pamlab'
self.data['uuid'] = self.data.apply(lambda _: str(uuid.uuid4()), axis=1)
self.data['duration'] = self.data['time_max_offset'] - self.data['time_min_offset']
self.check_integrity(verbose=verbose)
if verbose:
print(len(self), 'annotations imported.')
def to_pamlab(self, outdir, outfile='PAMlab annotations.log', single_file=False):
"""
Write data to 1 or several PAMlab files.
Write annotations as .log files readable by the software PAMlab. Output
files can be written in a single txt file or in several txt files (one
per audio recording). In teh latter case, output file names are
automatically generated based on the audio file's name and the name
format required by PAMlab.
Parameters
----------
outdir : str
Path of the output directory where the Raven files are written.
outfile : str
Name of teh output file. Only used is single_file is True. The
default is 'PAMlab annotations.log'.
single_file : bool, optional
If set to True, writes a single output file with all annotations.
The default is False.
Returns
-------
None.
"""
if single_file:
annots = [self.data]
else:
annots = [pd.DataFrame(y)
for x, y in self.data.groupby(
'audio_file_name', as_index=False)]
for annot in annots:
annot.reset_index(inplace=True, drop=True)
cols = ['fieldkey:', 'Soundfile', 'Channel', 'Sampling freq (Hz)',
'Latitude (deg)', 'Longitude (deg)', 'Recorder ID',
'Recorder depth', 'Start date and time (UTC)',
'Annotation date and time (local)', 'Recorder type',
'Deployment', 'Station', 'Operator', 'Left time (sec)',
'Right time (sec)', 'Top freq (Hz)', 'Bottom freq (Hz)',
'Species', 'Call type', 'rms SPL', 'SEL', '', '']
outdf = pd.DataFrame({'fieldkey:': 0, 'Soundfile': 0, 'Channel': 0,
'Sampling freq (Hz)': 0, 'Latitude (deg)': 0,
'Longitude (deg)': 0, 'Recorder ID': 0,
'Recorder depth': 0,
'Start date and time (UTC)': 0,
'Annotation date and time (local)': 0,
'Recorder type': 0, 'Deployment': 0,
'Station': 0, 'Operator': 0,
'Left time (sec)': 0, 'Right time (sec)': 0,
'Top freq (Hz)': 0, 'Bottom freq (Hz)': 0,
'Species': '', 'Call type': '', 'rms SPL': 0,
'SEL': 0, '': '', '': ''},
index=list(range(annot.shape[0])))
outdf['fieldkey:'] = 'an:'
outdf['Species'] = annot['label_class']
outdf['Call type'] = annot['label_subclass']
outdf['Left time (sec)'] = annot['time_min_offset']
outdf['Right time (sec)'] = annot['time_max_offset']
outdf['Top freq (Hz)'] = annot['frequency_max']
outdf['Bottom freq (Hz)'] = annot['frequency_min']
outdf['rms SPL'] = annot['confidence']
outdf['Operator'] = annot['operator_name']
outdf['Channel'] = annot['audio_channel']
outdf['Annotation date and time (local)'] = annot['entry_date']
outdf['Sampling freq (Hz)'] = annot['audio_sampling_frequency']
outdf['Recorder type'] = annot['recorder_type']
outdf['Recorder ID'] = annot['recorder_SN']
outdf['Recorder depth'] = annot['hydrophone_depth']
outdf['Station'] = annot['location_name']
outdf['Latitude (deg)'] = annot['location_lat']
outdf['Longitude (deg)'] = annot['location_lon']
outdf['Soundfile'] = [os.path.join(x, y) + z for x, y, z
in zip(annot['audio_file_dir'],
annot['audio_file_name'],
annot['audio_file_extension']
)
]
outdf = outdf.fillna(0)
if single_file:
outfilename = os.path.join(outdir, outfile)
else:
outfilename = os.path.join(
outdir,
str(annot['audio_file_name'].iloc[0])
+ str(annot['audio_file_extension'].iloc[0])
+ ' annotations.log')
outdf.to_csv(outfilename,
sep='\t',
encoding='utf-8',
header=True,
columns=cols,
index=False)
def from_parquet(self, file, verbose=False):
"""
Import data from a Parquet file.
Load annotations from a .parquet file. This format allows for fast and
efficient data storage and access.
Parameters
----------
file : str
Path of the input parquet file.
verbose : bool, optional
If set to True, print the summary of the annatation integrity test.
The default is False.
Returns
-------
None.
"""
self.data = pd.read_parquet(file)
self.check_integrity(verbose=verbose)
if verbose:
print(len(self), 'annotations imported.')
def to_parquet(self, file):
"""
Write data to a Parquet file.
Write annotations as .parquet file. This format allows for fast and
efficient data storage and access.
Parameters
----------
file : str
Path of the output directory where the parquet files is written.
Returns
-------
None.
"""
# make sure the HP SN column are strings
self.data.hydrophone_SN = self.data.hydrophone_SN.astype(str)
# save
self.data.to_parquet(file,
coerce_timestamps='ms',
allow_truncated_timestamps=True)
def from_netcdf(self, file, verbose=False):
"""
Import data from a netcdf file.
Load annotations from a .nc file. This format works well with xarray
and Dask.
Parameters
----------
file : str
Path of the nc file to import. Can be a str if importing a single
file or entire folder. Needs to be a list if importing multiple
files. If 'files' is a folder, all files in that folder ending with
'.nc' will be imported.
verbose : bool, optional
If set to True, print the summary of the annatation integrity test.
The default is False.
Returns
-------
None.
"""
if type(file) is str:
if os.path.isdir(file):
file = ecosound.core.tools.list_files(
file,
'.nc',
recursive=False,
case_sensitive=True,)
if verbose:
print(len(file), 'files found.')
else:
file = [file]
self.data = self._import_netcdf_files(file)
self.check_integrity(verbose=verbose)
if verbose:
print(len(self), 'annotations imported.')
def to_netcdf(self, file):
"""
Write data to a netcdf file.
Write annotations as .nc file. This format works well with xarray
and Dask.
Parameters
----------
file : str
Path of the output file (.nc) to be written.
Returns
-------
None.
"""
if file.endswith('.nc') is False:
file = file + '.nc'
self._enforce_dtypes()
meas = self.data
meas.set_index('time_min_date', drop=False, inplace=True)
meas.index.name = 'date'
dxr1 = meas.to_xarray()
dxr1.attrs['datatype'] = 'Annotation'
dxr1.to_netcdf(file, engine='netcdf4', format='NETCDF4')
def insert_values(self, **kwargs):
"""
Insert constant values for given Annotation fields.
Fill in entire columns of the annotation dataframe with constant
values. It is usefull for adding project related informations that may
not be included in data imported from Raven or PAMlab files (e.g.,
'location_lat', 'location_lon'). Values can be inserted for several
annotations fields at a time by setting several keywords. This should
only be used for filling in static values (i.e., not for variable
values such as time/frequency boundaries of the annotations). Keywords
must have the exact same name as the annotation field (see method
.get_fields). For example: (location_lat=48.6, recorder_type='AMAR')
Parameters
----------
**kwargs : annotation filed name
Keyword and value of the annotation field to fill in. Keywords must
have the exact same name as the annotation field.
Raises
------
ValueError
If keyword doesn't match any annotation field name.
Returns
-------
None.
"""
for key, value in kwargs.items():
if key in self.data:
self.data[key] = value
else:
raise ValueError('The annotation object has no field: '
+ str(key))
def insert_metadata(self, deployment_info_file):
"""
Insert metadata infortion to the annotation.
Uses the Deployment_info_file to fill in the metadata of the annotation
. The deployment_info_file must be created using the DeploymentInfo
class from ecosound.core.metadata using DeploymentInfo.write_template.
Parameters
----------
deployment_info_file : str
Csv file readable by ecosound.core.meta.DeploymentInfo.read(). It
contains all the deployment metadata.
Returns
-------
None.
"""
dep_info = DeploymentInfo()
dep_info.read(deployment_info_file)
self.insert_values(UTC_offset=dep_info.data['UTC_offset'].values[0],
audio_channel=dep_info.data['audio_channel_number'].values[0],
audio_sampling_frequency=dep_info.data['sampling_frequency'].values[0],
audio_bit_depth=dep_info.data['bit_depth'].values[0],
mooring_platform_name = dep_info.data['mooring_platform_name'].values[0],
recorder_type=dep_info.data['recorder_type'].values[0],
recorder_SN=dep_info.data['recorder_SN'].values[0],
hydrophone_model=dep_info.data['hydrophone_model'].values[0],
hydrophone_SN=dep_info.data['hydrophone_SN'].values[0],
hydrophone_depth=dep_info.data['hydrophone_depth'].values[0],
location_name=dep_info.data['location_name'].values[0],
location_lat=dep_info.data['location_lat'].values[0],
location_lon=dep_info.data['location_lon'].values[0],
location_water_depth=dep_info.data['location_water_depth'].values[0],
deployment_ID=dep_info.data['deployment_ID'].values[0],
)
def filter_overlap_with(self, annot, freq_ovp=True,dur_factor_max=None,dur_factor_min=None,ovlp_ratio_min=None,remove_duplicates=False,inherit_metadata=False,filter_deploymentID=True, inplace=False):
"""
Filter overalaping annotations.
Only keep annotations that overlap in time and/or frequency with the
annotation object "annot".
Parameters
----------
annot : ecosound.annotation.Annotation object
Annotation object used to filter the current annotations.
freq_ovp : bool, optional
If set to True, filters not only annotations that overlap in time
but also overlap in frequency. The default is True.
dur_factor_max : float, optional
Constraint dictating the maximum duration overlapped
annotations must not exceed in order to be "kept". Any annotations
whose duration exceed dur_factor_max*annot.duration are discareded,
even if they overlap in time/frequency. If set to None, no maximum
duration constraints are applied. The default is None.
dur_factor_min : float, optional
Constraint dictating the minimum duration overlapped
annotations must exceed in order to be "kept". Any annotations
whose duration does not exceed dur_factor_min*annot.duration are
discareded, even if they overlap in time/frequency. If set to None,
no minimum duration constraints are applied. The default is None.
ovlp_ratio_min : float, optional
Constraint dictating the minimum amount (percentage) of overlap in
time annotations must have in order to be "kept". If set to None,
no minimum time overlap constraints are applied. The default is
None.
remove_duplicates : bool, optional
If set to True, only selects a single annotation overlaping with
annotations from the annot object. This is relevant only if several
annotations overlap with an annotation from the annot object. The
default is False.
inherit_metadata : bool, optional
If set to True, the filtered annotations inherit all the metadata
information from the matched annotations in the annot object. It
includes 'label_class', 'label_subclass', 'mooring_platform_name',
'recorder_type', 'recorder_SN', 'hydrophone_model', 'hydrophone_SN'
, 'hydrophone_depth', 'location_name', 'location_lat',
'location_lon', 'location_water_depth', and 'deployment_ID'. The
default is False.
filter_deploymentID : bool, optional
If set to False, doesn't use the deploymentID to match annotations
together but just the frequency and time offset boundaries of the
annotations. The default is True.
inplace : bool, optional
If set to True, updates the urrent object with the filter results.
The default is False.
Returns
-------
out_object : ecosound.annotation.Annotation
Filtered Annotation object.
"""
stack = []
det = self.data
for index, an in annot.data.iterrows(): # for each annotation
# restrict to the specific deploymnetID of the annotation if file names are not unique
if filter_deploymentID:
df = det[det.deployment_ID == an.deployment_ID]
else:
df = det
## filter detections to same file and deployment ID as the current annotation
df = df[df.audio_file_name == an.audio_file_name]
## check overlap in time first
if len(df) > 0:
df = df[((df.time_min_offset <= an.time_min_offset) & (df.time_max_offset >= an.time_max_offset)) | # 1- annot inside detec
((df.time_min_offset >= an.time_min_offset) & (df.time_max_offset <= an.time_max_offset)) | # 2- detec inside annot
((df.time_min_offset < an.time_min_offset) & (df.time_max_offset < an.time_max_offset) & (df.time_max_offset > an.time_min_offset)) | # 3- only the end of the detec overlaps with annot
((df.time_min_offset > an.time_min_offset) & (df.time_min_offset < an.time_max_offset) & (df.time_max_offset > an.time_max_offset)) # 4- only the begining of the detec overlaps with annot
]
# then looks at frequency overlap. Can be turned off if freq bounds are not reliable
if (len(df) > 0) & freq_ovp:
df = df[((df.frequency_min <= an.frequency_min) & (df.frequency_max >= an.frequency_max)) | # 1- annot inside detec
((df.frequency_min >= an.frequency_min) & (df.frequency_max <= an.frequency_max)) | # 2- detec inside annot
((df.frequency_min < an.frequency_min) & (df.frequency_max < an.frequency_max) & (df.frequency_max > an.frequency_min)) | # 3- only the top of the detec overlaps with annot
((df.frequency_min > an.frequency_min) & (df.frequency_min < an.frequency_max) & (df.frequency_max > an.frequency_max)) # 4- only the bottom of the detec overlaps with annot
]
# discard if durations are too different
if (len(df) > 0) & (dur_factor_max is not None):
df = df[df.duration < an.duration*dur_factor_max]
if (len(df) > 0) & (dur_factor_min is not None):
df = df[df.duration > an.duration*dur_factor_min]
# discard if they don't overlap enough
if (len(df) > 0) & (ovlp_ratio_min is not None):
df_ovlp = (df['time_max_offset'].apply(lambda x: min(x,an.time_max_offset)) - df['time_min_offset'].apply(lambda x: max(x,an.time_min_offset))) / an.duration
df = df[df_ovlp>=ovlp_ratio_min]
df_ovlp = df_ovlp[df_ovlp>=ovlp_ratio_min]
if (len(df) > 1) & remove_duplicates:
try:
df = df.iloc[[df_ovlp.values.argmax()]] # pick teh one with max time overlap
except:
print('asas')
if len(df) > 0:
if inherit_metadata:
df['mooring_platform_name'] = an['mooring_platform_name']
df['recorder_type'] = an['recorder_type']
df['recorder_SN'] = an['recorder_SN']
df['hydrophone_model'] = an['hydrophone_model']
df['hydrophone_SN'] = an['hydrophone_SN']
df['hydrophone_depth'] = an['hydrophone_depth']
df['location_name'] = an['location_name']
df['location_lat'] = an['location_lat']
df['location_lon'] = an['location_lon']
df['location_water_depth'] = an['location_water_depth']
df['deployment_ID'] = an['deployment_ID']
df['label_class'] = an['label_class']
df['label_subclass'] = an['label_subclass']
stack.append(df)
ovlp = pd.concat(stack, ignore_index=True)
if inplace:
self.data = ovlp
self.check_integrity()
out_object = None
else:
out_object = copy.copy(self)
out_object.data = ovlp
out_object.check_integrity()
return out_object
def get_labels_class(self):
"""
Get all the unique class labels of the annotations.
Returns
-------
classes : list
List of unique class labels.
"""
if len(self.data) > 0:
classes = list(self.data['label_class'].unique())
else:
classes = []
return classes
def get_labels_subclass(self):
"""
Get all the unique subclass labels of the annotations.
Returns
-------
classes : list
List of unique subclass labels.
"""
if len(self.data) > 0:
subclasses = list(self.data['label_subclass'].unique())
else:
subclasses = []
return subclasses
def get_fields(self):
"""
Get all the annotations fields.
Returns
-------
classes : list
List of annotation fields.
"""
return list(self.data.columns)
def summary(self, rows='deployment_ID', columns='label_class'):
"""
Produce a summary table of the number of annotations.
Create a pivot table summarizing the number of annotations for each
deployment and each label class. The optional arguments 'rows' and
'columns' can be used to change the fields of the annotations to be
displayed in the table.
Parameters
----------
rows : 'str', optional
Name of the annotation field for the rows of the table. The default
is 'deployment_ID'.
columns : 'str', optional
Name of the annotation field for the columns of the table. The
default default is 'label_class'.
Returns
-------
summary : pandas DataFrame
Pivot table with the number of annotations in each category.
"""
summary = self.data.pivot_table(index=rows,
columns=columns,
aggfunc='size',
fill_value=0)
# Add a "Total" row and column
summary.loc['Total'] = summary.sum()
summary['Total'] = summary.sum(axis=1)
return summary
def _enforce_dtypes(self):
self.data = self.data.astype({
'uuid': 'str',
'from_detector': 'bool', # True, False
'software_name': 'str',
'software_version': 'str',
'operator_name': 'str',
'UTC_offset': 'float',
'entry_date': 'datetime64[ns]',
'audio_channel': 'int',
'audio_file_name': 'str',
'audio_file_dir': 'str',
'audio_file_extension': 'str',
'audio_file_start_date': 'datetime64[ns]',
'audio_sampling_frequency': 'int',
'audio_bit_depth': 'int',
'mooring_platform_name': 'str',
'recorder_type': 'str',
'recorder_SN': 'str',
'hydrophone_model': 'str',
'hydrophone_SN': 'str',
'hydrophone_depth': 'float',
'location_name': 'str',
'location_lat': 'float',
'location_lon': 'float',
'location_water_depth': 'float',
'deployment_ID': 'str',
'frequency_min': 'float',
'frequency_max': 'float',
'time_min_offset': 'float',
'time_max_offset': 'float',
'time_min_date': 'datetime64[ns]',
'time_max_date': 'datetime64[ns]',
'duration': 'float',
'label_class': 'str',
'label_subclass': 'str',
'confidence': 'float',
})
@staticmethod
@ecosound.core.decorators.listinput
def _import_csv_files(files):
"""Import one or several text files with header to a Panda datafrane."""
assert type(files) in (str, list), "Input must be of type str (single \
file) or list (multiple files)"
# Import all files to a dataframe
for idx, file in enumerate(files):
# Extract header first due to formating issues in PAMlab files
header = pd.read_csv(file,
delimiter='\t',
header=None,
nrows=1)
headerLength = header.shape[1]
# Get all data and only keep values correpsonding to header labels
tmp = pd.read_csv(file,
delimiter='\t',
header=None,
skiprows=1,
na_values=None)
tmp = tmp.iloc[:, 0:headerLength]
# Put header back
tmp = tmp.set_axis(list(header.values[0]), axis=1, inplace=False)
if idx == 0:
data = tmp
else:
data = pd.concat([data, tmp], ignore_index=True, sort=False)
return data
def _import_netcdf_files(self, files):
"""Import one or several netcdf files to a Panda datafrane."""
assert type(files) in (str, list), "Input must be of type str (single \
file or directory) or list (multiple files)"
# Import all files to a dataframe
tmp = []
for idx, file in enumerate(files):
dxr = xr.open_dataset(file)
if dxr.attrs['datatype'] == 'Annotation':
tmp2 = dxr.to_dataframe()
tmp2.reset_index(inplace=True)
elif dxr.attrs['datatype'] == 'Measurement':
tmp2 = dxr.to_dataframe()
tmp2.reset_index(inplace=True)
tmp2 = tmp2[self.get_fields()]
warnings.warn('Importing Measurement data as Annotation >> Not all Measurement data are loaded.')
else:
raise ValueError(file + 'Not an Annotation file.')
tmp.append(tmp2)
data =
|
pd.concat(tmp, ignore_index=True, sort=False)
|
pandas.concat
|
import anaconda2
import pandas as pd
import matplotlib.pyplot as plt
s = pd.read_csv('SUYI Proxy Data 2012-2013.csv');
df =
|
pd.DataFrame(s)
|
pandas.DataFrame
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.