| prompt | completion | api |
|---|---|---|
| string (19 to 1.03M chars) | string (4 to 2.12k chars) | string (8 to 90 chars) |
""" Utils mainly to write code agnostic to numpy or pandas. """
# Author: <NAME> <<EMAIL>>
from typing import List, Optional, Union
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
from os import getcwd
from os.path import join
ArrayLike = Union[pd.Series, np.ndarray, List]
Matrix = Union[pd.DataFrame, np.ndarray]
LOOKUP_TABLE_PATH = join("data", "shift_lookup.csv")
def standardize_uppercase(input: str) -> str:
"""Standardize string to upper case."""
return input.upper()
def sigmoid(X: ArrayLike) -> ArrayLike:
return 1 / (1 + np.exp(-X))
def isin(X: Union[ArrayLike, Matrix], values: ArrayLike) -> Union[ArrayLike, Matrix]:
    """Element-wise membership test that works for both pandas and numpy inputs."""
    if isinstance(X, (pd.DataFrame, pd.Series)):
        return X.isin(values)
    return np.isin(X, values)
def is_numeric(X: Union[ArrayLike, Matrix]) -> bool:
if isinstance(X, pd.DataFrame):
return is_numeric_dtype(X.values)
return is_numeric_dtype(X)
def enforce_numeric(
X: Union[ArrayLike, Matrix], vars_to_enforce: Optional[List[Union[str, int]]] = None
) -> Matrix:
if isinstance(X, np.ndarray):
X = np.array(list(map(pd.to_numeric, X)))
all_nan_cols = np.isnan(X).all(axis=0)
X = X[:, ~all_nan_cols]
else: # pd_df, or native python array
# enforce pd df if native python list
        X = pd.DataFrame(X)  # api: pandas.DataFrame
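        # The remainder of this branch is truncated in this excerpt. A minimal
        # sketch of how it might continue, assuming it mirrors the NumPy branch
        # above (coerce to numeric, then drop columns that end up all-NaN); the
        # exact original behaviour is not recoverable from this excerpt.
        cols = vars_to_enforce if vars_to_enforce is not None else X.columns
        X[cols] = X[cols].apply(pd.to_numeric, errors="coerce")
        X = X.loc[:, ~X.isna().all(axis=0)]
    return X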
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer dtypes coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D matrix with shape (2, 3) as input; `empty` creates sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
        result = DataFrame({'A': array.array('i', range(10))})  # api: pandas.compat.range
import collections
import numpy as np
import pandas as pd
from sklearn.neighbors import KDTree
from sklearn.externals.joblib import Parallel, delayed
train = pd.read_csv('../input/training.txt', header=None, names=['id', 'type'])  # api: pandas.read_csv
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
        assert repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999))  # api: pandas.Timestamp
# -*- coding: utf-8 -*-
"""
Contact Binners
~~~~~~~~~~~~~~~
Binners are iterators that convert input data of various flavors into a
properly sorted, chunked stream of binned contacts.
"""
from __future__ import absolute_import, division, print_function
from collections import OrderedDict, Counter
from bisect import bisect_left
from functools import partial
import itertools
import warnings
import six
from pandas.api.types import is_integer_dtype
import numpy as np
import pandas as pd
from .._logging import get_logger
from ..util import (
rlencode,
partition,
check_bins,
get_chromsizes,
GenomeSegmentation,
balanced_partition,
)
logger = get_logger("cooler.create")
class BadInputError(ValueError):
pass
SANITIZE_PRESETS = {
"bg2": dict(
decode_chroms=True,
is_one_based=False,
tril_action="reflect",
chrom_field="chrom",
anchor_field="start",
sided_fields=("chrom", "start", "end"),
suffixes=("1", "2"),
sort=True,
validate=True,
),
"pairs": dict(
decode_chroms=True,
is_one_based=False,
tril_action="reflect",
chrom_field="chrom",
anchor_field="pos",
sided_fields=("chrom", "pos"),
suffixes=("1", "2"),
sort=False,
validate=True,
),
}
def _sanitize_records(
chunk,
gs,
decode_chroms,
is_one_based,
tril_action,
chrom_field,
anchor_field,
sided_fields,
suffixes,
sort,
validate,
):
# Get integer contig IDs
if decode_chroms:
# Unspecified chroms get assigned category = NaN and integer code = -1
chrom1_ids = np.array(
pd.Categorical(chunk["chrom1"], gs.contigs, ordered=True).codes
)
chrom2_ids = np.array(
pd.Categorical(chunk["chrom2"], gs.contigs, ordered=True).codes
)
else:
chrom1_ids = chunk["chrom1"].values
chrom2_ids = chunk["chrom2"].values
if validate:
for col, dt in [("chrom1", chrom1_ids.dtype), ("chrom2", chrom2_ids.dtype)]:
if not is_integer_dtype(dt):
raise BadInputError(
"`{}` column is non-integer. ".format(col)
+ "If string, use `decode_chroms=True` to convert to enum"
)
# Drop records from non-requested chromosomes
to_drop = (chrom1_ids < 0) | (chrom2_ids < 0)
if np.any(to_drop):
mask = ~to_drop
chrom1_ids = chrom1_ids[mask]
chrom2_ids = chrom2_ids[mask]
chunk = chunk[mask].copy()
# Handle empty case
if not len(chunk):
chunk["bin1_id"] = []
chunk["bin2_id"] = []
return chunk
# Find positional anchor columns, convert to zero-based if needed
anchor1 = np.array(chunk[anchor_field + suffixes[0]])
anchor2 = np.array(chunk[anchor_field + suffixes[1]])
if is_one_based:
anchor1 -= 1
anchor2 -= 1
# Check types and bounds
if validate:
for dt in [anchor1.dtype, anchor2.dtype]:
if not is_integer_dtype(dt):
raise BadInputError("Found a non-integer anchor column")
is_neg = (anchor1 < 0) | (anchor2 < 0)
if np.any(is_neg):
err = chunk[is_neg]
raise BadInputError(
"Found an anchor position with negative value. Make sure your coordinates are 1-based or use the --zero-based option when loading. \n{}".format(
err.head().to_csv(sep="\t")
)
)
chromsizes1 = gs.chromsizes[chrom1_ids].values
chromsizes2 = gs.chromsizes[chrom2_ids].values
is_excess = (anchor1 > chromsizes1) | (anchor2 > chromsizes2)
if np.any(is_excess):
err = chunk[is_excess]
raise BadInputError(
"Found an anchor position exceeding chromosome length:\n{}".format(
err.head().to_csv(sep="\t")
)
)
# Handle lower triangle records
if tril_action is not None:
is_tril = (chrom1_ids > chrom2_ids) | (
(chrom1_ids == chrom2_ids) & (anchor1 > anchor2)
)
if np.any(is_tril):
if tril_action == "reflect":
chrom1_ids[is_tril], chrom2_ids[is_tril] = (
chrom2_ids[is_tril],
chrom1_ids[is_tril],
)
anchor1[is_tril], anchor2[is_tril] = anchor2[is_tril], anchor1[is_tril]
for field in sided_fields:
chunk.loc[is_tril, field + suffixes[0]], chunk.loc[
is_tril, field + suffixes[1]
] = (
chunk.loc[is_tril, field + suffixes[1]],
chunk.loc[is_tril, field + suffixes[0]],
)
elif tril_action == "drop":
mask = ~is_tril
chrom1_ids = chrom1_ids[mask]
chrom2_ids = chrom2_ids[mask]
anchor1 = anchor1[mask]
anchor2 = anchor2[mask]
chunk = chunk[mask].copy()
elif tril_action == "raise":
err = chunk[is_tril]
raise BadInputError(
"Found lower triangle pairs:\n{}".format(
err.head().to_csv(sep="\t")
)
)
else:
raise ValueError("Unknown tril_action value: '{}'".format(tril_action))
# Assign bin IDs from bin table
chrom_binoffset = gs.chrom_binoffset
binsize = gs.binsize
if binsize is None:
chrom_abspos = gs.chrom_abspos
start_abspos = gs.start_abspos
bin1_ids = []
bin2_ids = []
for cid1, pos1, cid2, pos2 in zip(chrom1_ids, anchor1, chrom2_ids, anchor2):
lo = chrom_binoffset[cid1]
hi = chrom_binoffset[cid1 + 1]
bin1_ids.append(
lo
+ np.searchsorted(
start_abspos[lo:hi], chrom_abspos[cid1] + pos1, side="right"
)
- 1
)
lo = chrom_binoffset[cid2]
hi = chrom_binoffset[cid2 + 1]
bin2_ids.append(
lo
+ np.searchsorted(
start_abspos[lo:hi], chrom_abspos[cid2] + pos2, side="right"
)
- 1
)
chunk["bin1_id"] = bin1_ids
chunk["bin2_id"] = bin2_ids
else:
chunk["bin1_id"] = chrom_binoffset[chrom1_ids] + anchor1 // binsize
chunk["bin2_id"] = chrom_binoffset[chrom2_ids] + anchor2 // binsize
# Sort by bin IDs
if sort:
chunk = chunk.sort_values(["bin1_id", "bin2_id"])
# TODO: check for duplicate records and warn
return chunk
def sanitize_records(bins, schema=None, **kwargs):
"""
    Builds a function to sanitize and assign bin IDs to a data frame of
paired genomic positions based on a provided genomic bin segmentation.
Parameters
----------
bins : DataFrame
Bin table to compare records against.
schema : str, optional
Use pre-defined parameters for a particular format. Any options can be
        overridden via kwargs. If not provided, values for all the options below
must be given.
decode_chroms : bool
Convert string chromosome names to integer IDs based on the order given
in the bin table. Set to False if the chromosomes are already given as
an enumeration, starting at 0. Records with either chrom ID < 0 are
dropped.
is_one_based : bool
Whether the input anchor coordinates are one-based, rather than
zero-based. They will be converted to zero-based.
tril_action : 'reflect', 'drop', 'raise' or None
How to handle lower triangle ("tril") records.
If set to 'reflect', tril records will be flipped or "reflected"
to their mirror image: "sided" column pairs will have their values
swapped.
If set to 'drop', tril records will be discarded. This is useful if
your input data is symmetric, i.e. contains mirror duplicates of every
record.
If set to 'raise', an exception will be raised if any tril record is
encountered.
chrom_field : str
Base name of the two chromosome/scaffold/contig columns.
anchor_field : str
Base name of the positional anchor columns.
sided_fields : sequence of str
Base names of column pairs to swap values between when
mirror-reflecting records.
suffixes : pair of str
Suffixes used to identify pairs of sided columns. e.g.: ('1', '2'),
('_x', '_y'), etc.
sort : bool
        Whether to sort the output dataframe by bin1_id and bin2_id.
validate : bool
Whether to do type- and bounds-checking on the anchor position
columns. Raises BadInputError.
Returns
-------
callable :
Function of one argument that takes a raw dataframe and returns a
sanitized dataframe with bin IDs assigned.
"""
if schema is not None:
try:
options = SANITIZE_PRESETS[schema].copy()
except KeyError:
raise ValueError("Unknown schema: '{}'".format(schema))
else:
options = {}
options.update(**kwargs)
chromsizes = get_chromsizes(bins)
options["gs"] = GenomeSegmentation(chromsizes, bins)
return partial(_sanitize_records, **options)
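# A minimal usage sketch for `sanitize_records` (kept as a comment so module
# behaviour is unchanged). The chrom1/pos1/chrom2/pos2 chunk columns follow the
# "pairs" preset above; the exact bin-table layout expected by `get_chromsizes`
# is an assumption, not taken from this excerpt.
#
#     bins = pd.DataFrame({
#         "chrom": ["chr1", "chr1", "chr1"],
#         "start": [0, 1000, 2000],
#         "end": [1000, 2000, 3000],
#     })
#     sanitize = sanitize_records(bins, schema="pairs", tril_action="drop")
#     chunk = pd.DataFrame({
#         "chrom1": ["chr1", "chr1"], "pos1": [150, 2500],
#         "chrom2": ["chr1", "chr1"], "pos2": [1200, 300],
#     })
#     chunk = sanitize(chunk)  # adds integer bin1_id / bin2_id columns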
def _sanitize_pixels(
chunk,
gs,
is_one_based=False,
tril_action="reflect",
bin1_field="bin1_id",
bin2_field="bin2_id",
sided_fields=(),
suffixes=("1", "2"),
sort=True,
):
if is_one_based:
chunk[bin1_field] -= 1
chunk[bin2_field] -= 1
if tril_action is not None:
is_tril = chunk[bin1_field] > chunk[bin2_field]
if np.any(is_tril):
if tril_action == "reflect":
chunk.loc[is_tril, bin1_field], chunk.loc[is_tril, bin2_field] = (
chunk.loc[is_tril, bin2_field],
chunk.loc[is_tril, bin1_field],
)
for field in sided_fields:
chunk.loc[is_tril, field + suffixes[0]], chunk.loc[
is_tril, field + suffixes[1]
] = (
chunk.loc[is_tril, field + suffixes[1]],
chunk.loc[is_tril, field + suffixes[0]],
)
elif tril_action == "drop":
chunk = chunk[~is_tril]
elif tril_action == "raise":
raise BadInputError("Found bin1_id greater than bin2_id")
else:
raise ValueError("Unknown tril_action value: '{}'".format(tril_action))
return chunk.sort_values([bin1_field, bin2_field]) if sort else chunk
def _validate_pixels(chunk, n_bins, boundscheck, triucheck, dupcheck, ensure_sorted):
if boundscheck:
is_neg = (chunk["bin1_id"] < 0) | (chunk["bin2_id"] < 0)
if np.any(is_neg):
raise BadInputError("Found bin ID < 0")
is_excess = (chunk["bin1_id"] >= n_bins) | (chunk["bin2_id"] >= n_bins)
if np.any(is_excess):
raise BadInputError(
"Found a bin ID that exceeds the declared number of bins. "
"Check whether your bin table is correct."
)
if triucheck:
is_tril = chunk["bin1_id"] > chunk["bin2_id"]
if np.any(is_tril):
raise BadInputError("Found bin1_id greater than bin2_id")
if not isinstance(chunk, pd.DataFrame):
        chunk = pd.DataFrame(chunk)  # api: pandas.DataFrame
import numpy as np
import pandas as pd
from sklearn import preprocessing, linear_model, metrics
import gc; gc.enable()
import random
import time, datetime
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import TheilSenRegressor, BayesianRidge
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
np.random.seed(1122)
# Util functions
def print_runtime(start, end):
print("runtime: {}".format( datetime.timedelta(seconds=(end-start)/60)))
def print_dataframe_size(name, df):
print("size of {}: {:.3f} MB".format(name, df.memory_usage(index=True).sum()/1E6))
# Read datasets
print('Reading datasets...')
start = time.time()
dtypes = {'id':'uint32', 'item_nbr':'int32', 'store_nbr':'int8', 'onpromotion':str}
print('Reading train and test...')
train = pd.read_csv('../input/train.csv', dtype=dtypes, parse_dates=['date'])
test = pd.read_csv('../input/test.csv', dtype=dtypes, parse_dates=['date'])
print('Reading others...')
items = pd.read_csv('../input/items.csv', dtype={'item_nbr':'int32', 'perishable':bool})
stores = pd.read_csv('../input/stores.csv', dtype={'store_nbr':'uint8', 'cluster':'uint8' })
transactions = pd.read_csv('../input/transactions.csv', dtype={'store_nbr':'uint8'}, parse_dates=['date'])  # api: pandas.read_csv
''' A basic example that launches a set of neural network training sessions to search for the
best hyperparameter set that minimizes a target criterion.
@brief based on hyperopt, starts several training sessions conducted by the
script experiments_manager.py.
All trials are versioned in the same way as any manually started single training session
run with the experiments_manager.py script.
The tuned hyperparameter values are stored in each trial folder as a python script:
-> 'experiment_settings_additionnal_hyperparameters.py'
-> this script is automatically loaded for training, validation, session restart on failure and serving/prediction
@author <NAME>, LISTIC, France
@notes: doc here https://github.com/hyperopt/hyperopt/wiki/FMin
-> a tutorial here : https://medium.com/district-data-labs/parameter-tuning-with-hyperopt-faa86acdfdce
-> some ideas to study the hyperparameter and loss behaviors : https://python-for-multivariate-analysis.readthedocs.io/a_little_book_of_python_for_multivariate_analysis.html
-> advanced method here : https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
'''
#basic parameters of the Hyperopt experiment
MAX_TRIALS = 100 # the maximum number of optimisation attempts
experiment_settings_file='examples/regression/mysettings_curve_fitting.py'
outputlog_folder = 'examples/hyperopt/hyperopt_experiments_curve_fitting'
toobig_loss=1e6 #the default trial loss value in case of job failure
# define a search space
from hyperopt import hp
space = {
    'hiddenNeurons': hp.randint('hiddenNeurons', upper=20)+1,  # test between 1 and 20 hidden neurons
    'learningRate': hp.uniform('learningRate', 1e-5, 1e-2),  # search learning rates between 1e-5 and 1e-2
}
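# Optional sanity check (an assumed snippet, not part of the original script):
# draw a few random samples from the search space to eyeball the ranges above.
#
#     from hyperopt.pyll.stochastic import sample
#     for _ in range(3):
#         print(sample(space))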
##### Hyperopt code starts here
#librairies imports
import matplotlib as mpl
mpl.use('Agg') #force not to display plot but rather write them to file
from hyperopt import fmin, tpe, space_eval, Trials, STATUS_FAIL, STATUS_OK, plotting
import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix
import os
#ensure log dir exists
if not(os.path.exists(outputlog_folder)):
os.makedirs(outputlog_folder)
# define the function that launches a single experiment trial
def single_experiment(hparams):
print('*** Hyperopt : New experiment trial with hparams='+str(hparams))
loss=toobig_loss
jobState=STATUS_FAIL
jobSessionFolder=None
training_iterations=0
#start the training session
try:
import experiments_manager
job_result = experiments_manager.run(experiment_settings_file, hparams)
print('trial ended successfully, output='+str(job_result))
loss=job_result['loss']
jobSessionFolder=job_result['sessionFolder']
jobState=STATUS_OK
training_iterations=job_result['global_step']
except Exception as e:
print('Job failed for some reason:'+str(e))
return {'loss': loss, 'status': jobState, 'jobSessionFolder':jobSessionFolder, 'training_iterations':training_iterations}
# minimize the objective over the space
trials = Trials()
try:
best = fmin(single_experiment, space, algo=tpe.suggest, trials=trials, max_evals=MAX_TRIALS)
#print('best='+str(best)) #WARNING, displayed hyperparameters do not take into account custom changes (+1, etc. in the space description)
print('space_eval='+str(space_eval(space, best)))
except Exception as e:
    print('Hyperopt experiment was interrupted and did not finish its job; error message: ' + str(e))
best_trials = sorted(trials.results, key=lambda x: x['loss'], reverse=False)
print('best_trials:')
print(best_trials)
#experiments trials visualisation
#=> plot the evolution of each of the hyperparameters along the trials
import matplotlib.pyplot as plt
import pandas as pd
chosen_hparams_history=[]
finished_jobs=0
for trial in trials.trials:
if trial['result']['status']==STATUS_OK:
finished_jobs+=1
current_trial_desc={'tid':trial['tid'],
'loss':trial['result']['loss'],
'training_iterations':trial['result']['training_iterations']}
current_trial_desc.update(trial['misc']['vals'])
trial_summary=pd.DataFrame(current_trial_desc)
chosen_hparams_history.append(trial_summary)
trials_summary = pd.concat(chosen_hparams_history)  # api: pandas.concat
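# The original script is truncated at this point. A plausible continuation,
# assumed from the earlier `scatter_matrix` import and the plotting comment
# above (not taken from the original source):
trials_summary.to_csv(os.path.join(outputlog_folder, 'trials_summary.csv'), index=False)
scatter_matrix(trials_summary, alpha=0.8, figsize=(10, 10), diagonal='kde')
plt.savefig(os.path.join(outputlog_folder, 'hyperparameters_scatter_matrix.png'))
print('{} of {} trials finished successfully'.format(finished_jobs, len(trials.trials)))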
import numpy as np
import pandas as pd
from cognite.model_hosting.data_fetcher import DataFetcher
class Model:
"""
You need to have a class called Model in a file called model.py at the
top level of your package.
It should contain
- Static train method
Which performs training and persist any state you need for
prediction. This can be serialized models, csv, or something else.
You just have to be able to store it in files.
- Static load method
Which loads your persisted state and return an instance of the Model
class that are ready for predictions.
- Predict method
Which use the persisted state to do predictions.
"""
@staticmethod
def train(open_artifact, data_spec):
"""
open_artifact:
The train method must accept a open_artifact argument. This is a function
that works the same way as the builtin open(), except it reads from
and writes to the root of a special storage location in the cloud
that belongs to the current model version.
data_spec:
An argument we pass in ourself when we initiate the training.
api_key, project:
Optional arguments that are passed in automatically from Model
Hosting if you specify them.
"""
data_fetcher = DataFetcher(data_spec)
data_fetcher.files.fetch("data")
data_fetcher.files.fetch("target")
X = | pd.read_csv("data") | pandas.read_csv |
import torch as t
from sklearn.metrics import precision_score, recall_score
import numpy as np
import matplotlib.pyplot as plt
import csv
import pandas as pd
import os
class ValidationKFold():
def __init__(self, model, criterion, data_loader, batch_size, max_body_length):
self.model = model
self.criterion = criterion
self.data_loader = data_loader
self.batch_size = batch_size
self.max_body_length = max_body_length
# for plotting
self.val_accuracy_at_fold = []
self.val_loss_at_fold = []
self.val_precision_at_inconsistent_at_fold = []
self.val_recall_at_inconsistent_at_fold = []
self.val_f1_at_inconsistent_at_epoch = []
self.val_threshold_at_fold = []
self.val_k = []
self.val_fold = []
self.val_dataset_size = []
def run(self, threshold, k, fold, dataset_size, store_all_predictions=False):
considered_cases = 0
correct_predictions = 0
losses = 0
correct_inconsistent_predictions = 0
total_inconsistent_labels = 0
total_inconsistent_predictions = 0
all_predictions = []
print("Starting validation")
with t.no_grad():
self.model.eval()
for batch_idx, batch in enumerate(self.data_loader):
xs = batch[0]
ys = batch[1]
ids = batch[2]
ys_pred = self.model(xs)
for i in range(len(ys_pred.cpu().numpy())):
pred = ys_pred.cpu().numpy()[i][0]
label = ys.cpu().numpy()[i][0]
if label == 0.0:
total_inconsistent_labels += 1
if pred < threshold:
total_inconsistent_predictions += 1
if pred < threshold or pred > (1 - threshold):
considered_cases += 1
losses += abs(label - pred)
if pred <= threshold and label == 0.0:
correct_predictions += 1
correct_inconsistent_predictions += 1
elif pred >= (1 - threshold) and label == 1.0:
correct_predictions += 1
# Use this to debug individual data points:
# print(f"Made prediction for pair with id {ids[i]}")
if store_all_predictions:
all_predictions.append([ids[i].item(), label.item(), pred.item()])
#all_predictions.append(pred.item())
print("Validation process has finished.")
val_accuracy = correct_predictions/considered_cases if considered_cases > 0 else 0
val_loss = losses/considered_cases if considered_cases > 0 else float("inf")
val_precision_at_inconsistent = correct_inconsistent_predictions/total_inconsistent_predictions if total_inconsistent_predictions > 0 else 0
val_recall_at_inconsistent = correct_inconsistent_predictions/total_inconsistent_labels if total_inconsistent_labels > 0 else 0
val_f1_at_inconsistent = 2 * ((val_precision_at_inconsistent * val_recall_at_inconsistent)/(val_precision_at_inconsistent + val_recall_at_inconsistent)) if (val_precision_at_inconsistent + val_recall_at_inconsistent) > 0 else 0
print("================================================================================")
print(f"results for fold {fold + 1} out ({k}-folds):")
print(
f"val_loss = {round(val_loss, 4)}, val_accuracy = {round(val_accuracy, 4)}")
print(
f"val_precision@I = {round(val_precision_at_inconsistent, 4)}, val_recall@I = {round(val_recall_at_inconsistent, 4)}")
print(
f"val_F1@I = {round(val_f1_at_inconsistent, 4)}")
# update list of measurements for plotting
self.val_accuracy_at_fold.append(val_accuracy)
self.val_loss_at_fold.append(val_loss)
self.val_precision_at_inconsistent_at_fold.append(val_precision_at_inconsistent)
self.val_recall_at_inconsistent_at_fold.append(val_recall_at_inconsistent)
self.val_f1_at_inconsistent_at_epoch.append(val_f1_at_inconsistent)
self.val_threshold_at_fold.append(threshold)
self.val_k.append(k)
self.val_fold.append(fold + 1)
self.val_dataset_size.append(dataset_size)
return all_predictions
def save_data(self, model_type):
# Create CSV file and add header if it doesn't exist
if not os.path.isfile(f'./data_{model_type}.csv'):
columns = ['loss', 'accuracy', 'precision_at_inconsistent', 'recall_at_inconsistent', 'f1_at_inconsistent', 'threshold', 'k', 'fold', 'dataset_size']
with open(f'./data_{model_type}.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(columns)
df = pd.read_csv(f'./data_{model_type}.csv')
df_new_data = pd.DataFrame({
'loss': self.val_loss_at_fold,
'accuracy': self.val_accuracy_at_fold,
'precision_at_inconsistent': self.val_precision_at_inconsistent_at_fold,
'recall_at_inconsistent': self.val_recall_at_inconsistent_at_fold,
'f1_at_inconsistent': self.val_f1_at_inconsistent_at_epoch,
'threshold': self.val_threshold_at_fold,
'k': self.val_k,
'fold': self.val_fold,
'dataset_size': self.val_dataset_size
})
df = df.append(df_new_data)
df.to_csv(f'./data_{model_type}.csv', index=False)
def save_experimental_data(self, model_type):
# Create CSV file and add header if it doesn't exist
if not os.path.isfile(f'./experimental_data_{model_type}.csv'):
columns = ['loss', 'accuracy', 'precision_at_inconsistent', 'recall_at_inconsistent', 'f1_at_inconsistent', 'training_size', 'execution', 'epoch']
with open(f'./experimental_data_{model_type}.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(columns)
df = | pd.read_csv(f'./experimental_data_{model_type}.csv') | pandas.read_csv |
import os
import re
import sys
import pickle
import csv
import gzip
import dill
import numpy as np
from collections import Counter
from itertools import chain
from bs4 import BeautifulSoup
import pandas as pd
from gensim import corpora
from gensim.parsing.preprocessing import STOPWORDS
from collections import defaultdict
from joblib import Parallel, delayed
def tokenize_cnn(inputdir, inputfile, outputdir, maxtokens=10000):
df = pd.read_csv(os.path.join(inputdir, inputfile))
# Clean up summaries
df['true_summary'] = df['true_summary'].str.replace('[^A-Za-z0-9]+', ' ').str.strip().fillna("")
df['sentence'] = df['sentence'].str.replace('[^A-Za-z0-9]+', ' ').str.strip().fillna("")
df['query'] = df['query'].str.replace('[^A-Za-z0-9]+', ' ').str.strip().fillna("")
frequency = defaultdict(int)
n = df.shape[0]
div = n // 10
qtokens, stokens, tstokens = [], [], []
for i, row in df.iterrows():
qtokens+= [row['query'].split(" ")]
stokens+= [row['sentence'].split(" ")]
tstokens+= [row['true_summary'].split(" ")]
if ((i + 1) % div) == 0:
print("%i/%i (%i%%) complete rows." % (i + 1, n, (i + 1) / float(n) * 100 ))
# Getting the dictionary with token info
dictionary = corpora.Dictionary(stokens + qtokens + tstokens )
# Mapping to numeric list -- adding plus one to tokens
dictionary.token2id = {k: v + 1 for k,v in dictionary.token2id.items()}
dictionary.id2token = {v:k for k,v in dictionary.token2id.items()}
print("Exporting word to index and dictionary to word indices")
output = open(os.path.join(outputdir,'LSTMDQN_Dic_token2id_cnn.pkl'), 'ab+')
pickle.dump(dictionary.token2id, output)
output.close()
output = open(os.path.join(outputdir,'LSTMDQN_Dic_id2token_cnn.pkl'), 'ab+')
pickle.dump(dictionary.id2token, output)
output.close()
odf0 = pd.DataFrame.from_dict(dictionary.dfs, orient='index').reset_index()
ofindf = pd.DataFrame.from_dict(dictionary.token2id, orient='index').reset_index()
odf0.columns = ['id', 'frequency']
ofindf.columns = ['token', 'id']
# Merge by token id
odf = | pd.merge(left=odf0, right=ofindf, on='id') | pandas.merge |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
from pandas.io import gbq
import pandas as pd
import pickle
import re
import os
class PatentLandscapeExpander:
"""Class for L1&L2 expansion as 'Automated Patent Landscaping' describes.
This object takes a seed set and a Google Cloud BigQuery project name and
exposes methods for doing expansion of the project. The logical entry-point
to the class is load_from_disk_or_do_expansion, which checks for cached
expansions for the given self.seed_name, and if a previous run is available
it will load it from disk and return it; otherwise, it does L1 and L2
expansions, persists it in a cached 'data/[self.seed_name]/' directory,
and returns the data to the caller.
"""
seed_file = None
# BigQuery must be enabled for this project
bq_project = 'patent-landscape-165715'
patent_dataset = 'patents-public-data:patents.publications_latest'
#tmp_table = 'patents._tmp'
l1_tmp_table = 'patents._l1_tmp'
l2_tmp_table = 'patents._l2_tmp'
antiseed_tmp_table = 'patents.antiseed_tmp'
country_codes = set(['US'])
num_anti_seed_patents = 15000
us_only = True
# ratios and multipler for finding uniquely common CPC codes from seed set
min_ratio_of_code_to_seed = 0.04
min_seed_multiplier = 50.0
# persisted expansion information
training_data_full_df = None
seed_patents_df = None
l1_patents_df = None
l2_patents_df = None
anti_seed_patents = None
seed_data_path = None
def __init__(self, seed_file, seed_name, bq_project=None, patent_dataset=None, num_antiseed=None, us_only=None, prepare_training=True):
self.seed_file = seed_file
self.seed_data_path = os.path.join('data', seed_name)
if bq_project is not None:
self.bq_project = bq_project
if patent_dataset is not None:
self.patent_dataset = patent_dataset
#if tmp_table is not None:
# self.tmp_table = tmp_table
if num_antiseed is not None:
self.num_anti_seed_patents = num_antiseed
if us_only is not None:
self.us_only = us_only
self.prepare_training = prepare_training
def load_seeds_from_bq(self, seed_df):
where_clause = ",".join("'" + seed_df.PubNum + "'")
if self.us_only:
seed_patents_query = '''
SELECT
b.publication_number,
'Seed' as ExpansionLevel,
STRING_AGG(citations.publication_number) AS refs,
STRING_AGG(cpcs.code) AS cpc_codes
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(citation) AS citations,
UNNEST(cpc) AS cpcs
WHERE
REGEXP_EXTRACT(b.publication_number, r'\w+-(\w+)-\w+') IN
(
{}
)
AND b.country_code = 'US'
AND citations.publication_number != ''
AND cpcs.code != ''
GROUP BY b.publication_number
;
'''.format(where_clause)
else:
seed_patents_query = '''
SELECT
b.publication_number,
'Seed' as ExpansionLevel,
STRING_AGG(citations.publication_number) AS refs,
STRING_AGG(cpcs.code) AS cpc_codes
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(citation) AS citations,
UNNEST(cpc) AS cpcs
WHERE
b.publication_number IN
(
{}
)
AND citations.publication_number != ''
AND cpcs.code != ''
GROUP BY b.publication_number
;
'''.format(where_clause)
seed_patents_df = gbq.read_gbq(
query=seed_patents_query,
project_id=self.bq_project,
verbose=False,
dialect='standard')
return seed_patents_df
def load_seed_pubs(self, seed_file=None):
if seed_file is None:
seed_file = self.seed_file
#if self.us_only:
seed_df = pd.read_csv(seed_file, header=None, names=['PubNum'], dtype={'PubNum': 'str'})
#else:
# seed_df = pd.read_csv(seed_file, header=None, names=['publication_number'], dtype={'publication_number': 'str'})
return seed_df
def bq_get_num_total_patents(self):
if self.us_only:
num_patents_query = """
SELECT
COUNT(publication_number) AS num_patents
FROM
`patents-public-data.patents.publications` AS b
WHERE
country_code = 'US'
"""
else:
num_patents_query = """
SELECT
COUNT(publication_number) AS num_patents
FROM
`patents-public-data.patents.publications` AS b
"""
num_patents_df = gbq.read_gbq(
query=num_patents_query,
project_id=self.bq_project,
verbose=False,
dialect='standard')
return num_patents_df
def get_cpc_counts(self, seed_publications=None):
where_clause = '1=1'
if seed_publications is not None:
if self.us_only:
where_clause = """
REGEXP_EXTRACT(b.publication_number, r'\w+-(\w+)-\w+') IN
(
{}
)
""".format(",".join("'" + seed_publications + "'"))
else:
where_clause = """
b.publication_number IN
(
{}
)
""".format(",".join("'" + seed_publications + "'"))
if self.us_only:
cpc_counts_query = """
SELECT
cpcs.code,
COUNT(cpcs.code) AS cpc_count
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(cpc) AS cpcs
WHERE
{}
AND cpcs.code != ''
AND country_code = 'US'
GROUP BY cpcs.code
ORDER BY cpc_count DESC;
""".format(where_clause)
else:
cpc_counts_query = """
SELECT
cpcs.code,
COUNT(cpcs.code) AS cpc_count
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(cpc) AS cpcs
WHERE
{}
AND cpcs.code != ''
GROUP BY cpcs.code
ORDER BY cpc_count DESC;
""".format(where_clause)
return gbq.read_gbq(
query=cpc_counts_query,
project_id=self.bq_project,
verbose=False,
dialect='standard')
def compute_uniquely_common_cpc_codes_for_seed(self, seed_df):
'''
Queries for CPC counts across all US patents and all Seed patents, then finds the CPC codes
that are 50x more common in the Seed set than the rest of the patent corpus (and also appear in
at least 5% of Seed patents). This then returns a Pandas dataframe of uniquely common codes
as well as the table of CPC counts for reference. Note that this function makes several
BigQuery queries on multi-terabyte datasets, so expect it to take a couple minutes.
You should call this method like:
uniquely_common_cpc_codes, cpc_counts_df = \
expander.compute_uniquely_common_cpc_codes_for_seed(seed_df)
where seed_df is the result of calling load_seed_pubs() in this class.
'''
print('Querying for all US CPC Counts')
us_cpc_counts_df = self.get_cpc_counts()
print(us_cpc_counts_df.shape)
print('Querying for Seed Set CPC Counts')
seed_cpc_counts_df = self.get_cpc_counts(seed_df.PubNum)
print(seed_cpc_counts_df.shape)
print("Querying to find total number of US patents")
num_patents_df = self.bq_get_num_total_patents()
num_seed_patents = seed_df.count().values[0]
num_us_patents = num_patents_df['num_patents'].values[0]
# Merge/join the dataframes on CPC code, suffixing them as appropriate
cpc_counts_df = us_cpc_counts_df.merge(
seed_cpc_counts_df, on='code', suffixes=('_us', '_seed')) \
.sort_values(ascending=False, by=['cpc_count_seed'])
# For each CPC code, calculate the ratio of how often the code appears
# in the seed set vs the number of total seed patents
cpc_counts_df['cpc_count_to_num_seeds_ratio'] = cpc_counts_df.cpc_count_seed / num_seed_patents
# Similarly, calculate the ratio of CPC document frequencies vs total number of US patents
cpc_counts_df['cpc_count_to_num_us_ratio'] = cpc_counts_df.cpc_count_us / num_us_patents
# Calculate how much more frequently a CPC code occurs in the seed set vs full corpus of US patents
cpc_counts_df['seed_relative_freq_ratio'] = \
cpc_counts_df.cpc_count_to_num_seeds_ratio / cpc_counts_df.cpc_count_to_num_us_ratio
# We only care about codes that occur at least ~4% of the time in the seed set
# AND are 50x more common in the seed set than the full corpus of US patents
uniquely_common_cpc_codes = cpc_counts_df[
(cpc_counts_df.cpc_count_to_num_seeds_ratio >= self.min_ratio_of_code_to_seed)
&
(cpc_counts_df.seed_relative_freq_ratio >= self.min_seed_multiplier)]
return uniquely_common_cpc_codes, cpc_counts_df
def get_set_of_refs_filtered_by_country(self, seed_refs_series, country_codes):
'''
Uses the refs column of the BigQuery on the seed set to compute the set of
unique references out of the Seed set.
'''
all_relevant_refs = set()
for refs in seed_refs_series:
for ref in refs.split(','):
if self.us_only:
country_code = re.sub(r'(\w+)-(\w+)-\w+', r'\1', ref)
if country_code in country_codes:
all_relevant_refs.add(ref)
else:
all_relevant_refs.add(ref)
return all_relevant_refs
# Expansion Functions
def load_df_to_bq_tmp(self, df, tmp_table):
'''
This function inserts the provided dataframe into a temp table in BigQuery, which
is used in other parts of this class (e.g. L1 and L2 expansions) to join on by
patent number.
'''
print('Loading dataframe with cols {}, shape {}, to {}'.format(
df.columns, df.shape, tmp_table))
gbq.to_gbq(
dataframe=df,
destination_table=tmp_table,
project_id=self.bq_project,
if_exists='replace',
verbose=False)
print('Completed loading temp table.')
def expand_l2(self, refs_series):
if self.us_only:
self.load_df_to_bq_tmp( | pd.DataFrame(refs_series, columns=['pub_num']) | pandas.DataFrame |
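# Hedged usage sketch for the expander class above; the seed file, seed name and
# project id are invented for illustration. Per the class docstring, the logical
# entry point is load_from_disk_or_do_expansion:
#   expander = PatentLandscapeExpander('seed/video_codec.csv', 'video_codec',
#                                      bq_project='my-gcp-project', num_antiseed=15000)
#   seed_df = expander.load_seed_pubs()
#   common_cpcs, cpc_counts = expander.compute_uniquely_common_cpc_codes_for_seed(seed_df)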
"""
util.py
Common functions used across code.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : <EMAIL>
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
from __future__ import division
import sys
import hashlib
import datetime
import warnings
import numpy as np
import pandas as pd
import scipy.stats as st
from pf.constants import DAYS_IN_YEAR
################################################################################################################################
# General Helper/Conversion Functions
################################################################################################################################
def get_age(date=datetime.datetime.now(), bday=datetime.datetime(1989, 3, 27)):
"""Calculate personal age given birthday"""
return np.round((date - bday).days / DAYS_IN_YEAR, 2)
def f2as(x=0.0):
"""Format number to accounting string"""
if np.isnan(x):
return ' '
else:
return '{:0,.2f}'.format(x) if x >= 0 else '({:0,.2f})'.format(np.abs(x))
def read_date_csv_file(filepath=''):
"""Convinience function for reading standard date index csv"""
df = | pd.read_csv(filepath, index_col=0, parse_dates=True) | pandas.read_csv |
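# Small illustrative examples for the helpers above (values are approximate and
# assume the default birthday argument):
#   f2as(1234.5)   -> '1,234.50'
#   f2as(-1234.5)  -> '(1,234.50)'   # accounting style: negatives in parentheses
#   get_age(datetime.datetime(2019, 3, 27)) -> 30.0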
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module main.py. Fixtures comes from file conftest.py located at the same dir
of this file.
"""
from __future__ import absolute_import, division, print_function
import os
import mock
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from pandas.core.indexes.range import RangeIndex
from pandas.util.testing import assert_frame_equal
from statsmodels.tsa.statespace.structural import (
UnobservedComponents, UnobservedComponentsResultsWrapper)
from causalimpact import CausalImpact
from causalimpact.misc import standardize
def test_default_causal_cto(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
rand_data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_w_date(date_rand_data, pre_str_period, post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
assert_frame_equal(ci.data, date_rand_data)
assert ci.pre_period == pre_str_period
assert ci.post_period == post_str_period
pre_data = date_rand_data.loc[pre_str_period[0]: pre_str_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = date_rand_data.loc[post_str_period[0]: post_str_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
date_rand_data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_no_exog(rand_data, pre_int_period, post_int_period):
rand_data = pd.DataFrame(rand_data.iloc[:, 0])
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert ci.model.exog is None
assert ci.model.endog_names == 'y'
assert ci.model.exog_names is None
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_w_np_array(rand_data, pre_int_period, post_int_period):
data = rand_data.values
ci = CausalImpact(data, pre_int_period, post_int_period)
assert_array_equal(ci.data, data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = pd.DataFrame(data[pre_int_period[0]: pre_int_period[1] + 1, :])
assert_frame_equal(ci.pre_data, pre_data)
post_data = pd.DataFrame(data[post_int_period[0]: post_int_period[1] + 1, :])
post_data.index = RangeIndex(start=len(pre_data), stop=len(rand_data))
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == [1, 2]
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_causal_cto_w_no_standardization(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period, standardize=False)
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert ci.normed_pre_data is None
assert ci.normed_post_data is None
assert ci.mu_sig is None
assert_array_equal(ci.model.endog, pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, pre_data.iloc[:, 1:].values.reshape(
-1,
rand_data.shape[1] - 1
)
)
assert ci.p_value > 0 and ci.p_value < 1
def test_causal_cto_w_seasons(date_rand_data, pre_str_period, post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period,
nseasons=[{'period': 4}, {'period': 3}])
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [2, 1]
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period,
nseasons=[{'period': 4, 'harmonics': 1},
{'period': 3, 'harmonics': 1}])
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [1, 1]
def test_causal_cto_w_custom_model_and_seasons(rand_data, pre_int_period,
post_int_period):
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
model = UnobservedComponents(endog=pre_data.iloc[:, 0], level='llevel',
exog=pre_data.iloc[:, 1:],
freq_seasonal=[{'period': 4}, {'period': 3}])
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model=model)
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [2, 1]
def test_causal_cto_w_custom_model(rand_data, pre_int_period, post_int_period):
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
model = UnobservedComponents(endog=pre_data.iloc[:, 0], level='llevel',
exog=pre_data.iloc[:, 1:])
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model=model)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
def test_causal_cto_raises_on_None_input(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(None, pre_int_period, post_int_period)
assert str(excinfo.value) == 'data input cannot be empty'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, None, post_int_period)
assert str(excinfo.value) == 'pre_period input cannot be empty'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, None)
assert str(excinfo.value) == 'post_period input cannot be empty'
def test_invalid_data_input_raises():
with pytest.raises(ValueError) as excinfo:
CausalImpact('test', [0, 5], [5, 10])
assert str(excinfo.value) == 'Could not transform input data to pandas DataFrame.'
data = [1, 2, 3, 4, 5, 6, 2 + 1j]
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 3], [3, 6])
assert str(excinfo.value) == 'Input data must contain only numeric values.'
data = np.random.randn(10, 2)
data[0, 1] = np.nan
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 3], [3, 6])
assert str(excinfo.value) == 'Input data cannot have NAN values.'
def test_invalid_response_raises():
data = np.random.rand(100, 2)
data[:, 0] = np.ones(len(data)) * np.nan
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == 'Input response cannot have just Null values.'
data[0:2, 0] = 1
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == ('Input response must have more than 3 non-null points '
'at least.')
data[0:3, 0] = 1
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == 'Input response cannot be constant.'
def test_invalid_alpha_raises(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, alpha=1)
assert str(excinfo.value) == 'alpha must be of type float.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, alpha=2.)
assert str(excinfo.value) == (
'alpha must range between 0 (zero) and 1 (one) inclusive.'
)
def test_custom_model_input_validation(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model='test')
assert str(excinfo.value) == 'Input model must be of type UnobservedComponents.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.level = False
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have level attribute set.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.exog = None
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have exog attribute set.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.data = None
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have data attribute set.'
def test_kwargs_validation(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize='yes')
assert str(excinfo.value) == 'Standardize argument must be of type bool.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[7])
assert str(excinfo.value) == (
'nseasons must be a list of dicts with the required key "period" and the '
'optional key "harmonics".'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[{'test': 8}])
assert str(excinfo.value) == 'nseasons dicts must contain the key "period" defined.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[{'period': 4, 'harmonics': 3}])
assert str(excinfo.value) == (
'Total harmonics must be less or equal than periods divided by 2.')
def test_periods_validation(rand_data, date_rand_data):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 10], [4, 7])
assert str(excinfo.value) == (
'Values in training data cannot be present in the '
'post-intervention data. Please fix your pre_period value to cover at most one '
'point less from when the intervention happened.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180201'],
['20180110', '20180210'])
assert str(excinfo.value) == (
'Values in training data cannot be present in the '
'post-intervention data. Please fix your pre_period value to cover at most one '
'point less from when the intervention happened.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 10], [15, 11])
assert str(excinfo.value) == 'post_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180110'],
['20180115', '20180111'])
assert str(excinfo.value) == 'post_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 2], [15, 11])
assert str(excinfo.value) == 'pre_period must span at least 3 time points.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180102'],
['20180115', '20180111'])
assert str(excinfo.value) == 'pre_period must span at least 3 time points.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 0], [15, 11])
assert str(excinfo.value) == 'pre_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180105', '20180101'],
['20180115', '20180111'])
assert str(excinfo.value) == 'pre_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, 0, [15, 11])
assert str(excinfo.value) == 'Input period must be of type list.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, '20180101', ['20180115', '20180130'])
assert str(excinfo.value) == 'Input period must be of type list.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 10, 30], [15, 11])
assert str(excinfo.value) == (
'Period must have two values regarding the beginning '
'and end of the pre and post intervention data.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, None], [15, 11])
assert str(excinfo.value) == 'Input period cannot have `None` values.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 5.5], [15, 11])
assert str(excinfo.value) == 'Input must contain either int, str or pandas Timestamp'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [-2, 10], [11, 20])
assert str(excinfo.value) == (
'-2 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 10], [11, 2000])
assert str(excinfo.value) == (
'2000 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, ['20180101', '20180110'],
['20180111', '20180130'])
assert str(excinfo.value) == (
'20180101 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180110'],
['20180111', '20200130'])
assert str(excinfo.value) == ('20200130 not present in input data index.')
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20170101', '20180110'],
['20180111', '20180120'])
assert str(excinfo.value) == ('20170101 not present in input data index.')
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [pd.Timestamp('20180101'), pd.Timestamp('20180110')],
[ | pd.Timestamp('20180111') | pandas.Timestamp |
import contextlib
import os
import subprocess
import textwrap
import warnings
import pytest
import pandas as pd
import pandas.util.testing as tm
import pandas.io.common as icom
@contextlib.contextmanager
def catch_to_csv_depr():
# Catching warnings because Series.to_csv has
# been deprecated. Remove this context when
# Series.to_csv has been aligned.
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
yield
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_compression_size(obj, method, compression_only):
with tm.ensure_clean() as path:
with catch_to_csv_depr():
getattr(obj, method)(path, compression=compression_only)
compressed_size = os.path.getsize(path)
getattr(obj, method)(path, compression=None)
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_csv", "to_json"])
def test_compression_size_fh(obj, method, compression_only):
with | tm.ensure_clean() | pandas.util.testing.ensure_clean |
"""classicML中的数据预处理模块."""
import copy
from abc import ABC
from abc import abstractmethod
import numpy as np
import pandas as pd
from classicML import _cml_precision
class PreProcessor(ABC):
"""预处理器基类,
预处理器将实现一系列预处理操作, 部分预处理器还有对应的逆操作.
Attributes:
name: str, default='preprocessor',
预处理器名称.
Raises:
NotImplementedError: __call__方法需要用户实现.
NotImplemented: inverse方法需要用户实现.
"""
def __init__(self, name='preprocessor'):
"""
Arguments:
name: str, default='preprocessor',
Name of the preprocessor.
"""
self.name = name
@abstractmethod
def __call__(self, *args, **kwargs):
"""预处理操作."""
raise NotImplementedError
def inverse(self, *args, **kwargs):
"""预处理逆操作."""
raise NotImplemented
class DummyEncoder(PreProcessor):
"""Dummy编码器.
Attributes:
name: str, default='dummy_encoder',
Dummy编码器名称.
dtype: str, default='float32',
编码后的标签的数据类型.
class_indices: dict,
类标签和类索引的映射字典.
"""
def __init__(self, name='dummy_encoder', dtype=_cml_precision.float):
"""
Arguments:
name: str, default='dummy_encoder',
Name of the dummy encoder.
dtype: str, default='float32',
Data type of the encoded labels.
"""
super(DummyEncoder, self).__init__(name=name)
self.dtype = dtype
self.class_indices = dict()
def __call__(self, labels):
"""进行Dummy编码.
Arguments:
labels: array-like, 原始的标签.
Returns:
Dummy编码后的标签.
"""
labels = np.asarray(labels)
num_classes = np.unique(labels)
m = len(labels) # Total number of samples.
n = len(num_classes) # Total number of classes.
# Build the label-to-index mapping.
for index, value in enumerate(num_classes):
self.class_indices.update({value: index})
dummy_label = np.zeros(shape=[m, n], dtype=self.dtype)
for index, label in enumerate(labels):
dummy_label[index][self.class_indices[label]] = 1
return dummy_label
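# Hedged usage sketch (assuming _cml_precision.float maps to float32):
#   encoder = DummyEncoder()
#   encoder(['cat', 'dog', 'cat'])
#   -> array([[1., 0.],
#             [0., 1.],
#             [1., 0.]], dtype=float32)
#   encoder.class_indices -> {'cat': 0, 'dog': 1}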
class Imputer(PreProcessor):
"""缺失值填充器,
连续值将填充均值, 离散值将填充众数.
Attributes:
name: str, default='imputer',
缺失值填充器名称.
"""
def __init__(self, name='imputer'):
"""
Arguments:
name: str, default='imputer',
Name of the imputer.
"""
super(Imputer, self).__init__(name=name)
def __call__(self, data):
"""进行缺失值填充.
Arguments:
data: array-like, 输入的数据.
Returns:
填充后的数据.
"""
preprocessed_data = copy.deepcopy(data)
for column in range(data.shape[1]):
preprocessed_data[:, column] = self._fillna(data[:, column])
return preprocessed_data
@staticmethod
def _fillna(column):
"""填充数据列中的缺失值.
Arguments:
column: array-like, 输入的数据列.
Returns:
填充后的数据列.
"""
try:
new_column = pd.DataFrame(column, dtype=_cml_precision.float)
new_column.fillna(value=np.mean(new_column.dropna().values),
inplace=True)
except ValueError:
new_column = | pd.DataFrame(column) | pandas.DataFrame |
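# Hedged sketch of the intended behaviour (the except-branch above is truncated in
# this excerpt, so this is an assumption rather than a verified run):
#   imp = Imputer()
#   filled = imp(dataset)   # dataset: 2-D array with some np.nan cells
#   numeric columns get their NaNs replaced by the column mean; non-numeric columns
#   are meant to fall back to the mode, per the Imputer docstring.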
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = | tm.makeUnicodeIndex(10, name="a") | pandas.util.testing.makeUnicodeIndex |
import pandas as pd
# Series
number_list = | pd.Series([1,2,3,4,5,6]) | pandas.Series |
"""Provides functions to load entire benchmark result datasets
"""
import os
import io
import glob
import gzip
import tarfile
import warnings
import numpy
import pandas
from .parse import IorOutput, MdWorkbenchOutput
from .contention import validate_contention_dataset, JobOverlapError, ShortJobError
def _load_ior_output_stream(stream, fname, all_results=None):
"""Recursive function that loads one or more IOR output files
Args:
stream (io.TextIOWrapper): file-like object containing the stdout of
an IOR job or jobs.
fname (str): file name associated with stream.
all_results (pandas.DataFrame or None): Dataframe to which loaded
results should be appended.
Returns:
pandas.DataFrame: all_results with newly loaded data appended
as new rows.
"""
if isinstance(stream, tarfile.TarFile):
for member in stream.getmembers():
handle = stream.extractfile(member)
if handle: # directories will have handle = None
all_results = _load_ior_output_stream(
io.TextIOWrapper(handle),
member.name,
all_results)
else:
result = IorOutput(stream, normalize_results=True)
if not result or 'results' not in result:
warnings.warn('invalid output in {}'.format(fname))
return all_results
result.add_filename_metadata(fname)
results_df = pandas.DataFrame.from_dict(result['results']).dropna(subset=['bw(mib/s)'])
results_df['filename'] = fname
# graft in some columns from summary lines - indices should be the same
summaries_df = pandas.DataFrame.from_dict(result['summaries'])
if 'aggs(mib)' in summaries_df:
if 'stonewall_bytes_moved' in results_df:
na_indices = results_df[results_df['stonewall_bytes_moved'].isna()].index
if na_indices.shape[0] > 0:
results_df.loc[na_indices, 'stonewall_bytes_moved'] = summaries_df.loc[na_indices, 'aggs(mib)'] * 2**20
else:
results_df['stonewall_bytes_moved'] = summaries_df['aggs(mib)'] * 2**20
if all_results is None:
all_results = results_df
else:
if len(all_results.columns) != len(results_df.columns):
warn_str = 'inconsistent input file: {}' + \
' (file only has {:d} of {:d} expected columns)\n' +\
'this file: {}\n' + \
'expected: {}\n' + \
'diff: {}'
warnings.warn(warn_str.format(
fname,
len(results_df.columns),
len(all_results.columns),
','.join(results_df.columns),
','.join(all_results.columns),
','.join(list(set(all_results.columns) ^ set(results_df.columns)))))
all_results = | pandas.concat((all_results, results_df)) | pandas.concat |
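# Hedged usage sketch (file names are invented for illustration):
#   import tarfile
#   with tarfile.open("ior_runs.tar.gz", "r:gz") as tf:
#       df = _load_ior_output_stream(tf, "ior_runs.tar.gz")
#   # or, for a single plain-text IOR log:
#   with open("ior_run.out") as fh:
#       df = _load_ior_output_stream(fh, "ior_run.out")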
#!/usr/bin/env python
# coding: utf-8
# import packages
import re
import pandas as pd
from bs4 import BeautifulSoup
from collections import defaultdict
# Function definitions
def extract_all_characters(soup):
"""
Function to extract characters from XML file of a play.
Extracts the value of two tag attributes
One relates to Act/Scene divisions and the other is for
the name of the speaking character. These should be fairly
clear from the code.
This function should be modified to deal with different XML schema.
"""
idList = []
for a in soup.findAll(['div', 'sp']):
if 'type' in a.attrs.keys():
idList.append(a.attrs['type'])
elif 'who' in a.attrs.keys():
idList.append(a.attrs['who'])
df = | pd.DataFrame(idList, columns=['names']) | pandas.DataFrame |
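# Hedged usage sketch (the file name is an assumption):
#   with open('play.xml') as fh:
#       soup = BeautifulSoup(fh, 'xml')
#   names_df = extract_all_characters(soup)
#   # names_df holds one row per act/scene 'type' marker or speaking-character
#   # 'who' id, in document order.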
"""
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = | pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx') | pandas.read_excel |
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.appName("Python Spark SQL Data Source") \
.getOrCreate()
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
print(pdf)
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
df.show()
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
print(result_pdf)
import pandas as pd
from pyspark.sql.functions import pandas_udf
@pandas_udf("col1 string, col2 long")
def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame:
s3['col2'] = s1 + s2.str.len()
return s3
# Create a Spark DataFrame that has three columns including a sturct column.
df = spark.createDataFrame(
[[1, "a string", ("a nested string",)]],
"long_col long, string_col string, struct_col struct<col1:string>")
df.printSchema()
# root
# |-- long_column: long (nullable = true)
# |-- string_column: string (nullable = true)
# |-- struct_column: struct (nullable = true)
# | |-- col1: string (nullable = true)
df.show()
df2 = df.select(func("long_col", "string_col", "struct_col"))
df2.printSchema()
# |-- func(long_col, string_col, struct_col): struct (nullable = true)
# | |-- col1: string (nullable = true)
# | |-- col2: long (nullable = true)
df2.show()
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a: pd.Series, b: pd.Series) -> pd.Series:
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame( | pd.DataFrame(x, columns=["x"]) | pandas.DataFrame |
import yaml
import sqlalchemy
from sqlalchemy.engine.url import URL
from sqlalchemy import Table
import pandas as pd
import logging
import logging.config
import sys
import joblib
import os
import csv
from io import StringIO
from psycopg2 import sql
import datetime
import psycopg2
import pprint
from contextlib import contextmanager
import multiprocessing as mp
import functools
from pandas.api.types import is_numeric_dtype, is_string_dtype
from os import listdir
from os.path import isfile, join
import re
from pathlib import Path
import boto3
from botocore.exceptions import ClientError
from box import Box
from urllib.parse import urlparse
cur_dir = os.path.dirname(os.path.realpath(__file__))
artifacts_path = cur_dir + "/../artifacts/"
def get_configuration(config_filename):
#Get and read configuration.
config_file = cur_dir + "/../conf/"+ config_filename
with open(config_file) as ymlfile:
cfg= yaml.safe_load(ymlfile)
return Box(cfg)
config=get_configuration("config.yml")
logging_config= get_configuration("logging_config.yml")
logging.config.dictConfig(logging_config)
logger= logging.getLogger(__name__)
##############################################################
def get_file(filename):
try:
return joblib.load(artifacts_path+filename)
except Exception as e:
logger.error(e)
def get_file_or_none(filename):
"""
Loads the file using joblib. Returns None if file not found.
@param filename
@return Joblib-loaded file, or None is not found
"""
full_filename = artifacts_path+filename
if os.path.isfile(full_filename):
return get_file(filename)
return None
def save_file(obj, filename):
try:
path = Path(artifacts_path)
path.mkdir(parents=True, exist_ok=True)
joblib.dump(obj, artifacts_path+filename)
except Exception as e:
print(e)
def get_table_name(table_type):
return get_configuration("config.yml")["TABLES"][table_type]["NAME"]
def get_table_configuration(table_type):
return get_configuration("config.yml")["TABLES"][table_type]
def get_engine(table_type):
try:
engine=sqlalchemy.create_engine(
get_database_uri(table_type),
echo=False
)
logger.info("Connected to %s", engine)
return engine
except Exception as e:
logger.error(e)
def get_database_uri(table_type):
try:
return os.environ[get_configuration("config.yml")["TABLES"][table_type]["DATABASE"]]
except Exception as e:
logger.info(
"DATABASE URI environment variable not found. Looking for credentials in config."
)
db = get_configuration("config.yml")["TABLES"][table_type]["DATABASE"]
return get_configuration("config.yml")["DATABASES"][db]["URI"]
def get_database_metadata(table_type):
try:
return sqlalchemy.MetaData(
get_engine(table_type)
)
except Exception as e:
logger.error(e)
def reflected_table_object(table_type):
try:
return Table(
get_table_name(table_type),
get_database_metadata(table_type),
autoload_with=get_engine(table_type)
)
except Exception as e:
logger.error(e)
###############################################################
def read_tables(tables_to_read):
temp_table_list = []
for table_type in tables_to_read:
temp_table = read_table(table_type)
if len(temp_table)>0:
temp_table_list.append(temp_table)
return pd.concat(
temp_table_list,
axis=0,
sort=False
)
def get_list_of_columns_to_read_for_table(table_type):
return get_configuration("config.yml")["TABLES"][table_type]["READ_COLUMNS"]
def get_list_of_columns_to_write_for_table(table_type):
return get_configuration("config.yml")["TABLES"][table_type]["WRITE_COLUMNS"]
def query_builder(
table_type,
columns_to_read=None,
filter_column=None,
filter_values=None,
filter_type=None,
limit=None
):
if filter_column is None and filter_values is None:
query = sql.SQL("select {fields} from {table}").format(
fields=sql.SQL(', ').join(sql.Identifier(n) for n in columns_to_read),
table=sql.Identifier(get_table_name(table_type))
)
elif limit is not None:
query = sql.SQL("select {fields} from {table} where {col} "+filter_type+" %s"+" limit "+str(limit)).format(
fields=sql.SQL(', ').join(sql.Identifier(n) for n in columns_to_read),
table=sql.Identifier(get_table_name(table_type)),
col=sql.Identifier(filter_column)
)
else:
query = sql.SQL("select {fields} from {table} where {col} "+filter_type+" %s").format(
fields=sql.SQL(', ').join(sql.Identifier(n) for n in columns_to_read),
table=sql.Identifier(get_table_name(table_type)),
col=sql.Identifier(filter_column)
)
return query
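# Hedged illustration (table and column names are invented): for a table type whose
# configured name is "transactions",
#   query_builder("TX", columns_to_read=["id", "amount"],
#                 filter_column="id", filter_values=(1, 2, 3), filter_type="in")
# composes roughly
#   select "id", "amount" from "transactions" where "id" in %s
# and read_table() below then runs it with cur.execute(query, (filter_values,)).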
def read_table(
table_type,
columns_to_read=None,
filter_column=None,
filter_values=None,
filter_type=None,
limit=None
):
"""
Read a postgresql Table using psycopg2 and return it as a pandas dataframe.
"""
start_time = datetime.datetime.now()
logger.info(
"Beginning to read from %s data at: %s",
table_type,
start_time
)
db_uri = get_database_uri(table_type)
conn = psycopg2.connect(db_uri)
if columns_to_read is None:
columns_to_read = [c.name for c in reflected_table_object(table_type).columns]
query = query_builder(
table_type,
columns_to_read=columns_to_read,
filter_column=filter_column,
filter_values=filter_values,
filter_type=filter_type,
limit=limit
)
cur = conn.cursor()
if filter_column is None:
cur.execute(query)
else:
cur.execute(query,(filter_values,))
data = pd.DataFrame(cur.fetchall(),columns=columns_to_read)
end_time = datetime.datetime.now()
time_diff = (end_time-start_time).total_seconds()
logger.info(
"Read %d rows in %d minutes %d seconds",
data.shape[0],
time_diff // 60,
time_diff % 60
)
return data
def delete_table(table_type):
"""
Delete a SQL table.
"""
table = reflected_table_object(table_type)
if table is not None:
logger.info(
'Deleting %s table',
table_type
)
d = table.delete()
d.execute()
return
def reset_tables(list_tables_to_delete):
"""
Delete list of table_types provided.
"""
for table_type in list_tables_to_delete:
try:
delete_table(table_type)
except sqlalchemy.exc.NoSuchTableError:
logger.info(
"%s doesn't exist",
table_type
)
return
def psql_insert_copy(table, conn, keys, data_iter):
"""
Utility function for converting a table object to csv and then writing those rows to a Postgres database.
"""
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
output = StringIO()
writer = csv.writer(output, delimiter = '\t')
writer.writerows(data_iter)
output.seek(0)
cur.copy_from(
output,
'"'+table.name+'"',
null=""
)
return
def write_table(
table_type,
data,
if_exists="replace",
index=False
):
"""
Write a Pandas dataframe to a Postgresql table.
"""
start_time = datetime.datetime.now()
logger.info(
"Starting writing %d rows with columns: %s at: %s",
data.shape[0],
data.columns,
start_time
)
# Create table metadata
data.head(0).to_sql(
get_table_name(table_type),
get_engine(table_type),
if_exists=if_exists,
index=index
)
# Write table
data.to_sql(
get_table_name(table_type),
get_engine(table_type),
method=psql_insert_copy,
if_exists=if_exists,
chunksize=100000,
index=False
)
end_time = datetime.datetime.now()
time_diff = (end_time-start_time).total_seconds()
logger.info(
"Writing to %s finished in: %d minutes %d seconds",
get_table_name(table_type),
time_diff // 60,
time_diff % 60
)
return
###############################################################
class Pipeline:
"""
Create a Pipeline Object, which is basically an iterable of Python callables.
"""
def __init__(self, value=None, function_pipeline=[]):
self.value = value
self.function_pipeline = function_pipeline
def execute(self):
return functools.reduce(lambda v, f: f(v), self.function_pipeline, self.value)
def add_function(self,func):
self.function_pipeline.append(func)
def __repr__(self):
return str(pprint.pprint({"Pipeline": [f.__name__ for f in self.function_pipeline],
"Dataframe shape": self.value.shape,
"Dataframe columns": self.value.columns}))
def fill_na(dataframe):
for col in dataframe.columns:
if is_numeric_dtype(dataframe[col]):
dataframe[col]=dataframe[col].fillna(0)
elif is_string_dtype(dataframe[col]):
dataframe[col]=dataframe[col].fillna("")
return dataframe
def get_latest_model():
available_models=[f for f in listdir(artifacts_path) if f.startswith("model")]
logger.info(
"Available models: %s",
str(available_models)
)
if len(available_models)>0:
return joblib.load(artifacts_path+max(available_models))
else:
logger.info("No model found.")
def get_s3_client(service_name):
session = boto3.session.Session()
return session.client(
service_name=service_name,
endpoint_url=config.s3_endpointurl
)
def get_s3_resource(service_name):
return boto3.resource(service_name,
endpoint_url=os.environ["MLFLOW_S3_ENDPOINT_URL"],
aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
use_ssl=True,
verify=False
)
def check_file_exist_s3(key, bucket):
s3_resource = get_s3_resource("s3")
s3_bucket=s3_resource.Bucket(bucket)
for bucket_object in s3_bucket.objects.all():
if key in bucket_object.key:
return True
else:
return False
def sync_s3_bucket_to_artifacts(bucket,project_name=None):
if project_name is None:
project_name="kyt-cib"
files_to_sync = []
s3_resource = get_s3_resource("s3")
s3_bucket = s3_resource.Bucket(bucket)
for bucket_object in s3_bucket.objects.all():
if project_name in bucket_object.key:
files_to_sync.append(bucket_object.key)
for f in files_to_sync:
print("Downloading: ", f)
directory, filename=os.path.split(f)
path = Path(os.path.join(os.getcwd(),directory))
print("Saving to: ", )
path.mkdir(parents=True, exist_ok=True)
get_s3_client("s3").download_file(
Bucket=bucket,
Key=f,
Filename=os.path.join(path,filename)
)
return
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = os.path.basename(file_name)
# Upload the file
s3_client = get_s3_client("s3")
try:
response = s3_client.upload_file(
file_name,
bucket,
object_name
)
except ClientError as e:
logger.error(e)
return False
return True
def sync_artifacts_to_s3_bucket(bucket, directory, if_exists="skip"):
files_to_sync=[]
for root, dirs, files in os.walk(directory):
for filename in files:
files_to_sync.append(
(
os.path.join(root, filename),
"kyt-cib/"+os.path.join(root, filename)
)
)
for f in files_to_sync:
if if_exists=="skip":
if not check_file_exist_s3(f[1], bucket):
upload_file(f[0], bucket, f[1])
elif if_exists=="replace":
upload_file(f[0], bucket, f[1])
return
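# Illustrative round trip (the bucket name and directory are placeholders):
#
#     sync_artifacts_to_s3_bucket("my-bucket", "artifacts", if_exists="skip")
#     sync_s3_bucket_to_artifacts("my-bucket", project_name="kyt-cib")
#
# With if_exists="skip", files already present under the "kyt-cib/" prefix are
# left untouched; if_exists="replace" re-uploads everything.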
def get_url(url, sep):
url=urlparse(url)
if url.scheme=="s3":
client=get_s3_client("s3")
obj=client.get_object(Bucket=url.netloc, Key=url.path[1:])
return pd.read_csv(obj['Body'],sep=sep)
else:
        return pd.read_csv(url, sep=sep)
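# get_url reads either an s3:// object or any path/URL pandas can open
# directly, e.g. (both paths are placeholders):
#
#     df_remote = get_url("s3://my-bucket/data/input.csv", sep=";")
#     df_local = get_url("/tmp/input.csv", sep=";")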
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------#
'''
The main task of this module is to acquire, read, and prepare
geophysical variables related to wave height from satellite
altimetry files for further use.
'''
# --- import libraries ------------------------------------------------#
# standard library imports
import sys
import numpy as np
from datetime import datetime, timedelta
import os
from copy import deepcopy
import time
from urllib.request import urlretrieve, urlcleanup # python3
from ftplib import FTP
import netCDF4 as netCDF4
from dateutil.relativedelta import relativedelta
from joblib import Parallel, delayed
import pandas as pd
import pyproj
import zipfile
import tempfile
from tqdm import tqdm
import xarray as xr
# own imports
from wavy.ncmod import ncdumpMeta
from wavy.ncmod import read_netcdfs, get_filevarname
from wavy.ncmod import find_attr_in_nc, dumptonc_ts_sat
from wavy.utils import find_included_times, NoStdStreams
from wavy.utils import sort_files, parse_date
from wavy.utils import make_pathtofile, make_subdict
from wavy.utils import finditem, haversineA
from wavy.credentials import get_credentials
from wavy.modelmod import make_model_filename_wrapper
from wavy.modelmod import read_model_nc_output_lru
from wavy.wconfig import load_or_default
from wavy.filtermod import filter_main,vardict_unique
from wavy.filtermod import rm_nan_from_vardict
# ---------------------------------------------------------------------#
# read yaml config files:
region_dict = load_or_default('region_specs.yaml')
model_dict = load_or_default('model_specs.yaml')
satellite_dict = load_or_default('satellite_specs.yaml')
variable_info = load_or_default('variable_info.yaml')
# --- global functions ------------------------------------------------#
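# NOTE: 'flatten' is called in get_local_files below but is neither imported
# nor defined in the excerpt above; a minimal helper is assumed here.
def flatten(list_of_lists):
    return [item for sublist in list_of_lists for item in sublist]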
def tmploop_get_remote_files(i,matching,user,pw,
server,remote_path,
path_local):
"""
Function to download files using ftp. Tries 10 times before failing.
"""
print("File: ",matching[i])
dlstr=('ftp://' + user + ':' + pw + '@'
+ server + remote_path + matching[i])
for attempt in range(10):
print ("Attempt to download data: ")
try:
print ("Downloading file")
urlretrieve(dlstr, os.path.join(path_local, matching[i]))
urlcleanup()
except Exception as e:
print (e.__doc__)
            print(str(e))
print ("Waiting for 10 sec and retry")
time.sleep(10)
else:
break
else:
print ('An error was raised and I ' +
'failed to fix problem myself :(')
print ('Exit program')
sys.exit()
def get_remote_files_cmems(\
sdate,edate,twin,nproc,sat,product,path_local,dict_for_sub):
'''
    Download swath files from CMEMS and store them at a defined
    location. The time stamps in the file names stand for:
    from, to, creation.
'''
# credentials
server = satellite_dict[product]['src']['server']
user, pw = get_credentials(remoteHostName = server)
tmpdate = deepcopy(sdate)
filesort = False
while (tmpdate <= edate):
# create remote path
path_template = satellite_dict[product]['src']\
['path_template']
strsublst = satellite_dict[product]['src']\
['strsub']
subdict = make_subdict(strsublst,class_object_dict=dict_for_sub)
path_remote = make_pathtofile(path_template,\
strsublst,subdict,\
date=tmpdate)
if path_local is None:
# create local path
path_template = satellite_dict[product]['dst']\
['path_template']
strsublst = satellite_dict[product]['dst']\
['strsub']
path_local = make_pathtofile(path_template,\
strsublst,subdict,\
date=sdate)
filesort = True
print ('# ----- ')
print ('Chosen source: ')
print (sat + ' values from ' + product + ': ' + server)
print ('# ----- ')
        # get list of accessible files
ftp = FTP(server)
ftp.login(user, pw)
ftp.cwd(path_remote)
content=FTP.nlst(ftp)
#choose files according to sdate/edate
tmplst=[]
tmpdate_new = tmpdate-timedelta(minutes=twin)
tmpdate_end = edate+timedelta(minutes=twin)
while (tmpdate_new <= tmpdate_end):
matchingtmp = [s for s in content
if tmpdate_new.strftime('%Y%m%dT%H')
in s ]
tmplst = tmplst + matchingtmp
tmpdate_new = tmpdate_new + timedelta(minutes=twin)
matching = np.unique(tmplst)
# check if download path exists if not create
if not os.path.exists(path_local):
os.makedirs(path_local,exist_ok=True)
# Download matching files
print ('Downloading ' + str(len(matching))
+ ' files: .... \n')
print ("Used number of possible simultaneous downloads "
+ str(nproc) + "!")
Parallel(n_jobs=nproc)(
delayed(tmploop_get_remote_files)(
i,matching,user,pw,server,
path_remote,path_local
) for i in range(len(matching))
)
# update time
tmpdate = datetime((tmpdate + relativedelta(months=+1)).year,
(tmpdate + relativedelta(months=+1)).month,1)
if filesort is True:
# sort files
print("Data is being sorted into subdirectories " \
+ "year and month ...")
filelst = [f for f in os.listdir(path_local)
if os.path.isfile(os.path.join(path_local,f))]
sort_files(path_local,filelst,product,sat)
print ('Files downloaded to: \n', path_local)
def get_remote_files_aviso(\
sdate,edate,twin,nproc,sat,product,path_local,dict_for_sub):
'''
    Download swath files from AVISO+ and store them at a defined
    location.
'''
# credentials
server = satellite_dict[product]['src']['server']
user, pw = get_credentials(remoteHostName = server)
tmpdate = deepcopy(sdate)
filesort = False
while (tmpdate <= edate):
# create remote path
path_template = satellite_dict[product]['src']\
['path_template']
strsublst = satellite_dict[product]['src']\
['strsub']
subdict = make_subdict(strsublst,class_object_dict=dict_for_sub)
path_remote = make_pathtofile(path_template,\
strsublst,subdict,\
date=tmpdate)
if path_local is None:
# create local path
path_template = satellite_dict[product]['dst']\
['path_template']
strsublst = satellite_dict[product]['dst']\
['strsub']
path_local = make_pathtofile(path_template,\
strsublst,subdict,\
date=sdate)
filesort = True
print ('# ----- ')
print ('Chosen source: ')
print (sat + ' values from ' + product + ': ' + server)
print ('# ----- ')
        # get list of accessible files
ftp = FTP(server)
ftp.login(user, pw)
ftp.cwd(path_remote)
content=FTP.nlst(ftp)
#choose files according to sdate/edate
tmplst=[]
tmpdate_new = tmpdate-timedelta(minutes=twin)
tmpdate_end = edate+timedelta(minutes=twin)
while (tmpdate_new <= tmpdate_end):
matchingtmp = [s for s in content
if tmpdate_new.strftime('%Y%m%dT%H')
in s ]
tmplst = tmplst + matchingtmp
tmpdate_new = tmpdate_new + timedelta(minutes=twin)
matching = np.unique(tmplst)
# check if download path exists if not create
if not os.path.exists(path_local):
os.makedirs(path_local,exist_ok=True)
# Download matching files
print ('Downloading ' + str(len(matching))
+ ' files: .... \n')
print ("Used number of possible simultaneous downloads "
+ str(nproc) + "!")
Parallel(n_jobs=nproc)(
delayed(tmploop_get_remote_files)(
i,matching,user,pw,server,
path_remote,path_local
) for i in range(len(matching))
)
# update time
tmpdate = datetime((tmpdate + relativedelta(years=+1)).year,
(tmpdate + relativedelta(years=+1)).month,1)
if filesort is True:
# sort files
print("Data is being sorted into subdirectories " \
+ "year and month ...")
filelst = [f for f in os.listdir(path_local)
if os.path.isfile(os.path.join(path_local,f))]
sort_files(path_local,filelst,product,sat)
print ('Files downloaded to: \n', path_local)
def get_remote_files_cci(\
sdate,edate,twin,nproc,sat,product,path_local,dict_for_sub):
'''
    Download swath files from CCI and store them at a defined
    location.
'''
# credentials
server = satellite_dict[product]['src']['server']
level = satellite_dict[product]['processing_level']
user, pw = get_credentials(remoteHostName = server)
tmpdate = deepcopy(sdate)
filesort = False
while (tmpdate <= edate):
print(tmpdate)
# create remote path
path_template = satellite_dict[product]['src']\
['path_template']
strsublst = satellite_dict[product]['src']\
['strsub']
dict_for_sub['mission'] =\
satellite_dict[product]['mission'][sat]
subdict = make_subdict(strsublst,class_object_dict=dict_for_sub)
path_remote = make_pathtofile(path_template,\
strsublst,subdict,\
date=tmpdate)
if path_local is None:
# create local path
subdict['mission'] = sat
path_template = satellite_dict[product]['dst']\
['path_template']
strsublst = satellite_dict[product]['dst']\
['strsub']
path_local = make_pathtofile(path_template,\
strsublst,subdict,\
date=sdate)
filesort = True
print ('# ----- ')
print ('Chosen source: ')
print (sat + ' values from ' + product + ': ' + server)
print ('# ----- ')
        # get list of accessible files
ftp = FTP(server)
ftp.login(user, pw)
ftp.cwd(path_remote)
content = FTP.nlst(ftp)
#choose files according to sdate/edate
tmplst = []
tmpdate_new = tmpdate-timedelta(minutes=twin)
tmpdate_end = edate+timedelta(minutes=twin)
while (tmpdate_new <= tmpdate_end):
if level == 'L2P':
matchingtmp = [s for s in content
if tmpdate_new.strftime('-%Y%m%dT%H')
in s ]
elif level == 'L3':
matchingtmp = [s for s in content
if tmpdate_new.strftime('-%Y%m%d')
in s ]
tmplst = tmplst + matchingtmp
tmpdate_new = tmpdate_new + timedelta(minutes=twin)
matching = np.unique(tmplst)
# check if download path exists if not create
if not os.path.exists(path_local):
os.makedirs(path_local,exist_ok=True)
# Download matching files
print ('Downloading ' + str(len(matching))
+ ' files: .... \n')
print ("Used number of simultaneous downloads "
+ str(nproc) + "!")
Parallel(n_jobs=nproc)(
delayed(tmploop_get_remote_files)(
i,matching,user,pw,server,
path_remote,path_local
) for i in range(len(matching))
)
# update time
tmpdate += timedelta(days=1)
if filesort is True:
# sort files
print("Data is being sorted into subdirectories " \
+ "year and month ...")
print(path_local)
filelst = [f for f in os.listdir(path_local)
if os.path.isfile(os.path.join(path_local,f))]
sort_files(path_local,filelst,product,sat)
print ('Files downloaded to: \n', path_local)
def get_remote_files_eumetsat(\
product,sdate,edate,api_url,sat,path_local,dict_for_sub):
'''
    Download swath files from EUMETSAT and store them at a defined
    location. This function uses the SentinelAPI for queries.
'''
import sentinelsat as ss
products = None
dates = (sdate.strftime('%Y-%m-%dT%H:%M:%SZ'),\
edate.strftime('%Y-%m-%dT%H:%M:%SZ'))
filesort = False
if path_local is None:
# create local path
path_template = satellite_dict[product]['dst']\
['path_template']
strsublst = satellite_dict[product]['dst']\
['strsub']
subdict = make_subdict(strsublst,
class_object_dict=dict_for_sub)
path_local = make_pathtofile(path_template,\
strsublst,
subdict,\
date=sdate)
filesort = True
kwargs = make_query_dict(product,sat)
if api_url is None:
api_url_lst = \
satellite_dict[product]['src']['api_url']
for url in api_url_lst:
print('Source:',url)
try:
user, pw = get_credentials(remoteHostName=url)
api = ss.SentinelAPI(user, pw, url)
products = api.query(area=None, date=dates,**kwargs)
break
except Exception as e:
if isinstance(e,ss.exceptions.ServerError):
print(e)
else:
user, pw = get_credentials(remoteHostName = api_url)
api = ss.SentinelAPI(user, pw, api_url)
products = api.query(area=None, date=dates,**kwargs)
if products is not None:
# check if download path exists if not create
if not os.path.exists(path_local):
os.makedirs(path_local,exist_ok=True)
api.download_all(products,directory_path=path_local)
#api.download(product_id)
else: print('No products found!')
if filesort is True:
# sort files
print("Data is being sorted into subdirectories " \
+ "year and month ...")
filelst = [f for f in os.listdir(path_local)
if os.path.isfile(os.path.join(path_local,f))]
sort_files(path_local,filelst,product,sat)
print ('Files downloaded to: \n', path_local)
def get_remote_files(path_local,sdate,edate,twin,
nproc,product,api_url,sat,dict_for_sub):
'''
    Download swath files and store them at a defined location.
It is currently possible to download L3 altimeter data from
CMEMS, L3 and L2P from CEDA CCI, and L2 from EUMETSAT,
as well as L2P from aviso+ for cfosat swim data.
'''
if product=='cmems_L3':
get_remote_files_cmems(sdate,edate,twin,nproc,\
sat,product,path_local,\
dict_for_sub)
elif product=='cfo_swim_L2P':
get_remote_files_aviso(sdate,edate,twin,nproc,\
sat,product,path_local,\
dict_for_sub)
elif product=='eumetsat_L2':
get_remote_files_eumetsat(product,sdate,edate,\
api_url,sat,path_local,\
dict_for_sub)
elif product=='cci_L2P' or product=='cci_L3':
get_remote_files_cci(sdate,edate,twin,nproc,\
sat,product,path_local,\
dict_for_sub)
def make_query_dict(product,sat):
'''
    Set up the query keyword arguments for L2 data requests via the SentinelAPI.
'''
level = satellite_dict[product]['mission'].get('processing')
SAT = satellite_dict[product]['mission'].get(sat)
kwargs = {'platformname': 'Sentinel-3',
'instrumentshortname': 'SRAL',
'productlevel': level,
'filename': SAT + '*WAT*'}
return kwargs
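# For a hypothetical Sentinel-3A entry in satellite_specs.yaml the returned
# kwargs would look roughly like:
#
#     {'platformname': 'Sentinel-3',
#      'instrumentshortname': 'SRAL',
#      'productlevel': 'L2',
#      'filename': 'S3A*WAT*'}
#
# which SentinelAPI.query() translates into the corresponding search filters.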
def get_local_files(sdate,edate,twin,product,dict_for_sub=None,
path_local=None):
"""
Function to retrieve list of files/paths for available
    locally stored satellite data. The list is then used by other
    functions for querying and parsing.
param:
sdate - start date (datetime object)
edate - end date (datetime object)
twin - time window (temporal constraint) in minutes
        product - product as specified in satellite_specs.yaml
        dict_for_sub - dictionary for substitution in templates
        path_local - local directory path, if already defined
return:
pathlst - list of paths
filelst - list of files
"""
filelst = []
pathlst = []
tmpdate = sdate-timedelta(minutes=twin)
if path_local is None:
print('path_local is None -> checking config file')
while (tmpdate <= edate + relativedelta(months=+1)):
try:
# create local path for each time
path_template = \
satellite_dict[product]['dst'].get(
'path_template')
strsublst = \
satellite_dict[product]['dst'].get('strsub')
subdict = \
make_subdict(strsublst,
class_object_dict=dict_for_sub)
path_local = make_pathtofile(path_template,\
strsublst,subdict)
path_local = (
os.path.join(
path_local,
tmpdate.strftime('%Y'),
tmpdate.strftime('%m'))
)
print(path_local)
if os.path.isdir(path_local):
tmplst = np.sort(os.listdir(path_local))
filelst.append(tmplst)
pathlst.append([os.path.join(path_local,e)
for e in tmplst])
tmpdate = tmpdate + relativedelta(months=+1)
path_local = None
except Exception as e:
print(e)
tmpdate = tmpdate + relativedelta(months=+1)
filelst = np.sort(flatten(filelst))
pathlst = np.sort(flatten(pathlst))
else:
filelst = np.sort(os.listdir(path_local))
pathlst = [os.path.join(path_local,e) for e in filelst]
idx_start,tmp = check_date(filelst, sdate - timedelta(minutes=twin))
tmp,idx_end = check_date(filelst, edate + timedelta(minutes=twin))
if idx_end == 0:
idx_end = len(pathlst)-1
del tmp
pathlst = np.unique(pathlst[idx_start:idx_end+1])
filelst = np.unique(filelst[idx_start:idx_end+1])
print (str(int(len(pathlst))) + " valid files found")
return pathlst, filelst
def read_local_ncfiles(pathlst,product,varalias,
sd,ed,twin,variable_info):
"""
Wrapping function to read satellite netcdf files.
param:
pathlst - list of paths to be parsed
product - product as specified in satellite_specs.yaml
varalias
sd - start date (datetime object)
        ed - end date (datetime object)
twin - time window (temporal constraint) in minutes
variable_info - from variable_info.yaml
return:
dictionary of variables for the satellite_class object
"""
# adjust start and end
sd = sd - timedelta(minutes=twin)
ed = ed + timedelta(minutes=twin)
# get meta data
ncmeta = ncdumpMeta(pathlst[0])
ncvar = get_filevarname(varalias,variable_info,
satellite_dict[product],ncmeta)
# retrieve sliced data
ds = read_netcdfs(pathlst)
ds_sort = ds.sortby('time')
ds_sliced = ds_sort.sel(time=slice(sd, ed))
# make dict and start with stdvarname for varalias
stdvarname = variable_info[varalias]['standard_name']
var_sliced = ds_sliced[ncvar]
vardict = {}
vardict[stdvarname] = list(var_sliced.values)
# add coords to vardict
# 1. retrieve list of coordinates
coords_lst = list(var_sliced.coords.keys())
# 2. iterate over coords_lst
for varname in coords_lst:
stdcoordname = ds_sliced[varname].attrs['standard_name']
if stdcoordname == 'longitude':
vardict[stdcoordname] = \
list(((ds_sliced[varname].values - 180) % 360) - 180)
elif stdcoordname == 'time':
# convert to unixtime
df_time = ds_sliced[varname].to_dataframe()
unxt = (pd.to_datetime(df_time[varname]).view(int) / 10**9)
vardict[stdcoordname] = unxt.values
vardict['time_unit'] = variable_info[stdcoordname]['units']
else:
vardict[stdcoordname] = list(ds_sliced[varname].values)
return vardict
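# Sketch of a typical call chain (dates, product and variable alias are
# placeholders; dict_for_sub normally comes from the calling satellite class):
#
#     pathlst, filelst = get_local_files(sdate, edate, twin=30,
#                                        product='cmems_L3',
#                                        dict_for_sub=dict_for_sub)
#     vardict = read_local_ncfiles(pathlst, 'cmems_L3', 'Hs',
#                                  sdate, edate, 30, variable_info)
#
# The returned dict holds the variable under its standard_name plus the
# coordinates, with 'time' converted to unix time and 'time_unit' attached.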
def unzip_eumetsat(pathlst,tmpdir):
"""
Function to unzip eumetsat files prior to reading
param:
pathlst - list of paths to zipped files
        tmpdir - temporary folder for the unzipped files
return:
pathlst_new - new list of paths to unzipped files
"""
for count, f in enumerate(pathlst):
zipped = zipfile.ZipFile(f)
enhanced_measurement = zipped.namelist()[-1]
extracted = zipped.extract(enhanced_measurement,
path=tmpdir.name)
fname = extracted.split('/')[-1]
dignumstr = '_{num:0' + str(len(str(len(pathlst)))) + 'd}.nc'
# cp extracted file to parent tmp
cmdstr = ('cp ' + extracted + ' ' + tmpdir.name
+ '/' + fname[0:-3]
+ dignumstr.format(num=count))
#+ '_{num:04d}.nc'.format(num=count))
os.system(cmdstr)
# delete subfolder
cmdstr = ('rm -r '
+ os.path.dirname(os.path.realpath(extracted)))
os.system(cmdstr)
flst = os.listdir(tmpdir.name)
pathlst_new = []
for f in flst:
pathlst_new.append(os.path.join(tmpdir.name,f))
return pathlst_new
def read_local_files_eumetsat(pathlst,product,varalias,satellite_dict):
'''
    Read and concatenate all data into one time series per variable.
    This function is tailored to EUMETSAT files.
'''
# --- find variable cf names --- #
print ("Processing " + str(int(len(pathlst))) + " files")
print (pathlst[0])
print (pathlst[-1])
# --- find ncvar cf names --- #
tmpdir = tempfile.TemporaryDirectory()
zipped = zipfile.ZipFile(pathlst[0])
enhanced_measurement = zipped.namelist()[-1]
extracted = zipped.extract(enhanced_measurement, path=tmpdir.name)
stdname = variable_info[varalias]['standard_name']
ncmeta = ncdumpMeta(extracted)
ncvar = get_filevarname(varalias,variable_info,
satellite_dict[product],ncmeta)
tmpdir.cleanup()
# --- create vardict --- #
vardict = {}
tmpdir = tempfile.TemporaryDirectory()
print('tmp directory is established:',tmpdir.name)
for f in tqdm(pathlst):
#path = f[0:-len(f.split('/')[-1])]
zipped = zipfile.ZipFile(f)
enhanced_measurement = zipped.namelist()[-1]
extracted = zipped.extract(enhanced_measurement,
path=tmpdir.name)
ds = xr.open_dataset(extracted)
ds_var = ds[ncvar]
if stdname in vardict.keys():
vardict[stdname] += list(ds[ncvar])
else:
vardict[stdname] = list(ds[ncvar])
coords_lst = list(ds_var.coords.keys())
for varname in coords_lst:
stdcoordname = ds[varname].attrs['standard_name']
if stdcoordname == 'longitude':
if stdcoordname in vardict.keys():
vardict[stdcoordname] += list(ds[varname].values)
else:
vardict[stdcoordname] = list(ds[varname].values)
elif stdcoordname == 'time':
# convert to unixtime
df_time = ds[varname].to_dataframe()
                # reconstructed to mirror the conversion in read_local_ncfiles
                unxt = (pd.to_datetime(df_time[varname]).view(int) / 10**9)
import re
import os
import sys
import math
import random
import numpy as np
import pandas as pd
from Bio import SeqIO
from PyDAIR.io.PyDAIRIO import *
from PyDAIR.utils.PyDAIRUtils import *
class PyDAIRSimGeneSet:
"""PyDAIRSimGeneSet class.
    This class samples sequences for a single V, D, or J gene segment.
    The sequence names and sequences read from a FASTA file are stored
    in this class, together with the sampling parameters.
"""
def __init__(self, seq_name, seq_seq, prob, d5_len, d3_len):
self.name = seq_name
self.seq = seq_seq
self.prob = prob
self.d5_len = d5_len
self.d3_len = d3_len
def generate_seqs(self, n, seed = None):
"""Generate V/D/J segments.
Args:
            n (int): The number of sequences to generate.
seed (int): The seed used for random sampling.
"""
if seed is not None:
np.random.seed(seed)
        # NOTE: the original statement was truncated here; weighted sampling
        # of the gene names with the stored probabilities is assumed.
        sampled_id = list(pd.Series(self.name).sample(n=n, replace=True,
                                                      weights=self.prob))
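# Illustrative use of PyDAIRSimGeneSet (names, sequences and probabilities
# below are made up; the remainder of generate_seqs is truncated above):
#
#     vgenes = PyDAIRSimGeneSet(['IGHV1', 'IGHV2'],
#                               ['ACGTACGT', 'TTGCAATG'],
#                               prob=[0.7, 0.3], d5_len=None, d3_len=None)
#     vgenes.generate_seqs(1000, seed=42)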
import gym
from gym import spaces
from empyrical import max_drawdown, alpha_beta, sharpe_ratio, annual_return
import pandas as pd
import numpy as np
import typing
from datetime import datetime
import ray
from ray.rllib.agents.ppo import PPOTrainer, DEFAULT_CONFIG
# Start up Ray. This must be done before we instantiate any RL agents.
ray.init(num_cpus=40, ignore_reinit_error=True, log_to_driver=False)
config = DEFAULT_CONFIG.copy()
config["num_workers"] = 10
config["num_envs_per_worker"] = 5
config["rollout_fragment_length"] = 50
config["train_batch_size"] = 25000
config["batch_mode"] = "complete_episodes"
config["num_sgd_iter"] = 20
config["sgd_minibatch_size"] = 2000
config["model"]["dim"] = 200
config["model"]["conv_filters"] = [[16, [5, 1], 5], [16, [5, 1], 5], [16, [5, 1], 5]]
config[
"num_cpus_per_worker"
] = 2 # This avoids running out of resources in the notebook environment when this cell is re-executed
config["env_config"] = {
"pricing_source": "csvdata",
"tickers": [
"GOLD_",
"AAPL_",
],
"lookback": 200,
"start": "1995-01-02",
"end": "2015-12-31",
"features": [
"return_volatility_20",
"return_skewness_20",
"adjvolume_volatility_20",
],
"random_start": True,
"trading_days": 1000,
}
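# Sketch: assuming a gym.Env subclass (e.g. a TradingEnv defined further down
# in this module) that accepts the env_config above, the agent would be built
# and trained roughly like this:
#
#     trainer = PPOTrainer(config=config, env=TradingEnv)
#     for _ in range(10):
#         result = trainer.train()
#         print(result["episode_reward_mean"])
#
# TradingEnv is an assumption here; only the config dict is shown above.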
def load_data(
price_source: str,
tickers: typing.List[str],
start: datetime,
end: datetime,
features: typing.List[str],
):
"""Returned price data to use in gym environment"""
# Load data
# Each dataframe will have columns date and a collection of fields
# TODO: DataLoader from mongoDB
# Raw price from DB, forward impute on the trading days for missing date
# calculate the features (log return, volatility)
if price_source in ["csvdata"]:
feature_df = []
for t in tickers:
df1 = pd.read_csv("csvdata/{}.csv".format(t))
df1["datetime"] = | pd.to_datetime(df1["datetime"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
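    # Illustrative (not an actual test): write a frame to disk and read it
    # back through several threads, then compare against the original.
    #
    #     df = self.construct_dataframe(10000)
    #     with tm.ensure_clean('__thread_test__.csv') as path:
    #         df.to_csv(path)
    #         result = self.generate_multithread_dataframe(path, 10000, 4)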
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types are left as 'XXX', which will raise an error if encountered.
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA's internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA.Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER).Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems",)
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""
tuple: A tuple containing the glue tables that link EIA and FERC plant and
utility entities to each other and to their PUDL IDs.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping
each partition type (sub-key) to its working partitions (sub-value), such as
the tuples of years for each data source that can be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and column names (values) of
integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
        'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
'backup', # WHERE Is this used? because removed from DG table b/c not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
        'contact_firstname': pd.StringDtype(),
import rba
import copy
import pandas
import time
import numpy
import seaborn
import matplotlib.pyplot as plt
from .rba_Session import RBA_Session
from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
def find_ribosomal_proteins(rba_session, model_processes=['TranslationC', 'TranslationM'], external_annotations=None):
out = []
for i in model_processes:
out += [rba_session.ModelStructure.ProteinInfo.Elements[j]['ProtoID']
for j in list(rba_session.ModelStructure.ProcessInfo.Elements[i]['Composition'].keys()) if j in rba_session.ModelStructure.ProteinInfo.Elements.keys()]
if external_annotations is not None:
out += list(external_annotations['ID'])
return(list(set(out)))
def build_model_compartment_map(rba_session):
out = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[i]['Compartment'] for i in list(
rba_session.ModelStructure.ProteinInfo.Elements.keys())}
return(out)
def build_compartment_annotations(Compartment_Annotations_external, model_protein_compartment_map):
for i in Compartment_Annotations_external.index:
if Compartment_Annotations_external.loc[i, 'ID'] in list(model_protein_compartment_map.keys()):
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 1
else:
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 0
Compartment_Annotations_internal = pandas.DataFrame()
Compartment_Annotations_internal['ID'] = list(model_protein_compartment_map.keys())
Compartment_Annotations_internal['ModelComp'] = list(model_protein_compartment_map.values())
Compartment_Annotations = pandas.concat(
[Compartment_Annotations_internal, Compartment_Annotations_external.loc[Compartment_Annotations_external['modelproteinannotation'] == 0, ['ID', 'ModelComp']]], axis=0)
return(Compartment_Annotations)
def build_dataset_annotations(input, ID_column, Uniprot, Compartment_Annotations, model_protein_compartment_map, ribosomal_proteins):
print('riboprots-----------------')
print(ribosomal_proteins)
out = pandas.DataFrame()
for g in list(input[ID_column]):
out.loc[g, 'ID'] = g
matches = [i for i in list(Uniprot.loc[pandas.isna(
Uniprot['Gene names']) == False, 'Gene names']) if g in i]
mass_prot = numpy.nan
if len(matches) > 0:
mass_prot = len(Uniprot.loc[Uniprot['Gene names'] == matches[0], 'Sequence'].values[0])
out.loc[g, 'AA_residues'] = mass_prot
if g in list(Compartment_Annotations['ID']):
out.loc[g, 'Location'] = Compartment_Annotations.loc[Compartment_Annotations['ID']
== g, 'ModelComp'].values[0]
in_model = 0
if g in model_protein_compartment_map.keys():
in_model = 1
is_ribosomal = 0
if g in ribosomal_proteins:
is_ribosomal = 1
out.loc[g, 'InModel'] = in_model
out.loc[g, 'IsRibosomal'] = is_ribosomal
return(out)
def build_full_annotations_from_dataset_annotations(annotations_list):
out = pandas.concat(annotations_list, axis=0)
index = out.index
is_duplicate = index.duplicated(keep="first")
not_duplicate = ~is_duplicate
out = out[not_duplicate]
return(out)
def infer_copy_numbers_from_reference_copy_numbers(fold_changes, absolute_data, matching_column_in_fold_change_data, matching_column_in_absolute_data, conditions_in_fold_change_data_to_restore):
out = pandas.DataFrame()
for i in list(absolute_data['Gene']):
if i in list(fold_changes['Gene']):
FoldChange_match = fold_changes.loc[fold_changes['Gene']
== i, matching_column_in_fold_change_data].values[0]
CopyNumber_match = absolute_data.loc[absolute_data['Gene']
== i, matching_column_in_absolute_data].values[0]
if not pandas.isna(FoldChange_match):
if not pandas.isna(CopyNumber_match):
out.loc[i, 'ID'] = i
out.loc[i, 'Absolute_Reference'] = CopyNumber_match/(2**FoldChange_match)
for gene in list(out['ID']):
Abs_Ref = out.loc[gene, 'Absolute_Reference']
for condition in conditions_in_fold_change_data_to_restore:
out.loc[gene, condition] = Abs_Ref * \
(2**fold_changes.loc[fold_changes['Gene'] == gene, condition].values[0])
return(out)
def add_annotations_to_proteome(input, ID_column, annotations):
for i in input.index:
if input.loc[i, ID_column] in annotations.index:
input.loc[i, 'AA_residues'] = annotations.loc[input.loc[i, ID_column], 'AA_residues']
input.loc[i, 'Location'] = annotations.loc[input.loc[i, ID_column], 'Location']
input.loc[i, 'InModel'] = annotations.loc[input.loc[i, ID_column], 'InModel']
input.loc[i, 'IsRibosomal'] = annotations.loc[input.loc[i, ID_column], 'IsRibosomal']
return(input)
def determine_compartment_occupation(Data, Condition, mass_col='AA_residues', only_in_model=False, compartments_to_ignore=['DEF'], compartments_no_original_PG=[], ribosomal_proteins_as_extra_compartment=True):
for i in compartments_to_ignore:
Data = Data.loc[Data['Location'] != i]
for i in compartments_no_original_PG:
Data = Data.loc[(Data['Location'] != i) | (Data['InModel'] == 1)]
if only_in_model:
Data = Data.loc[Data['InModel'] >= 1]
if ribosomal_proteins_as_extra_compartment:
Data_R = Data.loc[Data['IsRibosomal'] == 1].copy()
Data = Data.loc[Data['IsRibosomal'] == 0]
Data_R_df = Data_R.loc[:, [Condition, mass_col, 'Location']]
Data_R_df[Condition] = Data_R_df[Condition]*Data_R_df[mass_col]
Ribosomal_sum = Data_R_df[Condition].sum()
df = Data.loc[:, [Condition, mass_col, 'Location']]
df[Condition] = df[Condition]*df[mass_col]
out = pandas.DataFrame(df.groupby('Location').sum())
if ribosomal_proteins_as_extra_compartment:
out.loc['Ribosomes', Condition] = Ribosomal_sum
out.loc['Total', Condition] = out[Condition].sum()
out.loc[:, 'original_protein_fraction'] = out[Condition]/out.loc['Total', Condition]
out.rename(columns={Condition: 'original_amino_acid_occupation'}, inplace=True)
out.drop(columns=['AA_residues'], inplace=True)
return(out)
def build_proteome_overview(input, condition, compartments_to_ignore=['DEF', 'DEFA', 'Def'], compartments_no_original_PG=['n', 'Secreted'], ribosomal_proteins_as_extra_compartment=True):
out = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=False)
out_in_model = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=True)
out['original_PG_fraction'] = 1-out_in_model['original_amino_acid_occupation'] / \
out['original_amino_acid_occupation']
return(out)
def determine_correction_factor_A(fractions_entirely_replaced_with_expected_value):
expected_fraction_sum = 0
for i in fractions_entirely_replaced_with_expected_value.keys():
expected_fraction_sum += fractions_entirely_replaced_with_expected_value[i]
factor = 1/(1-expected_fraction_sum)
return(factor)
def determine_correction_factor_B(imposed_compartment_fractions):
expected_fractions = 0
for i in imposed_compartment_fractions.keys():
expected_fractions += imposed_compartment_fractions[i]
factor = 1-expected_fractions
return(factor)
def determine_correction_factor_C(input, condition, reference_condition):
return(input.loc[input['ID'] == 'Total_protein', condition].values[0]/input.loc[input['ID'] == 'Total_protein', reference_condition].values[0])
def correct_protein_fractions(input, factors, directly_corrected_compartments, imposed_compartment_fractions):
out = input.copy()
for c in out.index:
if c in directly_corrected_compartments:
out.loc[c, 'new_protein_fraction'] = out.loc[c,
'original_protein_fraction']*factors['A']*factors['B']
elif c in imposed_compartment_fractions.keys():
out.loc[c, 'new_protein_fraction'] = imposed_compartment_fractions[c]
return(out)
def correct_PG_fraction(input, factors, compartments_no_original_PG, merged_compartments):
out = input.copy()
for c in out.index:
if c == 'Total':
continue
else:
if c in compartments_no_original_PG:
original_fraction = out.loc[c, 'original_protein_fraction']
out.loc[c, 'new_PG_fraction'] = 1 - ((factors['A']*factors['B']*original_fraction) /
out.loc[c, 'new_protein_fraction'])
elif c in merged_compartments.keys():
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']*out.loc[c, 'original_protein_fraction']/(
out.loc[c, 'original_protein_fraction']+out.loc[merged_compartments[c], 'original_protein_fraction'])
else:
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']
return(out)
def merge_compartments(input, merged_compartments):
out = input.copy()
for c in merged_compartments.keys():
out.loc[c, 'new_protein_fraction'] = out.loc[c, 'new_protein_fraction'] + \
out.loc[merged_compartments[c], 'new_protein_fraction']
return(out)
def calculate_new_total_PG_fraction(input):
out = input.copy()
fraction = 0
for c in out.index:
if c not in ['Total', 'Ribosomes']:
fraction += out.loc[c, 'new_protein_fraction']*out.loc[c, 'new_PG_fraction']
out.loc['Total', 'new_PG_fraction'] = fraction
out.loc['Total', 'new_protein_fraction'] = 1
return(out)
def determine_apparent_process_efficiencies(growth_rate, input, rba_session, proteome_summary, protein_data, condition, gene_id_col):
process_efficiencies = pandas.DataFrame()
for i in input.index:
process_ID = input.loc[i, 'Process_ID']
process_name = input.loc[i, 'Process_Name']
process_client_compartments = input.loc[i, 'Client_Compartments'].split(' , ')
constituting_proteins = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[
i]['AAnumber'] for i in rba_session.ModelStructure.ProcessInfo.Elements[process_name]['Composition'].keys()}
Total_client_fraction = sum([proteome_summary.loc[i, 'new_protein_fraction']
for i in process_client_compartments])
n_AAs_in_machinery = 0
machinery_size = 0
for i in constituting_proteins.keys():
if i in protein_data['ID']:
protein_data.loc[protein_data['ID'] == i, ]
n_AAs_in_machinery += protein_data.loc[protein_data['ID'] == i, condition].values[0] * \
protein_data.loc[protein_data['ID'] == i, 'AA_residues'].values[0]
machinery_size += constituting_proteins[i]
        # is this the right reference amount?
if n_AAs_in_machinery > 0:
relative_Protein_fraction_of_machinery = n_AAs_in_machinery / \
proteome_summary.loc['Total', 'original_amino_acid_occupation']
specific_capacity = growth_rate*Total_client_fraction/relative_Protein_fraction_of_machinery
apparent_capacity = specific_capacity*machinery_size
# process_ID[process_name] = apparent_capacity
process_efficiencies.loc[process_name, 'Process'] = process_ID
process_efficiencies.loc[process_name, 'Parameter'] = str(
process_ID+'_apparent_efficiency')
process_efficiencies.loc[process_name, 'Value'] = apparent_capacity
return(process_efficiencies)
def correction_pipeline(input, condition, compartments_to_ignore, compartments_no_original_PG, fractions_entirely_replaced_with_expected_value, imposed_compartment_fractions, directly_corrected_compartments, merged_compartments):
out = build_proteome_overview(input=input, condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=True)
factor_A = determine_correction_factor_A(fractions_entirely_replaced_with_expected_value={
i: imposed_compartment_fractions[i] for i in fractions_entirely_replaced_with_expected_value})
factor_B = determine_correction_factor_B(
imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_protein_fractions(input=out, factors={
'A': factor_A, 'B': factor_B}, directly_corrected_compartments=directly_corrected_compartments, imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_PG_fraction(input=out, factors={
'A': factor_A, 'B': factor_B}, compartments_no_original_PG=compartments_no_original_PG, merged_compartments=merged_compartments)
out = merge_compartments(input=out, merged_compartments=merged_compartments)
out = calculate_new_total_PG_fraction(input=out)
out.to_csv(str('Correction_overview_'+condition+'.csv'))
return({'Summary': out, 'Correction_factors': {'A': factor_A, 'B': factor_B}})
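# Illustrative usage sketch (not part of the original script): how the
# correction pipeline might be called. The compartment names 'c', 'mIM', 'mIMS'
# and 'm', the imposed fractions, and the helper name are placeholder
# assumptions; 'DEF', 'n', 'Secreted' and 'Ribosomes' follow the defaults used
# elsewhere in this module. build_input_for_default_kapp_estimation is defined
# just below.
def _example_correction(annotated_proteome, condition):
    """Run the correction pipeline on an annotated proteome (sketch)."""
    corrected = correction_pipeline(
        input=annotated_proteome,
        condition=condition,
        compartments_to_ignore=['DEF'],
        compartments_no_original_PG=['n', 'Secreted'],
        fractions_entirely_replaced_with_expected_value=['Ribosomes'],
        imposed_compartment_fractions={'Ribosomes': 0.2, 'Secreted': 0.05, 'n': 0.1},
        directly_corrected_compartments=['c', 'mIM', 'mIMS', 'm'],
        merged_compartments={'c': 'Ribosomes'})
    return build_input_for_default_kapp_estimation(corrected)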
def build_input_for_default_kapp_estimation(input):
out = pandas.DataFrame(columns=['Compartment_ID', 'Density', 'PG_fraction'])
for i in input['Summary'].index:
if i not in ['Total', 'Ribosomes']:
out.loc[i, 'Compartment_ID'] = i
out.loc[i, 'Density'] = input['Summary'].loc[i, 'new_protein_fraction']
out.loc[i, 'PG_fraction'] = input['Summary'].loc[i, 'new_PG_fraction']
return(out)
def flux_bounds_from_input(input, condition, specific_exchanges=None):
flux_mean_df = input.loc[input['Type'] == 'ExchangeFlux_Mean', :]
flux_mean_SE = input.loc[input['Type'] == 'ExchangeFlux_StandardError', :]
out = pandas.DataFrame(columns=['Reaction_ID', 'LB', 'UB'])
if specific_exchanges is None:
exchanges_to_set = list(flux_mean_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
mean_val = flux_mean_df.loc[flux_mean_df['ID'] == rx, condition].values[0]
if not pandas.isna(mean_val):
SE_val = flux_mean_SE.loc[flux_mean_SE['ID'] == str(rx+'_SE'), condition].values[0]
out.loc[rx, 'Reaction_ID'] = rx
if not pandas.isna(SE_val):
lb = mean_val-SE_val
ub = mean_val+SE_val
if mean_val < 0:
out.loc[rx, 'LB'] = lb
if ub > 0:
out.loc[rx, 'UB'] = 0
else:
out.loc[rx, 'UB'] = ub
elif mean_val > 0:
out.loc[rx, 'UB'] = ub
if lb < 0:
out.loc[rx, 'LB'] = 0
else:
out.loc[rx, 'LB'] = lb
else:
out.loc[rx, 'LB'] = lb
out.loc[rx, 'UB'] = ub
else:
out.loc[rx, 'LB'] = mean_val
out.loc[rx, 'UB'] = mean_val
flux_dir_df = input.loc[input['Type'] == 'Flux_Direction', :]
if specific_exchanges is None:
exchanges_to_set = list(flux_dir_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
out.loc[rx, 'Reaction_ID'] = rx
if flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == 1:
out.loc[rx, 'LB'] = 0
elif flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == -1:
out.loc[rx, 'UB'] = 0
elif flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == 0:
out.loc[rx, 'LB'] = 0
out.loc[rx, 'UB'] = 0
flux_upper_df = input.loc[input['Type'] == 'Flux_Upper_Bound', :]
for rx in list(flux_upper_df['ID']):
out.loc[rx, 'Reaction_ID'] = rx
out.loc[rx, 'UB'] = flux_upper_df.loc[flux_upper_df['ID'] == rx, condition].values[0]
flux_lower_df = input.loc[input['Type'] == 'Flux_Lower_Bound', :]
for rx in list(flux_lower_df['ID']):
out.loc[rx, 'Reaction_ID'] = rx
out.loc[rx, 'LB'] = flux_lower_df.loc[flux_lower_df['ID'] == rx, condition].values[0]
return(out)
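# Illustrative usage sketch (not part of the original script): the expected
# layout of the flux input table. Reaction IDs, values, and the condition name
# are placeholder assumptions, shown for demonstration only.
def _example_flux_bounds():
    """Build a tiny flux input table and derive bounds from it (sketch)."""
    flux_data = pandas.DataFrame({
        'Type': ['ExchangeFlux_Mean', 'ExchangeFlux_StandardError', 'Flux_Direction'],
        'ID': ['R_EX_glc_e', 'R_EX_glc_e_SE', 'R_EX_o2_e'],
        'glucose_01': [-2.5, 0.3, -1]})
    # Bounds become mean +/- SE for measured exchanges, clipped at zero when the
    # mean already fixes the direction; Flux_Direction rows only pin the sign.
    return flux_bounds_from_input(flux_data, condition='glucose_01')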
def growth_Rate_from_input(input, condition):
return(input.loc[input['Type'] == 'Growth_Rate', condition].values[0])
def proteome_fractions_from_input(input, condition):
df = input.loc[input['Type'] == 'Expected_ProteomeFraction', :]
return(dict(zip(list(df['ID']), list(df[condition]))))
def medium_concentrations_from_input(input, condition):
df = input.loc[input['Type'] == 'Medium_Concentration', :]
return(dict(zip(list(df['ID']), list(df[condition]))))
def build_input_proteome_for_specific_kapp_estimation(proteomics_data, condition):
out = pandas.DataFrame()
out['ID'] = proteomics_data['ID']
out['copy_number'] = proteomics_data[condition]
return(out)
def inject_estimated_efficiencies_into_model(rba_session, specific_kapps=None, default_kapps=None, process_efficiencies=None, round_to_digits=0):
"""
Parameters
----------
specific_kapps : pandas.DataFrame(columns=['Enzyme_ID','Kapp'])
default_kapps : {'default_kapp':value,'default_transporter_kapp':value}
process_efficiencies : pandas.DataFrame(columns=['Process','Parameter','Value'])
"""
if specific_kapps is not None:
parameterized = []
for enz in list(specific_kapps['Enzyme_ID']):
if not pandas.isna(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if enz not in parameterized:
all_enzs = rba_session.ModelStructure.EnzymeInfo.Elements[enz]['Isozymes']
all_enzs.append(enz)
parameterized += all_enzs
if len(all_enzs) == 1:
proto_enz = all_enzs[0]
else:
proto_enz = [i for i in all_enzs if not '_duplicate_' in i][0]
val = round(specific_kapps.loc[specific_kapps['Enzyme_ID']
== enz, 'Kapp'].values[0], round_to_digits)
const = rba.xml.parameters.Function(
str(proto_enz + '_kapp__constant'), 'constant', parameters={'CONSTANT': val}, variable=None)
if str(proto_enz + '_kapp__constant') not in rba_session.model.parameters.functions._elements_by_id.keys():
rba_session.model.parameters.functions.append(const)
else:
rba_session.model.parameters.functions._elements_by_id[const.id] = const
count = 0
for e in rba_session.model.enzymes.enzymes:
if e.id in all_enzs:
count += 1
e.forward_efficiency = str(proto_enz + '_kapp__constant')
e.backward_efficiency = str(proto_enz + '_kapp__constant')
if count == len(all_enzs):
break
if default_kapps is not None:
if type(default_kapps) is dict:
rba_session.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapps['default_kapp']
rba_session.model.parameters.functions._elements_by_id['default_transporter_efficiency'].parameters._elements_by_id[
'CONSTANT'].value = default_kapps['default_transporter_kapp']
if process_efficiencies is not None:
for i in process_efficiencies.index:
if process_efficiencies.loc[i, 'Process'] in rba_session.model.processes.processes._elements_by_id.keys():
                if not pandas.isna(process_efficiencies.loc[i, 'Value']):
#!/usr/bin/env python
import numpy as np
import pandas as pd
import pickle
from datetime import datetime, date
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import StratifiedShuffleSplit
print('Loading raw data...')
train_users_path = '../data/airbnb/train_users.csv'
test_users_path = '../data/airbnb/test_users.csv'
sessions_path = '../data/airbnb/sessions.csv'
# Note: age_gender_bkts.csv and countries.csv files are not used.
# ######## Loading data #############
# train_users
df_train = pd.read_csv(train_users_path)
target = df_train['country_destination']
df_train = df_train.drop(['country_destination'], axis=1)
# test_users
df_test = pd.read_csv(test_users_path)
id_test = df_test['id']
# sessions
df_sessions = pd.read_csv(sessions_path)
df_sessions['id'] = df_sessions['user_id']
df_sessions = df_sessions.drop(['user_id'], axis=1)
#########Preparing Session data########
print('Working on Session data...')
#Filling nan with specific value ('NAN')
df_sessions.action = df_sessions.action.fillna('NAN')
df_sessions.action_type = df_sessions.action_type.fillna('NAN')
df_sessions.action_detail = df_sessions.action_detail.fillna('NAN')
df_sessions.device_type = df_sessions.device_type.fillna('NAN')
#Action values with low frequency are changed to 'OTHER'
act_freq = 100 #Threshold for frequency
act = dict(zip(*np.unique(df_sessions.action, return_counts=True)))
df_sessions.action = df_sessions.action.apply(lambda x: 'OTHER' if act[x] < act_freq else x)
#Computing value_counts. These are going to be used in the one-hot encoding
#based feature generation (following loop).
f_act = df_sessions.action.value_counts().argsort()
f_act_detail = df_sessions.action_detail.value_counts().argsort()
f_act_type = df_sessions.action_type.value_counts().argsort()
f_dev_type = df_sessions.device_type.value_counts().argsort()
#grouping session by id. We will compute features from all rows with the same id.
dgr_sess = df_sessions.groupby(['id'])
#Loop on dgr_sess to create all the features.
samples = []
cont = 0
ln = len(dgr_sess)
for g in dgr_sess:
if cont%10000 == 0:
print("%s from %s" %(cont, ln))
gr = g[1]
l = []
#the id
l.append(g[0])
#The actual first feature is the number of values.
l.append(len(gr))
sev = gr.secs_elapsed.fillna(0).values #These values are used later.
#action features
#(how many times each value occurs, numb of unique values, mean and std)
c_act = [0] * len(f_act)
for i,v in enumerate(gr.action.values):
c_act[f_act[v]] += 1
_, c_act_uqc = np.unique(gr.action.values, return_counts=True)
c_act += [len(c_act_uqc), np.mean(c_act_uqc), np.std(c_act_uqc)]
l = l + c_act
#action_detail features
#(how many times each value occurs, numb of unique values, mean and std)
c_act_detail = [0] * len(f_act_detail)
for i,v in enumerate(gr.action_detail.values):
c_act_detail[f_act_detail[v]] += 1
_, c_act_det_uqc = np.unique(gr.action_detail.values, return_counts=True)
c_act_detail += [len(c_act_det_uqc), np.mean(c_act_det_uqc), np.std(c_act_det_uqc)]
l = l + c_act_detail
#action_type features
#(how many times each value occurs, numb of unique values, mean and std
#+ log of the sum of secs_elapsed for each value)
l_act_type = [0] * len(f_act_type)
c_act_type = [0] * len(f_act_type)
for i,v in enumerate(gr.action_type.values):
l_act_type[f_act_type[v]] += sev[i]
c_act_type[f_act_type[v]] += 1
l_act_type = np.log(1 + np.array(l_act_type)).tolist()
_, c_act_type_uqc = np.unique(gr.action_type.values, return_counts=True)
c_act_type += [len(c_act_type_uqc), np.mean(c_act_type_uqc), np.std(c_act_type_uqc)]
l = l + c_act_type + l_act_type
#device_type features
#(how many times each value occurs, numb of unique values, mean and std)
c_dev_type = [0] * len(f_dev_type)
for i,v in enumerate(gr.device_type .values):
c_dev_type[f_dev_type[v]] += 1
c_dev_type.append(len(np.unique(gr.device_type.values)))
_, c_dev_type_uqc = np.unique(gr.device_type.values, return_counts=True)
c_dev_type += [len(c_dev_type_uqc), np.mean(c_dev_type_uqc), np.std(c_dev_type_uqc)]
l = l + c_dev_type
#secs_elapsed features
l_secs = [0] * 5
l_log = [0] * 15
if len(sev) > 0:
#Simple statistics about the secs_elapsed values.
l_secs[0] = np.log(1 + np.sum(sev))
l_secs[1] = np.log(1 + np.mean(sev))
l_secs[2] = np.log(1 + np.std(sev))
l_secs[3] = np.log(1 + np.median(sev))
l_secs[4] = l_secs[0] / float(l[1])
#Values are grouped in 15 intervals. Compute the number of values
#in each interval.
log_sev = np.log(1 + sev).astype(int)
l_log = np.bincount(log_sev, minlength=15).tolist()
l = l + l_secs + l_log
#The list l has the feature values of one sample.
samples.append(l)
cont += 1
#Creating a dataframe with the computed features
col_names = [] #name of the columns
for i in range(len(samples[0])-1):
col_names.append('c_' + str(i))
#preparing objects
samples = np.array(samples)
samp_ar = samples[:, 1:].astype(np.float16)
samp_id = samples[:, 0] #The first element in obs is the id of the sample.
#creating the dataframe
df_agg_sess = pd.DataFrame(samp_ar, columns=col_names)
df_agg_sess['id'] = samp_id
df_agg_sess.index = df_agg_sess.id
#########Working on train and test data#####################
print('Working on users data...')
#Concatenating df_train and df_test
df_tt = pd.concat((df_train, df_test), axis=0, ignore_index=True)
df_tt.index = df_tt.id
df_tt = df_tt.fillna(-1) #Inputing this kind of missing value with -1 (missing values in train and test)
df_tt = df_tt.replace('-unknown-', -1) #-unknown is another way of missing value, then = -1.
########Creating features for train+test
#Removing date_first_booking
df_tt = df_tt.drop(['date_first_booking'], axis=1)
#Number of nulls
df_tt['n_null'] = np.array([sum(r == -1) for r in df_tt.values])
#date_account_created
#(Computing year, month, day, week_number, weekday)
dac = np.vstack(df_tt.date_account_created.astype(str).apply(lambda x: list(map(int, x.split('-')))).values)
df_tt['dac_y'] = dac[:,0]
df_tt['dac_m'] = dac[:,1]
df_tt['dac_d'] = dac[:,2]
dac_dates = [datetime(x[0],x[1],x[2]) for x in dac]
df_tt['dac_wn'] = np.array([d.isocalendar()[1] for d in dac_dates])
df_tt['dac_w'] = np.array([d.weekday() for d in dac_dates])
df_tt_wd = pd.get_dummies(df_tt.dac_w, prefix='dac_w')
df_tt = df_tt.drop(['date_account_created', 'dac_w'], axis=1)
df_tt = pd.concat((df_tt, df_tt_wd), axis=1)
#timestamp_first_active
#(Computing year, month, day, hour, week_number, weekday)
tfa = np.vstack(df_tt.timestamp_first_active.astype(str).apply(lambda x: list(map(int, [x[:4],x[4:6],x[6:8],x[8:10],x[10:12],x[12:14]]))).values)
df_tt['tfa_y'] = tfa[:,0]
df_tt['tfa_m'] = tfa[:,1]
df_tt['tfa_d'] = tfa[:,2]
df_tt['tfa_h'] = tfa[:,3]
tfa_dates = [datetime(x[0],x[1],x[2],x[3],x[4],x[5]) for x in tfa]
df_tt['tfa_wn'] = np.array([d.isocalendar()[1] for d in tfa_dates])
df_tt['tfa_w'] = np.array([d.weekday() for d in tfa_dates])
df_tt_wd = pd.get_dummies(df_tt.tfa_w, prefix='tfa_w')
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 23:20:33 2019
@author: MiaoLi
"""
#%%
import os, sys
import pandas as pd
import ast
# import seaborn as sns
from collections import OrderedDict
from shapely.geometry import Polygon, Point
sys.path.append('C:\\Users\\MiaoLi\\Desktop\\SCALab\\Programming\\crowdingnumerositygit\\GenerationAlgorithm\\VirtualEllipseFunc')
import m_defineEllipses
# import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import math
# import xlsxwriter
#%%=============================================================================
# list my raw data files
# =============================================================================
#parent directory
path = os.path.abspath(os.path.dirname(os.getcwd()))
files = os.listdir(path)
data_csv = [file for file in files if file.startswith('alternative') & file.endswith('csv')]
#%%=============================================================================
# import stimuli display
# =============================================================================
stimuliFile = '../Idea1_DESCO.xlsx'
stimuliInfo = pd.read_excel(stimuliFile)
posi_lists_temp = stimuliInfo['positions'].tolist()
posi_list=[]
for i in posi_lists_temp:
    i = ast.literal_eval(i)  # magic! strips the quotes, parsing the str back into a list
posi_list.append(i)
#%%============================================================================
# inspect disks fall into others crowding zones
# =============================================================================
dic_count_in_crowdingZone = {}
list_count_in_crowdingZone = []
for indexPosiList in range(0,len(posi_list)):
display_posi = posi_list[indexPosiList]
#final ellipses
ellipses = []
for posi in display_posi:
e = m_defineEllipses.defineVirtualEllipses(posi, 0.25, 0.1)
ellipses.append(e)
    # final_ellipses = list(set(ellipses))  # if order doesn't matter
    final_ellipses = list(OrderedDict.fromkeys(ellipses))  # order matters
count_in_crowdingZone = 0
#crowding zones after reomve overlapping areas
for count, i in enumerate(final_ellipses, start = 1):
ellipsePolygon = m_defineEllipses.ellipseToPolygon([i])[0] #radial ellipse
# ellipsePolygonT = ellipseToPolygon([i])[1]#tangential ellipse
epPolygon = Polygon(ellipsePolygon)
# epPolygonT = Polygon(ellipsePolygonT)
# radial_area_dic[(i[0],i[1])] = [] #set the keys of the dictionary--taken_posi
for posi in display_posi:
if epPolygon.contains(Point(posi)) == True:
count_in_crowdingZone += 1
count_number_end = count_in_crowdingZone-len(display_posi)
dic_temp_item = {indexPosiList: count_number_end}
dic_count_in_crowdingZone.update(dic_temp_item)
list_count_in_crowdingZone.append(count_number_end)
stimuliInfo.insert(1, 'count_number',list_count_in_crowdingZone)
# stimuliInfo.to_excel('count_number.xlsx')
#%% =============================================================================
# count_number: increasing ellipse sizes
# =============================================================================
# for i in range(5):
# name='to_plot_nc'+str(i)
# locals()['to_plot_nc'+str(i)]=i
for i in range(1,31):
locals()['list_count_in_crowdingZone'+str(i)] = []
# list_count_in_crowdingZone1, \
# list_count_in_crowdingZone2, \
# list_count_in_crowdingZone3, \
# list_count_in_crowdingZone4, \
# list_count_in_crowdingZone5, \
# list_count_in_crowdingZone6, \
# list_count_in_crowdingZone7, \
# list_count_in_crowdingZone8, \
# list_count_in_crowdingZone9, \
# list_count_in_crowdingZone10, \
# list_count_in_crowdingZone11, \
# list_count_in_crowdingZone12, \
# list_count_in_crowdingZone13, \
# list_count_in_crowdingZone14, \
# list_count_in_crowdingZone15, \
# list_count_in_crowdingZone16, \
# list_count_in_crowdingZone17, \
# list_count_in_crowdingZone18, \
# list_count_in_crowdingZone19, \
# list_count_in_crowdingZone20, \
# list_count_in_crowdingZone21, \
# list_count_in_crowdingZone22, \
# list_count_in_crowdingZone23, \
# list_count_in_crowdingZone24, \
# list_count_in_crowdingZone25, \
# list_count_in_crowdingZone26, \
# list_count_in_crowdingZone27, \
# list_count_in_crowdingZone28, \
# list_count_in_crowdingZone29, \
# list_count_in_crowdingZone30 = ([] for i in range(30))
for indexPosiList, display_posi in enumerate(posi_list):
ellipse_110, ellipse_120, ellipse_130, ellipse_140, ellipse_150, \
ellipse_160, ellipse_170, ellipse_180, ellipse_190, ellipse_200, \
ellipse_210, ellipse_220, ellipse_230, ellipse_240, ellipse_250, \
ellipse_260, ellipse_270, ellipse_280, ellipse_290, ellipse_300, \
ellipse_310, ellipse_320, ellipse_330, ellipse_340, ellipse_350, \
ellipse_360, ellipse_370, ellipse_380, ellipse_390, ellipse_400 = ([] for i in range(30))
for posi in display_posi:
e1 = m_defineEllipses.defineVirtualEllipses(posi, 0.275, 0.11)
e2 = m_defineEllipses.defineVirtualEllipses(posi, 0.3, 0.12)
e3 = m_defineEllipses.defineVirtualEllipses(posi, 0.325, 0.13)
e4 = m_defineEllipses.defineVirtualEllipses(posi, 0.35, 0.14)
e5 = m_defineEllipses.defineVirtualEllipses(posi, 0.375, 0.15)
e6 = m_defineEllipses.defineVirtualEllipses(posi, 0.4, 0.16)
e7 = m_defineEllipses.defineVirtualEllipses(posi, 0.425, 0.17)
e8 = m_defineEllipses.defineVirtualEllipses(posi, 0.45, 0.18)
e9 = m_defineEllipses.defineVirtualEllipses(posi, 0.475, 0.19)
e10 = m_defineEllipses.defineVirtualEllipses(posi, 0.5, 0.20)
e11 = m_defineEllipses.defineVirtualEllipses(posi, 0.525, 0.21)
e12 = m_defineEllipses.defineVirtualEllipses(posi, 0.55, 0.22)
e13 = m_defineEllipses.defineVirtualEllipses(posi, 0.575, 0.23)
e14 = m_defineEllipses.defineVirtualEllipses(posi, 0.6, 0.24)
e15 = m_defineEllipses.defineVirtualEllipses(posi, 0.625, 0.25)
e16 = m_defineEllipses.defineVirtualEllipses(posi, 0.65, 0.26)
e17 = m_defineEllipses.defineVirtualEllipses(posi, 0.675, 0.27)
e18 = m_defineEllipses.defineVirtualEllipses(posi, 0.7, 0.28)
e19 = m_defineEllipses.defineVirtualEllipses(posi, 0.725, 0.29)
e20 = m_defineEllipses.defineVirtualEllipses(posi, 0.75, 0.30)
e21 = m_defineEllipses.defineVirtualEllipses(posi, 0.775, 0.31)
e22 = m_defineEllipses.defineVirtualEllipses(posi, 0.8, 0.32)
e23 = m_defineEllipses.defineVirtualEllipses(posi, 0.825, 0.33)
e24 = m_defineEllipses.defineVirtualEllipses(posi, 0.85, 0.34)
e25 = m_defineEllipses.defineVirtualEllipses(posi, 0.875, 0.35)
e26 = m_defineEllipses.defineVirtualEllipses(posi, 0.9, 0.36)
e27 = m_defineEllipses.defineVirtualEllipses(posi, 0.925, 0.37)
e28 = m_defineEllipses.defineVirtualEllipses(posi, 0.95, 0.38)
e29 = m_defineEllipses.defineVirtualEllipses(posi, 0.975, 0.39)
e30 = m_defineEllipses.defineVirtualEllipses(posi, 1, 0.40)
ellipse_110.append(e1)
ellipse_120.append(e2)
ellipse_130.append(e3)
ellipse_140.append(e4)
ellipse_150.append(e5)
ellipse_160.append(e6)
ellipse_170.append(e7)
ellipse_180.append(e8)
ellipse_190.append(e9)
ellipse_200.append(e10)
ellipse_210.append(e11)
ellipse_220.append(e12)
ellipse_230.append(e13)
ellipse_240.append(e14)
ellipse_250.append(e15)
ellipse_260.append(e16)
ellipse_270.append(e17)
ellipse_280.append(e18)
ellipse_290.append(e19)
ellipse_300.append(e20)
ellipse_310.append(e21)
ellipse_320.append(e22)
ellipse_330.append(e23)
ellipse_340.append(e24)
ellipse_350.append(e25)
ellipse_360.append(e26)
ellipse_370.append(e27)
ellipse_380.append(e28)
ellipse_390.append(e29)
ellipse_400.append(e30)
final_ellipses_110 = list(OrderedDict.fromkeys(ellipse_110))
final_ellipses_120 = list(OrderedDict.fromkeys(ellipse_120))
final_ellipses_130 = list(OrderedDict.fromkeys(ellipse_130))
final_ellipses_140 = list(OrderedDict.fromkeys(ellipse_140))
final_ellipses_150 = list(OrderedDict.fromkeys(ellipse_150))
final_ellipses_160 = list(OrderedDict.fromkeys(ellipse_160))
final_ellipses_170 = list(OrderedDict.fromkeys(ellipse_170))
final_ellipses_180 = list(OrderedDict.fromkeys(ellipse_180))
final_ellipses_190 = list(OrderedDict.fromkeys(ellipse_190))
final_ellipses_200 = list(OrderedDict.fromkeys(ellipse_200))
final_ellipses_210 = list(OrderedDict.fromkeys(ellipse_210))
final_ellipses_220 = list(OrderedDict.fromkeys(ellipse_220))
final_ellipses_230 = list(OrderedDict.fromkeys(ellipse_230))
final_ellipses_240 = list(OrderedDict.fromkeys(ellipse_240))
final_ellipses_250 = list(OrderedDict.fromkeys(ellipse_250))
final_ellipses_260 = list(OrderedDict.fromkeys(ellipse_260))
final_ellipses_270 = list(OrderedDict.fromkeys(ellipse_270))
final_ellipses_280 = list(OrderedDict.fromkeys(ellipse_280))
final_ellipses_290 = list(OrderedDict.fromkeys(ellipse_290))
final_ellipses_300 = list(OrderedDict.fromkeys(ellipse_300))
final_ellipses_310 = list(OrderedDict.fromkeys(ellipse_310))
final_ellipses_320 = list(OrderedDict.fromkeys(ellipse_320))
final_ellipses_330 = list(OrderedDict.fromkeys(ellipse_330))
final_ellipses_340 = list(OrderedDict.fromkeys(ellipse_340))
final_ellipses_350 = list(OrderedDict.fromkeys(ellipse_350))
final_ellipses_360 = list(OrderedDict.fromkeys(ellipse_360))
final_ellipses_370 = list(OrderedDict.fromkeys(ellipse_370))
final_ellipses_380 = list(OrderedDict.fromkeys(ellipse_380))
final_ellipses_390 = list(OrderedDict.fromkeys(ellipse_390))
final_ellipses_400 = list(OrderedDict.fromkeys(ellipse_400))
    count_in_crowdingZone1 = count_in_crowdingZone2 = count_in_crowdingZone3 = count_in_crowdingZone4 = count_in_crowdingZone5 = 0
    count_in_crowdingZone6 = count_in_crowdingZone7 = count_in_crowdingZone8 = count_in_crowdingZone9 = count_in_crowdingZone10 = 0
    count_in_crowdingZone11 = count_in_crowdingZone12 = count_in_crowdingZone13 = count_in_crowdingZone14 = count_in_crowdingZone15 = 0
    count_in_crowdingZone16 = count_in_crowdingZone17 = count_in_crowdingZone18 = count_in_crowdingZone19 = count_in_crowdingZone20 = 0
    count_in_crowdingZone21 = count_in_crowdingZone22 = count_in_crowdingZone23 = count_in_crowdingZone24 = count_in_crowdingZone25 = 0
    count_in_crowdingZone26 = count_in_crowdingZone27 = count_in_crowdingZone28 = count_in_crowdingZone29 = count_in_crowdingZone30 = 0
for i110, i120, i130, i140, i150, i160, i170, i180, i190, i200, i210, i220, i230, i240, i250, i260, i270, i280, i290, i300, i310, i320, i330, i340, i350, i360, i370, i380, i390, i400, \
in zip(final_ellipses_110, final_ellipses_120, final_ellipses_130,final_ellipses_140,final_ellipses_150, \
final_ellipses_160, final_ellipses_170, final_ellipses_180, final_ellipses_190, final_ellipses_200, \
final_ellipses_210, final_ellipses_220, final_ellipses_230,final_ellipses_240,final_ellipses_250, \
final_ellipses_260, final_ellipses_270, final_ellipses_280, final_ellipses_290, final_ellipses_300,\
final_ellipses_310, final_ellipses_320, final_ellipses_330,final_ellipses_340,final_ellipses_350, \
final_ellipses_360, final_ellipses_370, final_ellipses_380, final_ellipses_390, final_ellipses_400):
ellipsePolygon110 = m_defineEllipses.ellipseToPolygon([i110])[0]
ellipsePolygon120 = m_defineEllipses.ellipseToPolygon([i120])[0]
ellipsePolygon130 = m_defineEllipses.ellipseToPolygon([i130])[0]
ellipsePolygon140 = m_defineEllipses.ellipseToPolygon([i140])[0]
ellipsePolygon150 = m_defineEllipses.ellipseToPolygon([i150])[0]
ellipsePolygon160 = m_defineEllipses.ellipseToPolygon([i160])[0]
ellipsePolygon170 = m_defineEllipses.ellipseToPolygon([i170])[0]
ellipsePolygon180 = m_defineEllipses.ellipseToPolygon([i180])[0]
ellipsePolygon190 = m_defineEllipses.ellipseToPolygon([i190])[0]
ellipsePolygon200 = m_defineEllipses.ellipseToPolygon([i200])[0]
ellipsePolygon210 = m_defineEllipses.ellipseToPolygon([i210])[0]
ellipsePolygon220 = m_defineEllipses.ellipseToPolygon([i220])[0]
ellipsePolygon230 = m_defineEllipses.ellipseToPolygon([i230])[0]
ellipsePolygon240 = m_defineEllipses.ellipseToPolygon([i240])[0]
ellipsePolygon250 = m_defineEllipses.ellipseToPolygon([i250])[0]
ellipsePolygon260 = m_defineEllipses.ellipseToPolygon([i260])[0]
ellipsePolygon270 = m_defineEllipses.ellipseToPolygon([i270])[0]
ellipsePolygon280 = m_defineEllipses.ellipseToPolygon([i280])[0]
ellipsePolygon290 = m_defineEllipses.ellipseToPolygon([i290])[0]
ellipsePolygon300 = m_defineEllipses.ellipseToPolygon([i300])[0]
ellipsePolygon310 = m_defineEllipses.ellipseToPolygon([i310])[0]
ellipsePolygon320 = m_defineEllipses.ellipseToPolygon([i320])[0]
ellipsePolygon330 = m_defineEllipses.ellipseToPolygon([i330])[0]
ellipsePolygon340 = m_defineEllipses.ellipseToPolygon([i340])[0]
ellipsePolygon350 = m_defineEllipses.ellipseToPolygon([i350])[0]
ellipsePolygon360 = m_defineEllipses.ellipseToPolygon([i360])[0]
ellipsePolygon370 = m_defineEllipses.ellipseToPolygon([i370])[0]
ellipsePolygon380 = m_defineEllipses.ellipseToPolygon([i380])[0]
ellipsePolygon390 = m_defineEllipses.ellipseToPolygon([i390])[0]
ellipsePolygon400 = m_defineEllipses.ellipseToPolygon([i400])[0]
epPolygon110 = Polygon(ellipsePolygon110)
epPolygon120 = Polygon(ellipsePolygon120)
epPolygon130 = Polygon(ellipsePolygon130)
epPolygon140 = Polygon(ellipsePolygon140)
epPolygon150 = Polygon(ellipsePolygon150)
epPolygon160 = Polygon(ellipsePolygon160)
epPolygon170 = Polygon(ellipsePolygon170)
epPolygon180 = Polygon(ellipsePolygon180)
epPolygon190 = Polygon(ellipsePolygon190)
epPolygon200 = Polygon(ellipsePolygon200)
epPolygon210 = Polygon(ellipsePolygon210)
epPolygon220 = Polygon(ellipsePolygon220)
epPolygon230 = Polygon(ellipsePolygon230)
epPolygon240 = Polygon(ellipsePolygon240)
epPolygon250 = Polygon(ellipsePolygon250)
epPolygon260 = Polygon(ellipsePolygon260)
epPolygon270 = Polygon(ellipsePolygon270)
epPolygon280 = Polygon(ellipsePolygon280)
epPolygon290 = Polygon(ellipsePolygon290)
epPolygon300 = Polygon(ellipsePolygon300)
epPolygon310 = Polygon(ellipsePolygon310)
epPolygon320 = Polygon(ellipsePolygon320)
epPolygon330 = Polygon(ellipsePolygon330)
epPolygon340 = Polygon(ellipsePolygon340)
epPolygon350 = Polygon(ellipsePolygon350)
epPolygon360 = Polygon(ellipsePolygon360)
epPolygon370 = Polygon(ellipsePolygon370)
epPolygon380 = Polygon(ellipsePolygon380)
epPolygon390 = Polygon(ellipsePolygon390)
epPolygon400 = Polygon(ellipsePolygon400)
        for posi in display_posi:
            point = Point(posi)
            if epPolygon110.contains(point):
                count_in_crowdingZone1 += 1
            if epPolygon120.contains(point):
                count_in_crowdingZone2 += 1
            if epPolygon130.contains(point):
                count_in_crowdingZone3 += 1
            if epPolygon140.contains(point):
                count_in_crowdingZone4 += 1
            if epPolygon150.contains(point):
                count_in_crowdingZone5 += 1
            if epPolygon160.contains(point):
                count_in_crowdingZone6 += 1
            if epPolygon170.contains(point):
                count_in_crowdingZone7 += 1
            if epPolygon180.contains(point):
                count_in_crowdingZone8 += 1
            if epPolygon190.contains(point):
                count_in_crowdingZone9 += 1
            if epPolygon200.contains(point):
                count_in_crowdingZone10 += 1
            if epPolygon210.contains(point):
                count_in_crowdingZone11 += 1
            if epPolygon220.contains(point):
                count_in_crowdingZone12 += 1
            if epPolygon230.contains(point):
                count_in_crowdingZone13 += 1
            if epPolygon240.contains(point):
                count_in_crowdingZone14 += 1
            if epPolygon250.contains(point):
                count_in_crowdingZone15 += 1
            if epPolygon260.contains(point):
                count_in_crowdingZone16 += 1
            if epPolygon270.contains(point):
                count_in_crowdingZone17 += 1
            if epPolygon280.contains(point):
                count_in_crowdingZone18 += 1
            if epPolygon290.contains(point):
                count_in_crowdingZone19 += 1
            if epPolygon300.contains(point):
                count_in_crowdingZone20 += 1
            if epPolygon310.contains(point):
                count_in_crowdingZone21 += 1
            if epPolygon320.contains(point):
                count_in_crowdingZone22 += 1
            if epPolygon330.contains(point):
                count_in_crowdingZone23 += 1
            if epPolygon340.contains(point):
                count_in_crowdingZone24 += 1
            if epPolygon350.contains(point):
                count_in_crowdingZone25 += 1
            if epPolygon360.contains(point):
                count_in_crowdingZone26 += 1
            if epPolygon370.contains(point):
                count_in_crowdingZone27 += 1
            if epPolygon380.contains(point):
                count_in_crowdingZone28 += 1
            if epPolygon390.contains(point):
                count_in_crowdingZone29 += 1
            if epPolygon400.contains(point):
                count_in_crowdingZone30 += 1
count_number_end1 = count_in_crowdingZone1 -len(display_posi)
count_number_end2 = count_in_crowdingZone2 -len(display_posi)
count_number_end3 = count_in_crowdingZone3 -len(display_posi)
count_number_end4 = count_in_crowdingZone4 -len(display_posi)
count_number_end5 = count_in_crowdingZone5 -len(display_posi)
count_number_end6 = count_in_crowdingZone6 -len(display_posi)
count_number_end7 = count_in_crowdingZone7 -len(display_posi)
count_number_end8 = count_in_crowdingZone8 -len(display_posi)
count_number_end9 = count_in_crowdingZone9 -len(display_posi)
count_number_end10 = count_in_crowdingZone10-len(display_posi)
count_number_end11 = count_in_crowdingZone11-len(display_posi)
count_number_end12 = count_in_crowdingZone12-len(display_posi)
count_number_end13 = count_in_crowdingZone13-len(display_posi)
count_number_end14 = count_in_crowdingZone14-len(display_posi)
count_number_end15 = count_in_crowdingZone15-len(display_posi)
count_number_end16 = count_in_crowdingZone16-len(display_posi)
count_number_end17 = count_in_crowdingZone17-len(display_posi)
count_number_end18 = count_in_crowdingZone18-len(display_posi)
count_number_end19 = count_in_crowdingZone19-len(display_posi)
count_number_end20 = count_in_crowdingZone20-len(display_posi)
count_number_end21 = count_in_crowdingZone21-len(display_posi)
count_number_end22 = count_in_crowdingZone22-len(display_posi)
count_number_end23 = count_in_crowdingZone23-len(display_posi)
count_number_end24 = count_in_crowdingZone24-len(display_posi)
count_number_end25 = count_in_crowdingZone25-len(display_posi)
count_number_end26 = count_in_crowdingZone26-len(display_posi)
count_number_end27 = count_in_crowdingZone27-len(display_posi)
count_number_end28 = count_in_crowdingZone28-len(display_posi)
count_number_end29 = count_in_crowdingZone29-len(display_posi)
count_number_end30 = count_in_crowdingZone30-len(display_posi)
list_count_in_crowdingZone1.append(count_number_end1)
list_count_in_crowdingZone2.append(count_number_end2)
list_count_in_crowdingZone3.append(count_number_end3)
list_count_in_crowdingZone4.append(count_number_end4)
list_count_in_crowdingZone5.append(count_number_end5)
list_count_in_crowdingZone6.append(count_number_end6)
list_count_in_crowdingZone7.append(count_number_end7)
list_count_in_crowdingZone8.append(count_number_end8)
list_count_in_crowdingZone9.append(count_number_end9)
list_count_in_crowdingZone10.append(count_number_end10)
list_count_in_crowdingZone11.append(count_number_end11)
list_count_in_crowdingZone12.append(count_number_end12)
list_count_in_crowdingZone13.append(count_number_end13)
list_count_in_crowdingZone14.append(count_number_end14)
list_count_in_crowdingZone15.append(count_number_end15)
list_count_in_crowdingZone16.append(count_number_end16)
list_count_in_crowdingZone17.append(count_number_end17)
list_count_in_crowdingZone18.append(count_number_end18)
list_count_in_crowdingZone19.append(count_number_end19)
list_count_in_crowdingZone20.append(count_number_end20)
list_count_in_crowdingZone21.append(count_number_end21)
list_count_in_crowdingZone22.append(count_number_end22)
list_count_in_crowdingZone23.append(count_number_end23)
list_count_in_crowdingZone24.append(count_number_end24)
list_count_in_crowdingZone25.append(count_number_end25)
list_count_in_crowdingZone26.append(count_number_end26)
list_count_in_crowdingZone27.append(count_number_end27)
list_count_in_crowdingZone28.append(count_number_end28)
list_count_in_crowdingZone29.append(count_number_end29)
list_count_in_crowdingZone30.append(count_number_end30)
stimuliInfo.insert(1, 'count_number1',list_count_in_crowdingZone1)
stimuliInfo.insert(1, 'count_number2',list_count_in_crowdingZone2)
stimuliInfo.insert(1, 'count_number3',list_count_in_crowdingZone3)
stimuliInfo.insert(1, 'count_number4',list_count_in_crowdingZone4)
stimuliInfo.insert(1, 'count_number5',list_count_in_crowdingZone5)
stimuliInfo.insert(1, 'count_number6',list_count_in_crowdingZone6)
stimuliInfo.insert(1, 'count_number7',list_count_in_crowdingZone7)
stimuliInfo.insert(1, 'count_number8',list_count_in_crowdingZone8)
stimuliInfo.insert(1, 'count_number9',list_count_in_crowdingZone9)
stimuliInfo.insert(1, 'count_number10',list_count_in_crowdingZone10)
stimuliInfo.insert(1, 'count_number11',list_count_in_crowdingZone11)
stimuliInfo.insert(1, 'count_number12',list_count_in_crowdingZone12)
stimuliInfo.insert(1, 'count_number13',list_count_in_crowdingZone13)
stimuliInfo.insert(1, 'count_number14',list_count_in_crowdingZone14)
stimuliInfo.insert(1, 'count_number15',list_count_in_crowdingZone15)
stimuliInfo.insert(1, 'count_number16',list_count_in_crowdingZone16)
stimuliInfo.insert(1, 'count_number17',list_count_in_crowdingZone17)
stimuliInfo.insert(1, 'count_number18',list_count_in_crowdingZone18)
stimuliInfo.insert(1, 'count_number19',list_count_in_crowdingZone19)
stimuliInfo.insert(1, 'count_number20',list_count_in_crowdingZone20)
stimuliInfo.insert(1, 'count_number21',list_count_in_crowdingZone21)
stimuliInfo.insert(1, 'count_number22',list_count_in_crowdingZone22)
stimuliInfo.insert(1, 'count_number23',list_count_in_crowdingZone23)
stimuliInfo.insert(1, 'count_number24',list_count_in_crowdingZone24)
stimuliInfo.insert(1, 'count_number25',list_count_in_crowdingZone25)
stimuliInfo.insert(1, 'count_number26',list_count_in_crowdingZone26)
stimuliInfo.insert(1, 'count_number27',list_count_in_crowdingZone27)
stimuliInfo.insert(1, 'count_number28',list_count_in_crowdingZone28)
stimuliInfo.insert(1, 'count_number29',list_count_in_crowdingZone29)
stimuliInfo.insert(1, 'count_number30',list_count_in_crowdingZone30)
stimuliInfo.to_excel('try1.xlsx')
stimuliInfo_C = stimuliInfo[stimuliInfo.crowdingcons == 1]
stimuliInfo_NC = stimuliInfo[stimuliInfo.crowdingcons == 0]
#%%=============================================================================
# pd dataFrame add csv
# =============================================================================
totalData = pd.DataFrame()
for i in data_csv:
data_exp1 = | pd.read_csv('../'+i) | pandas.read_csv |
import pandas as pd
def interpolation_imputation(filename):
df = pd.read_csv(filename)
filled = | pd.DataFrame() | pandas.DataFrame |
import collections
import os
import traceback
from datetime import datetime, timedelta
import pandas as pd
from openpyxl.styles import PatternFill
import config
from openpyxl import load_workbook
import numpy as np
import xlrd
def get_date_index(date, dates_values, lookback_index=0):
if isinstance(dates_values[0], str):
dates_values = [datetime.strptime(x, '%Y-%m-%d') for x in dates_values]
elif isinstance(dates_values[0], np.datetime64):
dates_values = [x.astype('M8[ms]').astype('O') for x in dates_values]
if len(dates_values) > 1:
if dates_values[0] > dates_values[1]: # if dates decreasing rightwards or downwards
date_index = next((index for (index, item) in enumerate(dates_values) if item < date), 0)
# adjusted_lookback = date_item - lookback_period
# lookback_index = next((
# index for (index, item) in enumerate(dates_values[date_index:]) if item <= adjusted_lookback), 0)
return date_index + lookback_index
else: # if dates increasing rightwards or downwards
date_index = next((index for (index, item) in enumerate(dates_values) if item > date), -1)
# adjusted_lookback = date_item - lookback_period
# lookback_index = next((
# index for (index, item) in enumerate(dates_values[date_index:]) if item > adjusted_lookback), -1)
            return date_index - lookback_index  # TODO: lookback_index may arrive as a date here; convert it before calling this method
else:
return 0
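# Illustrative usage sketch for get_date_index (values are hypothetical and kept
# as a comment so module behaviour is unchanged):
#   dates = pd.date_range('2018-12-31', periods=8, freq='Q')[::-1]   # decreasing order
#   idx = get_date_index(datetime(2020, 6, 15), dates, lookback_index=1)
#   # -> index of the first entry strictly earlier than 2020-06-15, moved one
#   #    step further into the past by lookback_index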
def slice_series_dates(series, from_date, to_date):
date_idx_from = get_date_index(from_date, series.index)
date_idx_to = get_date_index(to_date, series.index)
return series[date_idx_from:date_idx_to]
def save_into_csv(filename, df, sheet_name='Sheet1', startrow=None,
overwrite_sheet=False, concat=False,
**to_excel_kwargs):
# ignore [engine] parameter if it was passed
if 'engine' in to_excel_kwargs:
to_excel_kwargs.pop('engine')
writer = pd.ExcelWriter(filename, engine='openpyxl')
try:
# try to open an existing workbook
writer.book = load_workbook(filename)
# get the last row in the existing Excel sheet
# if it was not specified explicitly
if startrow is None and sheet_name in writer.book.sheetnames:
startrow = writer.book[sheet_name].max_row
# TODO Not working yet
if concat and sheet_name in writer.book.sheetnames:
try:
sheet_df = pd.read_excel(filename, sheet_name,
index_col=[0, 1, 2] if config.balance_sheet_name in sheet_name else [0, 1])
print(sheet_df.to_string())
idx = writer.book.sheetnames.index(sheet_name)
writer.book.remove(writer.book.worksheets[idx])
writer.book.create_sheet(sheet_name, idx)
df = pd.concat([df, sheet_df], axis=1)
df = df.reindex(sorted(df.columns, reverse=True), axis=1)
except Exception:
traceback.print_exc()
# truncate sheet
if overwrite_sheet and sheet_name in writer.book.sheetnames:
# index of [sheet_name] sheet
idx = writer.book.sheetnames.index(sheet_name)
# remove [sheet_name]
writer.book.remove(writer.book.worksheets[idx])
# create an empty sheet [sheet_name] using old index
writer.book.create_sheet(sheet_name, idx)
# copy existing sheets
writer.sheets = {ws.title: ws for ws in writer.book.worksheets}
except FileNotFoundError:
# file does not exist yet, we will create it
pass
if startrow is None:
startrow = 0
# write out the new sheet
df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)
# save the workbook
writer.save()
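# Example call (hypothetical workbook path and sheet name; keyword arguments not
# consumed here are forwarded to DataFrame.to_excel via **to_excel_kwargs):
#   save_into_csv('reports/AAPL.xlsx', df, sheet_name='Income Statement',
#                 overwrite_sheet=True, index=True)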
def read_df_from_csv(path, sheet_name='Sheet1'):
if os.path.exists(path):
workbook = xlrd.open_workbook(path, on_demand=True)
sheets = workbook.sheet_names()
if sheet_name not in sheets:
return pd.DataFrame()
else:
xls = pd.ExcelFile(path)
return pd.read_excel(xls, sheet_name, index_col=0)
return pd.DataFrame()
def read_entry_from_pickle(path, x, y, lookback_index=0):
if os.path.exists(path):
df: pd.DataFrame = pd.read_pickle(filepath_or_buffer=path)
if isinstance(y, datetime): # if the input is a date...
date_index = get_date_index(date=y, dates_values=df.index.values, lookback_index=lookback_index)
return df[x].iloc[date_index]
elif isinstance(x, datetime):
date_index = get_date_index(date=x, dates_values=df.columns, lookback_index=lookback_index)
reduced_df = df.iloc[:, date_index]
for el in list(y):
if el in reduced_df.index:
reduced_df = reduced_df.loc[el]
else:
return np.nan
return reduced_df
elif isinstance(y, list) and isinstance(y[0], datetime):
to_return = pd.Series()
for date in y:
date_index = get_date_index(date=date, dates_values=df.index, lookback_index=lookback_index)
reduced_df = df.iloc[date_index, :]
for el in ([x] if not isinstance(x, list) else x):
if el in reduced_df.index:
reduced_df = reduced_df.loc[el]
else:
to_return[date] = np.nan
to_return[date] = reduced_df
return to_return
elif isinstance(x, list) and isinstance(x[0], datetime):
to_return = pd.Series()
for date in x:
date_index = get_date_index(date=date, dates_values=df.columns, lookback_index=lookback_index)
reduced_df = df.iloc[:, date_index]
if reduced_df.index.isin([tuple(y)]).any():
reduced_df = reduced_df.loc[tuple(y)]
to_return[date] = reduced_df
else:
to_return[date] = np.nan
return to_return
else:
return df[x].loc[y]
else:
return np.nan
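# Illustrative lookups (hypothetical pickle path and line items; not executed):
#   read_entry_from_pickle('pickles/AAPL_prices.pkl', 'Close', datetime(2020, 3, 31))
#   read_entry_from_pickle('pickles/AAPL_balance_sheet.pkl', datetime(2020, 3, 31),
#                          ['Assets', 'Current Assets', 'Cash'])
# Either axis may hold dates; the other holds (possibly nested) statement entries.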
def read_entry_from_csv(path, x, y, sheet_name='Sheet1', lookback_index=0, skip_first_sheet=False):
if os.path.exists(path):
# ticker = Path(path).stem
if config.balance_sheet_name in sheet_name:
index_col = [0, 1, 2]
elif config.income_statement_name in sheet_name or config.cash_flow_statement_name in sheet_name:
index_col = [0, 1]
else:
index_col = [0]
df = pd.read_excel(pd.ExcelFile(path), sheet_name, index_col=index_col)
if isinstance(y, datetime): # if the input is a date...
# if isinstance(df.index, pd.DatetimeIndex):
date_index = get_date_index(date=y, dates_values=df.index.values, lookback_index=lookback_index)
# print('The {} for {} on {}, lookback {}, is {}'.format(x, ticker, y, lookback_index, df[x].iloc[date_index]))
return df[x].iloc[date_index]
elif isinstance(x, datetime):
date_index = get_date_index(date=x, dates_values=df.columns, lookback_index=lookback_index)
reduced_df = df.iloc[:, date_index]
for el in list(y):
if el in reduced_df.index:
reduced_df = reduced_df.loc[el]
else:
# print('The {} for {} on {}, lookback {}, is {}'.format(y, ticker, x, lookback_index, np.nan))
return np.nan
# print('The {} for {} on {}, lookback {}, is {}'.format(y, ticker, x, lookback_index, reduced_df))
return reduced_df
else:
# print('The {}/{} for {} is {}'.format(x, y, ticker, df[x].loc[y]))
return df[x].loc[y]
else:
# print('The entry is {}'.format(np.nan))
return np.nan
def read_dates_from_csv(path, sheet_name):
if os.path.exists(path):
sheets = xlrd.open_workbook(path, on_demand=True).sheet_names()
if sheet_name not in sheets:
return []
with open(path, "r") as csv:
xls = | pd.ExcelFile(path) | pandas.ExcelFile |
# -*- coding: utf-8 -*-
import os
import glob
import pandas as pd
import numpy as np
from collections import Counter
from graphpype.utils_net import read_Pajek_corres_nodes
from graphpype.utils_dtype_coord import where_in_coords
from graphpype.utils_cor import where_in_labels
from graphpype.utils_mod import read_lol_file
from graphpype.utils_mod import get_modularity_value_from_lol_file
from graphpype.utils_mod import get_values_from_global_info_file
from graphpype.utils_mod import get_path_length_from_info_dists_file
def glob_natural_sorted(reg_exp):
# TODO -> utils.py
"""sort reg_exp filenames in natural way (for numbers)"""
print(reg_exp)
files = glob.glob(reg_exp)
print(len(files))
natural_sorted_files = [reg_exp.replace(
'*', str(i), -1) for i in range(len(files))]
return natural_sorted_files, list(range(len(files)))
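# Example (hypothetical pattern; illustrative only): files named run_0.txt,
# run_1.txt, ..., run_12.txt come back in numeric order instead of the
# lexicographic order a plain glob would give:
#   files, indexes = glob_natural_sorted('results/run_*.txt')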
def compute_rada_df(iter_path, df, radatools_version="3.2", mapflow=[],
                    mapflow_name=""):
    """Gather radatools global graph properties (modularity, path length, diameter, global efficiency) into df."""
if radatools_version == "3.2":
net_prop_dir = "net_prop"
elif radatools_version == "4.0":
net_prop_dir = "prep_rada"
elif radatools_version == "5.0":
net_prop_dir = "net_prop"
elif radatools_version == "run":
net_prop_dir = ""
else:
print("Warning, could not find radatools_version {}"
.format(radatools_version))
return
# modularity
if len(mapflow) == 0:
modularity_file = os.path.join(
iter_path, "community_rada", "Z_List.lol")
print(modularity_file)
if os.path.exists(modularity_file):
mod_val = get_modularity_value_from_lol_file(modularity_file)
df['Modularity'] = mod_val
print(df)
# info_global
global_info_file = os.path.join(
iter_path, net_prop_dir, "Z_List-info_global.txt")
if os.path.exists(global_info_file):
global_info_values = get_values_from_global_info_file(
global_info_file)
df.update(global_info_values)
# info_dists
path_length_file = os.path.join(
iter_path, net_prop_dir, "Z_List-info_dists.txt")
if os.path.exists(path_length_file):
mean_path_length, diameter, global_efficiency = \
get_path_length_from_info_dists_file(path_length_file)
df['Mean_path_length'] = str(mean_path_length)
df['Diameter'] = str(diameter)
df['Global_efficiency'] = str(global_efficiency)
else:
print("Could not find file {}".format(path_length_file))
else:
df['Modularity'] = []
df[mapflow_name] = []
df['Mean_path_length'] = []
df['Diameter'] = []
df['Global_efficiency'] = []
for i, cond in enumerate(mapflow):
df[mapflow_name].append(cond)
modularity_file = os.path.join(
iter_path, "community_rada", "mapflow",
"_community_rada"+str(i), "Z_List.lol")
if os.path.exists(modularity_file):
mod_val = get_modularity_value_from_lol_file(modularity_file)
df['Modularity'].append(mod_val)
else:
print("Missing modularity file {}".format(modularity_file))
df['Modularity'].append(np.nan)
# info_global
global_info_file = os.path.join(
iter_path, net_prop_dir, "mapflow", "_" + net_prop_dir+str(i),
"Z_List-info_global.txt")
if os.path.exists(global_info_file):
global_info_values = get_values_from_global_info_file(
global_info_file)
for key, value in global_info_values.items():
if key not in list(df.keys()):
df[key] = []
df[key].append(value)
# info_dists
path_length_file = os.path.join(
iter_path, net_prop_dir, "mapflow", "_" + net_prop_dir+str(i),
"Z_List-info_dists.txt")
if os.path.exists(path_length_file):
mean_path_length, diameter, global_efficiency = \
get_path_length_from_info_dists_file(path_length_file)
df['Mean_path_length'].append(str(mean_path_length))
df['Diameter'].append(str(diameter))
df['Global_efficiency'].append(str(global_efficiency))
else:
df['Mean_path_length'].append(str(np.nan))
df['Diameter'].append(str(np.nan))
df['Global_efficiency'].append(str(np.nan))
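# Sketch of a typical call (the iteration path is hypothetical; in practice it is
# an iterable output directory produced by the graphpype/nipype pipeline):
#   props = {}
#   compute_rada_df('/analysis/graph_pipe/_subject_s01', props, radatools_version="5.0")
#   # props now holds 'Modularity', 'Mean_path_length', 'Diameter', 'Global_efficiency', ...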
def compute_nodes_rada_df(
local_dir, gm_coords=[], coords_file="", gm_labels=[], labels_file="",
        radatools_version="3.2", mapflow=[], mapflow_name=""):
    """Build per-node property dataframes (coordinates, labels, modules, node roles) from radatools outputs."""
if radatools_version == "3.2":
net_prop_dir = "net_prop"
elif radatools_version == "4.0":
net_prop_dir = "prep_rada"
elif radatools_version == "5.0":
net_prop_dir = "net_prop"
elif radatools_version == "run":
net_prop_dir = ""
else:
print("Warning, could not find radatools_version {}"
.format(radatools_version))
return
list_df = []
if len(mapflow) == 0:
Pajek_file = os.path.join(local_dir, net_prop_dir, "Z_List.net")
if os.path.exists(Pajek_file):
columns = []
columns_names = []
# nodes in the connected graph
node_corres = read_Pajek_corres_nodes(Pajek_file)
print(os.path.exists(coords_file))
if os.path.exists(coords_file) and len(gm_coords):
# MNI coordinates
coords = np.array(np.loadtxt(coords_file), dtype=int)
# node_coords
node_coords = coords[node_corres, :]
# where_in_gm_mask
where_in_gm_mask = where_in_coords(node_coords, gm_coords)
where_in_gm_mask = where_in_gm_mask.reshape(
where_in_gm_mask.shape[0], 1)
columns.append(where_in_gm_mask)
columns_names.append('Where_in_GM_mask')
if os.path.exists(labels_file):
labels = np.array(
[line.strip() for line in open(labels_file)],
dtype=str)
node_labels = labels[node_corres].reshape(-1, 1)
                    columns.append(node_labels)
                    columns_names.append('labels')
                    columns.append(node_coords)
                    columns_names.extend(['MNI_x', 'MNI_y', 'MNI_z'])
elif os.path.exists(labels_file) and len(gm_labels):
# TODO
labels = np.array([line.strip() for line in open(labels_file)],
dtype=str)
node_labels = labels[node_corres].reshape(-1, 1)
where_in_gm_mask = where_in_labels(node_labels, labels)
columns.append(where_in_gm_mask)
columns_names.append('Where_in_GM_mask')
columns.append(node_labels)
columns_names.append('labels')
                raise NotImplementedError("TODO: combining labels_file with gm_labels is not implemented yet")
elif len(gm_labels):
node_labels = np.array(gm_labels)[node_corres].reshape(-1, 1)
where_in_gm_mask = where_in_labels(node_labels,
gm_labels).reshape(-1, 1)
print(node_labels)
print(where_in_gm_mask)
columns.append(where_in_gm_mask)
columns_names.append('Where_in_GM_mask')
columns.append(node_labels)
columns_names.append('labels')
else:
print("No labels, no coords")
columns.append(node_corres)
columns_names.append('node_corres')
print(columns)
print(columns_names)
list_df.append(pd.DataFrame(
np.concatenate(tuple(columns), axis=1),
columns=columns_names))
else:
print("Missing {}".format(Pajek_file))
info_nodes_file = os.path.join(
local_dir, net_prop_dir, "Z_List-info_nodes.txt")
print(info_nodes_file)
if os.path.exists(info_nodes_file):
# loading info_nodes
df_node_info = pd.read_table(info_nodes_file)
list_df.append(df_node_info)
# modules /community_vect
partition_file = os.path.join(local_dir, "community_rada",
"Z_List.lol")
if os.path.exists(partition_file):
# loading partition_file
community_vect = read_lol_file(partition_file)
list_df.append(pd.DataFrame(community_vect, columns=['Module']))
# node roles
roles_file = os.path.join(local_dir, "node_roles", "node_roles.txt")
part_coeff_file = os.path.join(
local_dir, "node_roles", "all_participation_coeff.txt")
Z_com_degree_file = os.path.join(
local_dir, "node_roles", "all_Z_com_degree.txt")
if os.path.exists(roles_file) and os.path.exists(part_coeff_file) and \
os.path.exists(Z_com_degree_file):
        # loading node roles
node_roles = np.array(np.loadtxt(roles_file), dtype=int)
part_coeff = np.loadtxt(part_coeff_file)
part_coeff = part_coeff.reshape(part_coeff.shape[0], 1)
Z_com_degree = np.loadtxt(Z_com_degree_file)
Z_com_degree = Z_com_degree.reshape(Z_com_degree.shape[0], 1)
list_df.append(pd.DataFrame(
np.concatenate((node_roles, part_coeff, Z_com_degree), axis=1),
columns=['Role_quality', 'Role_quantity',
'Participation_coefficient', 'Z_community_degree']))
# ndi values
ndi_values_file = os.path.join(
local_dir, "node_roles", "ndi_values.txt")
if os.path.exists(ndi_values_file):
        # loading node dissociation index (NDI) values
ndi_values = np.array(np.loadtxt(ndi_values_file))
list_df.append(pd.DataFrame(ndi_values,
columns=['Node_Dissociation_Index']))
else:
# Multiple files (mapflow)
for i, cond in enumerate(mapflow):
list_strip_df = []
Pajek_file = os.path.join(local_dir, "prep_rada", "mapflow",
"_prep_rada"+str(i), "Z_List.net")
if os.path.exists(coords_file) and os.path.exists(Pajek_file) and \
os.path.exists(labels_file):
# labels
labels = np.array([line.strip() for line in open(labels_file)],
dtype=str)
# MNI coordinates
coords = np.array(np.loadtxt(coords_file), dtype=int)
# nodes in the connected graph
node_corres = read_Pajek_corres_nodes(Pajek_file)
# node_coords
node_coords = coords[node_corres, :]
node_labels = labels[node_corres].reshape(-1, 1)
# where_in_gm_mask
where_in_gm_mask = where_in_coords(node_coords, gm_coords)
where_in_gm_mask = where_in_gm_mask.reshape(
where_in_gm_mask.shape[0], 1)
# print where_in_gm_mask
print(where_in_gm_mask.shape)
list_strip_df.append(pd.DataFrame(
np.concatenate((where_in_gm_mask, node_labels,
node_coords),
axis=1),
columns=['Where_in_GM_mask', 'labels', 'MNI_x', 'MNI_y',
'MNI_z']))
else:
if not os.path.exists(coords_file):
print("Missing {}".format(coords_file))
if not os.path.exists(Pajek_file):
print("Missing {}".format(Pajek_file))
if not os.path.exists(labels_file):
print("Missing {}".format(labels_file))
info_nodes_file = os.path.join(
local_dir, net_prop_dir, "Z_List-info_nodes.txt")
print(info_nodes_file)
if os.path.exists(info_nodes_file):
# loading info_nodes
df_node_info = | pd.read_table(info_nodes_file) | pandas.read_table |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import sys
import argparse
model_artifact_name = "3-stage-nn"
parser = argparse.ArgumentParser(description='Training 3-Stage NN')
parser.add_argument('input', metavar='INPUT',
help='Input folder', default=".")
parser.add_argument('output', metavar='OUTPUT',
help='Output folder', default=".")
parser.add_argument('--batch-size', type=int, default=256,
help='Batch size')
args = parser.parse_args()
input_folder = args.input
output_folder = args.output
import os
os.makedirs(f'{output_folder}/model', exist_ok=True)
os.makedirs(f'{output_folder}/interim', exist_ok=True)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from scipy.sparse.csgraph import connected_components
from umap import UMAP
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
import os
import copy
import seaborn as sns
import time
from sklearn import preprocessing
from sklearn.metrics import log_loss
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA,FactorAnalysis
from sklearn.manifold import TSNE
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
print(torch.cuda.is_available())
import warnings
# warnings.filterwarnings('ignore')
# In[ ]:
torch.__version__
# In[ ]:
NB = '25'
IS_TRAIN = True
MODEL_DIR = f"{output_folder}/model" # "../model"
INT_DIR = f"{output_folder}/interim" # "../interim"
NSEEDS = 5 # 5
DEVICE = ('cuda' if torch.cuda.is_available() else 'cpu')
EPOCHS = 15
BATCH_SIZE = args.batch_size
LEARNING_RATE = 5e-3
WEIGHT_DECAY = 1e-5
EARLY_STOPPING_STEPS = 10
EARLY_STOP = False
NFOLDS = 5 # 5
PMIN = 0.0005
PMAX = 0.9995
SMIN = 0.0
SMAX = 1.0
# In[ ]:
train_features = pd.read_csv(f'{input_folder}/train_features.csv')
train_targets_scored = pd.read_csv(f'{input_folder}/train_targets_scored.csv')
train_targets_nonscored = pd.read_csv(f'{input_folder}/train_targets_nonscored.csv')
test_features = pd.read_csv(f'{input_folder}/test_features.csv')
sample_submission = pd.read_csv(f'{input_folder}/sample_submission.csv')
# In[ ]:
train_targets_nonscored = train_targets_nonscored.loc[:, train_targets_nonscored.sum() != 0]
print(train_targets_nonscored.shape)
# In[ ]:
for c in train_targets_nonscored.columns:
if c != "sig_id":
train_targets_nonscored[c] = np.maximum(PMIN, np.minimum(PMAX, train_targets_nonscored[c]))
# In[ ]:
print("(nsamples, nfeatures)")
print(train_features.shape)
print(train_targets_scored.shape)
print(train_targets_nonscored.shape)
print(test_features.shape)
print(sample_submission.shape)
# In[ ]:
GENES = [col for col in train_features.columns if col.startswith('g-')]
CELLS = [col for col in train_features.columns if col.startswith('c-')]
# In[ ]:
def seed_everything(seed=1903):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(seed=1903)
# In[ ]:
# In[ ]:
# GENES
n_comp = 90
n_dim = 45
data = pd.concat([pd.DataFrame(train_features[GENES]), pd.DataFrame(test_features[GENES])])
if IS_TRAIN:
fa = FactorAnalysis(n_components=n_comp, random_state=1903).fit(data[GENES])
pd.to_pickle(fa, f'{MODEL_DIR}/{NB}_factor_analysis_g.pkl')
umap = UMAP(n_components=n_dim, random_state=1903).fit(data[GENES])
pd.to_pickle(umap, f'{MODEL_DIR}/{NB}_umap_g.pkl')
else:
fa = pd.read_pickle(f'{MODEL_DIR}/{NB}_factor_analysis_g.pkl')
umap = pd.read_pickle(f'{MODEL_DIR}/{NB}_umap_g.pkl')
data2 = (fa.transform(data[GENES]))
data3 = (umap.transform(data[GENES]))
train2 = data2[:train_features.shape[0]]
test2 = data2[-test_features.shape[0]:]
train3 = data3[:train_features.shape[0]]
test3 = data3[-test_features.shape[0]:]
train2 = pd.DataFrame(train2, columns=[f'fa_G-{i}' for i in range(n_comp)])
train3 = pd.DataFrame(train3, columns=[f'umap_G-{i}' for i in range(n_dim)])
test2 = pd.DataFrame(test2, columns=[f'fa_G-{i}' for i in range(n_comp)])
test3 = pd.DataFrame(test3, columns=[f'umap_G-{i}' for i in range(n_dim)])
train_features = pd.concat((train_features, train2, train3), axis=1)
test_features = pd.concat((test_features, test2, test3), axis=1)
#CELLS
n_comp = 50
n_dim = 25
data = pd.concat([pd.DataFrame(train_features[CELLS]), pd.DataFrame(test_features[CELLS])])
if IS_TRAIN:
fa = FactorAnalysis(n_components=n_comp, random_state=1903).fit(data[CELLS])
pd.to_pickle(fa, f'{MODEL_DIR}/{NB}_factor_analysis_c.pkl')
umap = UMAP(n_components=n_dim, random_state=1903).fit(data[CELLS])
pd.to_pickle(umap, f'{MODEL_DIR}/{NB}_umap_c.pkl')
else:
fa = pd.read_pickle(f'{MODEL_DIR}/{NB}_factor_analysis_c.pkl')
umap = pd.read_pickle(f'{MODEL_DIR}/{NB}_umap_c.pkl')
data2 = (fa.transform(data[CELLS]))
data3 = (umap.transform(data[CELLS]))
train2 = data2[:train_features.shape[0]]
test2 = data2[-test_features.shape[0]:]
train3 = data3[:train_features.shape[0]]
test3 = data3[-test_features.shape[0]:]
train2 = pd.DataFrame(train2, columns=[f'fa_C-{i}' for i in range(n_comp)])
train3 = pd.DataFrame(train3, columns=[f'umap_C-{i}' for i in range(n_dim)])
test2 = pd.DataFrame(test2, columns=[f'fa_C-{i}' for i in range(n_comp)])
test3 = pd.DataFrame(test3, columns=[f'umap_C-{i}' for i in range(n_dim)])
train_features = pd.concat((train_features, train2, train3), axis=1)
test_features = pd.concat((test_features, test2, test3), axis=1)
# drop_cols = [f'c-{i}' for i in range(n_comp,len(CELLS))]
# In[ ]:
# In[ ]:
from sklearn.preprocessing import QuantileTransformer
for col in (GENES + CELLS):
vec_len = len(train_features[col].values)
vec_len_test = len(test_features[col].values)
raw_vec = pd.concat([train_features, test_features])[col].values.reshape(vec_len+vec_len_test, 1)
if IS_TRAIN:
transformer = QuantileTransformer(n_quantiles=100, random_state=123, output_distribution="normal")
transformer.fit(raw_vec)
pd.to_pickle(transformer, f'{MODEL_DIR}/{NB}_{col}_quantile_transformer.pkl')
else:
transformer = pd.read_pickle(f'{MODEL_DIR}/{NB}_{col}_quantile_transformer.pkl')
train_features[col] = transformer.transform(train_features[col].values.reshape(vec_len, 1)).reshape(1, vec_len)[0]
test_features[col] = transformer.transform(test_features[col].values.reshape(vec_len_test, 1)).reshape(1, vec_len_test)[0]
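# Each g-/c- column above is rank-mapped onto a standard normal shape. A minimal
# standalone sketch of the same idea (dummy data, illustrative only):
#   qt = QuantileTransformer(n_quantiles=100, random_state=123, output_distribution="normal")
#   z = qt.fit_transform(np.random.rand(1000, 1))   # roughly N(0, 1) afterwards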
# In[ ]:
print(train_features.shape)
print(test_features.shape)
# In[ ]:
# In[ ]:
train = train_features.merge(train_targets_nonscored, on='sig_id')
train = train[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)
test = test_features[test_features['cp_type']!='ctl_vehicle'].reset_index(drop=True)
target = train[train_targets_nonscored.columns]
# In[ ]:
train = train.drop('cp_type', axis=1)
test = test.drop('cp_type', axis=1)
# In[ ]:
print(target.shape)
print(train_features.shape)
print(test_features.shape)
print(train.shape)
print(test.shape)
# In[ ]:
target_cols = target.drop('sig_id', axis=1).columns.values.tolist()
# In[ ]:
folds = train.copy()
mskf = MultilabelStratifiedKFold(n_splits=NFOLDS)
for f, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)):
folds.loc[v_idx, 'kfold'] = int(f)
folds['kfold'] = folds['kfold'].astype(int)
folds
# In[ ]:
print(train.shape)
print(folds.shape)
print(test.shape)
print(target.shape)
print(sample_submission.shape)
# In[ ]:
class MoADataset:
def __init__(self, features, targets):
self.features = features
self.targets = targets
def __len__(self):
return (self.features.shape[0])
def __getitem__(self, idx):
dct = {
'x' : torch.tensor(self.features[idx, :], dtype=torch.float),
'y' : torch.tensor(self.targets[idx, :], dtype=torch.float)
}
return dct
class TestDataset:
def __init__(self, features):
self.features = features
def __len__(self):
return (self.features.shape[0])
def __getitem__(self, idx):
dct = {
'x' : torch.tensor(self.features[idx, :], dtype=torch.float)
}
return dct
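# Wiring sketch (illustrative; run_training below does this for real):
#   ds = MoADataset(x_train, y_train)            # numpy arrays -> per-item tensors
#   dl = torch.utils.data.DataLoader(ds, batch_size=BATCH_SIZE, shuffle=True)
#   batch = next(iter(dl))                       # batch['x'], batch['y']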
# In[ ]:
def train_fn(model, optimizer, scheduler, loss_fn, dataloader, device):
model.train()
final_loss = 0
for data in dataloader:
optimizer.zero_grad()
inputs, targets = data['x'].to(device), data['y'].to(device)
# print(inputs.shape)
outputs = model(inputs)
loss = loss_fn(outputs, targets)
loss.backward()
optimizer.step()
scheduler.step()
final_loss += loss.item()
final_loss /= len(dataloader)
return final_loss
def valid_fn(model, loss_fn, dataloader, device):
model.eval()
final_loss = 0
valid_preds = []
for data in dataloader:
inputs, targets = data['x'].to(device), data['y'].to(device)
outputs = model(inputs)
loss = loss_fn(outputs, targets)
final_loss += loss.item()
valid_preds.append(outputs.sigmoid().detach().cpu().numpy())
final_loss /= len(dataloader)
valid_preds = np.concatenate(valid_preds)
return final_loss, valid_preds
def inference_fn(model, dataloader, device):
model.eval()
preds = []
for data in dataloader:
inputs = data['x'].to(device)
with torch.no_grad():
outputs = model(inputs)
preds.append(outputs.sigmoid().detach().cpu().numpy())
preds = np.concatenate(preds)
return preds
# In[ ]:
class Model(nn.Module):
def __init__(self, num_features, num_targets, hidden_size):
super(Model, self).__init__()
self.batch_norm1 = nn.BatchNorm1d(num_features)
self.dropout1 = nn.Dropout(0.15)
self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size))
self.batch_norm2 = nn.BatchNorm1d(hidden_size)
self.dropout2 = nn.Dropout(0.3)
self.dense2 = nn.Linear(hidden_size, hidden_size)
self.batch_norm3 = nn.BatchNorm1d(hidden_size)
self.dropout3 = nn.Dropout(0.25)
self.dense3 = nn.utils.weight_norm(nn.Linear(hidden_size, num_targets))
def forward(self, x):
x = self.batch_norm1(x)
x = self.dropout1(x)
x = F.leaky_relu(self.dense1(x))
x = self.batch_norm2(x)
x = self.dropout2(x)
x = F.leaky_relu(self.dense2(x))
x = self.batch_norm3(x)
x = self.dropout3(x)
x = self.dense3(x)
return x
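# Shape-check sketch (dummy batch, illustrative only; BatchNorm1d needs batch > 1):
#   m = Model(num_features=100, num_targets=10, hidden_size=2048)
#   logits = m(torch.randn(4, 100))              # -> (4, 10); sigmoid is applied later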
# In[ ]:
def process_data(data):
data = pd.get_dummies(data, columns=['cp_time','cp_dose'])
return data
# In[ ]:
feature_cols = [c for c in process_data(folds).columns if c not in target_cols]
feature_cols = [c for c in feature_cols if c not in ['kfold','sig_id']]
len(feature_cols)
# In[ ]:
num_features=len(feature_cols)
num_targets=len(target_cols)
hidden_size=2048
# In[ ]:
def run_training(fold, seed):
seed_everything(seed)
train = process_data(folds)
test_ = process_data(test)
trn_idx = train[train['kfold'] != fold].index
val_idx = train[train['kfold'] == fold].index
train_df = train[train['kfold'] != fold].reset_index(drop=True)
valid_df = train[train['kfold'] == fold].reset_index(drop=True)
x_train, y_train = train_df[feature_cols].values, train_df[target_cols].values
x_valid, y_valid = valid_df[feature_cols].values, valid_df[target_cols].values
train_dataset = MoADataset(x_train, y_train)
valid_dataset = MoADataset(x_valid, y_valid)
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False)
model = Model(
num_features=num_features,
num_targets=num_targets,
hidden_size=hidden_size,
)
model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.2, div_factor=1e3,
max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader))
loss_fn = nn.BCEWithLogitsLoss()
early_stopping_steps = EARLY_STOPPING_STEPS
early_step = 0
oof = np.zeros((len(train), target.iloc[:, 1:].shape[1]))
best_loss = np.inf
best_loss_epoch = -1
if IS_TRAIN:
for epoch in range(EPOCHS):
train_loss = train_fn(model, optimizer, scheduler, loss_fn, trainloader, DEVICE)
valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE)
if valid_loss < best_loss:
best_loss = valid_loss
best_loss_epoch = epoch
oof[val_idx] = valid_preds
torch.save(model.state_dict(), f"{MODEL_DIR}/{NB}-nonscored-SEED{seed}-FOLD{fold}_.pth")
elif(EARLY_STOP == True):
early_step += 1
if (early_step >= early_stopping_steps):
break
if epoch % 10 == 0 or epoch == EPOCHS-1:
print(f"seed: {seed}, FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss:.6f}, valid_loss: {valid_loss:.6f}, best_loss: {best_loss:.6f}, best_loss_epoch: {best_loss_epoch}")
#--------------------- PREDICTION---------------------
x_test = test_[feature_cols].values
testdataset = TestDataset(x_test)
testloader = torch.utils.data.DataLoader(testdataset, batch_size=BATCH_SIZE, shuffle=False)
model = Model(
num_features=num_features,
num_targets=num_targets,
hidden_size=hidden_size,
)
model.load_state_dict(torch.load(f"{MODEL_DIR}/{NB}-nonscored-SEED{seed}-FOLD{fold}_.pth"))
model.to(DEVICE)
if not IS_TRAIN:
valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE)
oof[val_idx] = valid_preds
predictions = np.zeros((len(test_), target.iloc[:, 1:].shape[1]))
predictions = inference_fn(model, testloader, DEVICE)
return oof, predictions
# In[ ]:
def run_k_fold(NFOLDS, seed):
oof = np.zeros((len(train), len(target_cols)))
predictions = np.zeros((len(test), len(target_cols)))
for fold in range(NFOLDS):
oof_, pred_ = run_training(fold, seed)
predictions += pred_ / NFOLDS
oof += oof_
return oof, predictions
# In[ ]:
SEED = range(NSEEDS)
oof = np.zeros((len(train), len(target_cols)))
predictions = np.zeros((len(test), len(target_cols)))
time_start = time.time()
for seed in SEED:
oof_, predictions_ = run_k_fold(NFOLDS, seed)
oof += oof_ / len(SEED)
predictions += predictions_ / len(SEED)
print(f"elapsed time: {time.time() - time_start}")
train[target_cols] = oof
test[target_cols] = predictions
print(oof.shape)
print(predictions.shape)
# In[ ]:
train.to_pickle(f"{INT_DIR}/{NB}-train_nonscore_pred.pkl")
test.to_pickle(f"{INT_DIR}/{NB}-test_nonscore_pred.pkl")
# In[ ]:
len(target_cols)
# In[ ]:
train[target_cols] = np.maximum(PMIN, np.minimum(PMAX, train[target_cols]))
valid_results = train_targets_nonscored.drop(columns=target_cols).merge(train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)
y_true = train_targets_nonscored[target_cols].values
y_true = y_true > 0.5
y_pred = valid_results[target_cols].values
score = 0
for i in range(len(target_cols)):
score_ = log_loss(y_true[:, i], y_pred[:, i])
    score += score_ / len(target_cols)  # average over the actual target columns (sig_id is not a target)
print("CV log_loss: ", score)
# In[ ]:
EPOCHS = 25
# In[ ]:
nonscored_target = [c for c in train[train_targets_nonscored.columns] if c != "sig_id"]
# In[ ]:
nonscored_target
# In[ ]:
train = | pd.read_pickle(f"{INT_DIR}/{NB}-train_nonscore_pred.pkl") | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 17:50:04 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
df = | pd.read_csv(r'CoinDatasets\ripple_price.csv') | pandas.read_csv |
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
import time
from fatetrack_connections import buildFeatureFrame, buildOffsetFrame, generateCandidates, generateLinks, DivSimScore, DivSetupScore, DivisionCanditates, UpdateConnectionsDiv, TranslationTable, SolveMinCostTable, ReviewCostTable
def TranslateConnections(ConnectionTable, TranslationTable, timepoint, preference = "Master_ID"):
subTranslationTable_0 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_0['slabel_t0'] = subTranslationTable_0['slabel']
subTranslationTable_1 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_1['slabel_t1'] = subTranslationTable_1['slabel']
merge_0 = pd.merge(ConnectionTable, subTranslationTable_0, on="slabel_t0")
merge = pd.merge(merge_0, subTranslationTable_1, on="slabel_t1")
pref = str(preference)
result = merge.loc[:,[pref+"_x",pref+"_y"]]
result = result.drop_duplicates()
result = result.dropna(thresh=1)
result = result.reset_index(drop=True)
result = result.rename(columns = {(pref+"_x") : (pref+"_"+str(timepoint)), (pref+"_y") : (pref+"_"+str(timepoint+1))})
return(result)
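# Illustrative call (frame/table names are hypothetical): map per-frame
# 'slabel_t0'/'slabel_t1' links onto persistent IDs for timepoint 3:
#   master_links = TranslateConnections(ConnectionTable=links_t3,
#                                       TranslationTable=trans, timepoint=3,
#                                       preference="Master_ID")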
def RajTLG_wrap(filename_t0, filename_t1,timepoint,ConnectionTable,TranslationTable,path="./"):
frame0 = buildFeatureFrame(filename_t0,timepoint,pathtoimage=path);
frame1 = buildFeatureFrame(filename_t1,timepoint+1,pathtoimage=path);
frames = pd.concat([frame0,frame1])
frames["timepoint"] = frames["time"]
InfoDF = pd.merge(frames,TranslationTable, on=['label','timepoint'])
RajTLG_translation = TranslateConnections(ConnectionTable=ConnectionTable, TranslationTable=TranslationTable, timepoint=timepoint, preference="RajTLG_ID")
RajTLGFrame = pd.DataFrame()
if (timepoint == 0):
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-0"])
tmpParent = "NaN"
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-0"])
tmpParent = int(RajTLG_translation.loc[RajTLG_translation["RajTLG_ID"+"_"+str(timepoint+1)] == tmpID,
"RajTLG_ID"+"_"+str(timepoint)])
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
RajTLGFrame = RajTLGFrame.reset_index(drop=True)
RajTLGFrame = RajTLGFrame.rename(columns={0:"pointID", 1:"frameNumber",
2:"xCoord",3:"yCoord",4:"parentID"})
RajTLGFrame["annotation"] = "none"
#RajTLGFrame.to_csv(outfilename,index=False)
return(RajTLGFrame)
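# Typical use (hypothetical mask filenames; illustrative only):
#   rows = RajTLG_wrap("mask_t0.tif", "mask_t1.tif", timepoint=0,
#                      ConnectionTable=conn, TranslationTable=trans, path="./masks/")
#   rows.to_csv("RajTLG_frame0.csv", index=False)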
def MatchToGoldStd(FileCompare,FileGoldSTD):
GoldSTD = pd.read_csv(FileGoldSTD)
FateTrack = pd.read_csv(FileCompare)
GoldTranslationTable = pd.DataFrame()
for obj in range(FateTrack.shape[0]):
FateID = FateTrack.loc[obj,"pointID"]
frame = FateTrack.loc[obj,"frameNumber"]
xC = FateTrack.loc[obj,"xCoord"]
yC = FateTrack.loc[obj,"yCoord"]
tmpGold = GoldSTD.loc[GoldSTD["frameNumber"] == frame,]
tmpGold = tmpGold.reset_index(drop=True)
dist = np.array(np.sqrt((tmpGold["xCoord"]-xC)**2 + (tmpGold["yCoord"]-yC)**2))
GoldIndex = np.where(dist == dist.min())[0][0]
GoldID = tmpGold.loc[GoldIndex,"pointID"]
GoldTranslationTable = GoldTranslationTable.append(pd.DataFrame([GoldID,FateID]).T)
GoldTranslationTable = GoldTranslationTable.rename(columns={0:"GoldID",1:"FateID"})
return(GoldTranslationTable)
def CheckAccuracy(frame,FileCompare,FileGoldSTD,skip=0):
TranslateGold = MatchToGoldStd(FileCompare,FileGoldSTD)
GoldSTD = pd.read_csv(FileGoldSTD)
FateTrack = pd.read_csv(FileCompare)
FateTrack = FateTrack.loc[FateTrack["frameNumber"]==frame,]
FateTrack = FateTrack.reset_index(drop=True)
GoldSTD = GoldSTD.loc[GoldSTD["frameNumber"]==frame,]
GoldSTD = GoldSTD.reset_index(drop=True)
correct=0
incorrect=0
for obj in range(FateTrack.shape[0]):
FateID = FateTrack.loc[obj,"pointID"]
FateParent = FateTrack.loc[obj,"parentID"]
transGoldID = TranslateGold.loc[TranslateGold["FateID"]==FateID,"GoldID"].values[0] ;
transGoldParent = TranslateGold.loc[TranslateGold["FateID"]==FateParent,"GoldID"] ;
if not(transGoldParent.empty):
transGoldParent = transGoldParent.values[0]
actualGoldParent = GoldSTD.loc[GoldSTD["pointID"] == transGoldID,"parentID"]
            if not (actualGoldParent.empty or math.isnan(actualGoldParent.values[0])):
actualGoldParent = int(actualGoldParent.values[0])
if(actualGoldParent == transGoldParent):
correct = correct+1
else:
incorrect = incorrect+1
results = pd.DataFrame([frame, skip, correct, incorrect]).T
results = results.rename(columns={0:"Frame",1:"Skip",2:"Correct",3:"Incorrect"})
return(results)
def AssembleAccMeasurements(FileCompare,FileGoldSTD,skip=0):
GoldSTD = pd.read_csv(FileGoldSTD)
maxFrame = np.max(GoldSTD["frameNumber"])
completeResults = pd.DataFrame()
for frame in (np.array(range(1,maxFrame))+1):
tmpFrame = CheckAccuracy(frame=frame,FileCompare=FileCompare,FileGoldSTD=FileGoldSTD,skip=skip)
completeResults = completeResults.append(tmpFrame)
completeResults = completeResults.reset_index(drop=True)
return(completeResults)
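# Accuracy sweep sketch (hypothetical CSV paths; illustrative only):
#   acc = AssembleAccMeasurements(FileCompare="fatetrack_out.csv",
#                                 FileGoldSTD="gold_standard.csv", skip=0)
#   acc.groupby("Skip")[["Correct", "Incorrect"]].sum()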
def redefineGold(FileGoldSTD, outfilename, skip = 1,startTime = 0):
GoldSTD = pd.read_csv(FileGoldSTD)
sub = startTime+1
maxFrame = np.max(GoldSTD['frameNumber'])
frames_to_keep = np.array(range(startTime+1,maxFrame+1,skip+1))
starter_frame = frames_to_keep[0]
other_frames = frames_to_keep[1:]
newGoldSTD = GoldSTD.loc[GoldSTD["frameNumber"].isin(other_frames),:]
newGoldSTD = newGoldSTD.reset_index(drop=True)
starterGold = GoldSTD.loc[GoldSTD["frameNumber"]==starter_frame,:]
starterGold = starterGold.reset_index(drop=True)
starterGold["parentID"] = "NaN"
pointsNew = pd.concat([starterGold, newGoldSTD])["pointID"].values
framesOld = np.unique(newGoldSTD["frameNumber"])
transmitFrame = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import pandas as pd
import pytest
from covsirphy import find_args, save_dataframe, Filer, StopWatch, Evaluator
from covsirphy import UnExpectedValueError
class TestArgument(object):
def test_find_args(self):
assert find_args(Filer, directory="output") == {"directory": "output"}
assert find_args([Filer, Filer.files], directory="output") == {"directory": "output"}
class TestFiler(object):
def test_filing(self):
with pytest.raises(ValueError):
Filer("output", numbering="xxx")
filer = Filer(directory="output", prefix="jpn", suffix=None, numbering="01")
# Create filenames
filer.png("records")
filer.jpg("records")
filer.csv("records", index=True)
# Check files
assert len(filer.files(ext=None)) == 3
assert len(filer.files(ext="png")) == 1
assert len(filer.files(ext="jpg")) == 1
assert len(filer.files(ext="csv")) == 1
# Save CSV file
warnings.filterwarnings("ignore", category=DeprecationWarning)
save_dataframe(pd.DataFrame(), filename=None, index=False)
class TestStopWatch(object):
def test_stopwatch(self):
stop_watch = StopWatch()
assert isinstance(stop_watch.stop_show(), str)
class TestEvaluator(object):
@pytest.mark.parametrize("metric", ["ME", "MAE", "MSE", "MSLE", "MAPE", "RMSE", "RMSLE", "R2"])
def test_score_series(self, metric):
assert metric in Evaluator.metrics()
true = pd.Series([5, 10, 8, 6])
pred = pd.Series([8, 12, 6, 5])
evaluator = Evaluator(true, pred, on=None)
score_metric = evaluator.score(metric=metric)
score_metrics = evaluator.score(metrics=metric)
assert score_metric == score_metrics
assert isinstance(Evaluator.smaller_is_better(metric=metric), bool)
@pytest.mark.parametrize("metric", ["ME", "MAE", "MSE", "MSLE", "MAPE", "RMSE", "RMSLE", "R2"])
@pytest.mark.parametrize("how", ["all", "inner"])
@pytest.mark.parametrize("on", [None, "join_on"])
def test_score_dataframe(self, metric, how, on):
true = pd.DataFrame(
{
"join_on": [0, 1, 2, 3, 4, 5],
"value": [20, 40, 30, 50, 90, 10]
}
)
pred = pd.DataFrame(
{
"join_on": [0, 2, 3, 4, 6, 7],
"value": [20, 40, 30, 50, 110, 55]
}
)
evaluator = Evaluator(true, pred, how=how, on=on)
if metric == "ME" and (how == "all" or on is None):
with pytest.raises(ValueError):
evaluator.score(metric=metric)
return
assert isinstance(evaluator.score(metric=metric), float)
def test_error(self):
with pytest.raises(TypeError):
Evaluator([1, 2, 3], [2, 5, 7])
true = pd.Series([5, 10, 8, 6])
pred = | pd.Series([8, 12, 6, 5]) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from weakly_supervised_parser.settings import PTB_TRAIN_GOLD_WITHOUT_PUNCTUATION_ALIGNED_PATH
from weakly_supervised_parser.settings import PTB_TRAIN_SENTENCES_WITHOUT_PUNCTUATION_PATH
from weakly_supervised_parser.utils.prepare_dataset import DataLoaderHelper
from weakly_supervised_parser.inference import process_test_sample
from weakly_supervised_parser.tree.helpers import get_constituents, get_distituents
def prepare_data_for_self_training(
inside_model, train_initial, valid_initial, threshold, num_train_rows, num_valid_examples, seed, scale_axis, predict_batch_size
):
train_sentences = DataLoaderHelper(input_file_object=PTB_TRAIN_SENTENCES_WITHOUT_PUNCTUATION_PATH).read_lines()
train_gold_file_path = PTB_TRAIN_GOLD_WITHOUT_PUNCTUATION_ALIGNED_PATH
lst = []
for train_index, train_sentence in enumerate(train_sentences):
if train_index == num_train_rows:
break
best_parse = process_test_sample(
train_index,
train_sentence,
train_gold_file_path,
predict_type="inside",
scale_axis=scale_axis,
predict_batch_size=predict_batch_size,
model=inside_model,
)
best_parse_get_constituents = get_constituents(best_parse)
best_parse_get_distituents = get_distituents(best_parse)
if best_parse_get_constituents:
constituents_proba = inside_model.predict_proba(
pd.DataFrame(dict(sentence=best_parse_get_constituents)), scale_axis=scale_axis, predict_batch_size=predict_batch_size
)[:, 1]
df_constituents = pd.DataFrame({"sentence": best_parse_get_constituents, "label": constituents_proba})
df_constituents["label"] = np.where(df_constituents["label"] > threshold, 1, -1)
if best_parse_get_distituents:
            distituents_proba = inside_model.predict_proba(
                pd.DataFrame(dict(sentence=best_parse_get_distituents)), scale_axis=scale_axis, predict_batch_size=predict_batch_size
            )[:, 0]
df_distituents = pd.DataFrame({"sentence": best_parse_get_distituents, "label": distituents_proba})
df_distituents["label"] = np.where(df_distituents["label"] > threshold, 0, -1)
if best_parse_get_constituents and best_parse_get_distituents:
out = pd.concat([df_constituents, df_distituents])
elif best_parse_get_constituents and not best_parse_get_distituents:
out = df_constituents
elif best_parse_get_distituents and not best_parse_get_constituents:
out = df_distituents
        else:
            # neither constituents nor distituents were produced; skip this sentence
            continue
        lst.append(out)
df_out = pd.concat(lst).sample(frac=1.0, random_state=seed)
df_out.drop_duplicates(subset=["sentence"], inplace=True)
df_out.reset_index(drop=True, inplace=True)
valid_idx = np.concatenate(
(
df_out[df_out["label"] == 1].index.values[: int(num_valid_examples // 4)],
df_out[df_out["label"] == 0].index.values[: int(num_valid_examples // (4 / 3))],
)
)
valid_df = df_out.loc[valid_idx]
train_idx = df_out.loc[~df_out.index.isin(valid_idx)].index.values
train_df = df_out.loc[np.concatenate((train_idx, df_out[df_out["label"] == -1].index.values))]
train_augmented = pd.concat([train_initial, train_df]).drop_duplicates(subset=["sentence"])
valid_augmented = pd.concat([valid_initial, valid_df]).drop_duplicates(subset=["sentence"])
return train_augmented, valid_augmented
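# One self-training round, sketched (the model and initial frames are assumed to
# exist already; illustrative only):
#   train_aug, valid_aug = prepare_data_for_self_training(
#       inside_model, train_initial, valid_initial, threshold=0.99,
#       num_train_rows=1000, num_valid_examples=200, seed=42,
#       scale_axis=1, predict_batch_size=256)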
def prepare_outside_strings(inside_model, upper_threshold, lower_threshold, num_train_rows, seed, scale_axis, predict_batch_size):
train_sentences = DataLoaderHelper(input_file_object=PTB_TRAIN_SENTENCES_WITHOUT_PUNCTUATION_PATH).read_lines()
train_gold_file_path = PTB_TRAIN_GOLD_WITHOUT_PUNCTUATION_ALIGNED_PATH
lst = []
for train_index, train_sentence in enumerate(train_sentences):
if train_index == num_train_rows:
break
best_parse, df = process_test_sample(
train_index,
train_sentence,
train_gold_file_path,
predict_type="inside",
model=inside_model,
scale_axis=scale_axis,
predict_batch_size=predict_batch_size,
return_df=True,
)
outside_constituent_samples = pd.DataFrame(dict(sentence=df.loc[df["scores"] > upper_threshold, "outside_sentence"].values, label=1))
outside_distituent_samples = pd.DataFrame(dict(sentence=df.loc[df["scores"] < lower_threshold, "outside_sentence"].values, label=0))
        lst.append(pd.concat([outside_constituent_samples, outside_distituent_samples]))
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import datetime
import functools
import warnings
from abc import ABC, abstractmethod
import collections
from collections.abc import Sequence
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
import toolz
from pandas import DataFrame, date_range
from pandas.tseries.holiday import AbstractHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from pytz import UTC
from exchange_calendars import errors
from .calendar_helpers import (
NANOSECONDS_PER_MINUTE,
NP_NAT,
Date,
Minute,
Session,
TradingMinute,
_TradingIndex,
compute_minutes,
next_divider_idx,
one_minute_earlier,
one_minute_later,
parse_date,
parse_session,
parse_timestamp,
parse_trading_minute,
previous_divider_idx,
)
from .utils.memoize import lazyval
from .utils.pandas_utils import days_at_time
GLOBAL_DEFAULT_START = pd.Timestamp.now(tz=UTC).floor("D") - pd.DateOffset(years=20)
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
GLOBAL_DEFAULT_END = pd.Timestamp.now(tz=UTC).floor("D") + pd.DateOffset(years=1)
NANOS_IN_MINUTE = 60000000000
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY)
WEEKENDS = (SATURDAY, SUNDAY)
def selection(arr, start, end):
predicates = []
if start is not None:
predicates.append(start.tz_localize(UTC) <= arr)
if end is not None:
predicates.append(arr < end.tz_localize(UTC))
if not predicates:
return arr
return arr[np.all(predicates, axis=0)]
def _group_times(all_days, times, tz, offset=0):
if times is None:
return None
elements = [
days_at_time(selection(all_days, start, end), time, tz, offset)
for (start, time), (end, _) in toolz.sliding_window(
2, toolz.concatv(times, [(None, None)])
)
]
return elements[0].append(elements[1:])
class deprecate:
"""Decorator for deprecated/renamed ExchangeCalendar methods."""
def __init__(
self,
deprecated_release: str = "3.4",
removal_release: str = "4.0",
alt: str = "",
renamed: bool = True,
prop: bool = False,
):
self.deprecated_release = "release " + deprecated_release
self.removal_release = "release " + removal_release
self.alt = alt
self.renamed = renamed
if renamed:
assert alt, "pass `alt` if renaming"
self.obj_type = "property" if prop else "method"
self.is_method = not prop
def __call__(self, f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
warnings.warn(self._message(f), FutureWarning)
return f(*args, **kwargs)
return wrapped_f
def _message(self, f):
msg = (
f"`{f.__name__}` was deprecated in {self.deprecated_release}"
f" and will be removed in {self.removal_release}."
)
if self.alt:
if self.renamed:
msg += f" The {self.obj_type} has been renamed `{self.alt}`."
if self.is_method:
msg += (
f" NB parameter names may also have changed (see "
f" documentation for `{self.alt}`)."
)
else:
msg += f" Use `{self.alt}`."
return msg
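# Illustrative sketch (names are assumptions): a renamed method would
# typically keep a deprecated alias decorated with `deprecate`, e.g.
#
#     @deprecate(alt="session_minutes")
#     def minutes_for_session(self, session):
#         return self.session_minutes(session)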
class HolidayCalendar(AbstractHolidayCalendar):
def __init__(self, rules):
super(HolidayCalendar, self).__init__(rules=rules)
class ExchangeCalendar(ABC):
"""Representation of timing information of a single market exchange.
The timing information comprises sessions, open/close times and, for
exchanges that observe an intraday break, break_start/break_end times.
For exchanges that do not observe an intraday break a session
represents a contiguous set of minutes. Where an exchange observes
an intraday break a session represents two contiguous sets of minutes
separated by the intraday break.
Each session has a label that is midnight UTC. It is important to note
that a session label should not be considered a specific point in time,
and that midnight UTC is just being used for convenience.
For each session, we store the open and close time together with, for
those exchanges with breaks, the break start and break end. All times
are defined as UTC.
Parameters
----------
start : default: later of 20 years ago or first supported start date.
First calendar session will be `start`, if `start` is a session, or
first session after `start`.
end : default: earliest of 1 year from 'today' or last supported end date.
Last calendar session will be `end`, if `end` is a session, or last
session before `end`.
side : default: "both" ("left" for 24 hour calendars)
Define which of session open/close and break start/end should
be treated as a trading minute:
"left" - treat session open and break_start as trading minutes,
do not treat session close or break_end as trading minutes.
"right" - treat session close and break_end as trading minutes,
            do not treat session open or break_start as trading minutes.
"both" - treat all of session open, session close, break_start
and break_end as trading minutes.
"neither" - treat none of session open, session close,
break_start or break_end as trading minutes.
Raises
------
ValueError
If `start` is earlier than the earliest supported start date.
If `end` is later than the latest supported end date.
If `start` parses to a later date than `end`.
Notes
-----
Exchange calendars were originally defined for the Zipline package from
Quantopian under the package 'trading_calendars'. Since 2021 they have
been maintained under the 'exchange_calendars' package (a fork of
'trading_calendars') by an active community of contributing users.
Some calendars have defined start and end bounds within which
contributors have endeavoured to ensure the calendar's accuracy and
outside of which the calendar would not be accurate. These bounds
are enforced such that passing `start` or `end` as dates that are
out-of-bounds will raise a ValueError. The bounds of each calendar are
exposed via the `bound_start` and `bound_end` properties.
Many calendars do not have bounds defined (in these cases `bound_start`
and/or `bound_end` return None). These calendars can be created through
any date range although it should be noted that the earlier the start
date, the greater the potential for inaccuracies.
In all cases, no guarantees are offered as to the accuracy of any
calendar.
Internal method parameters:
_parse: bool
Determines if a `minute` or `session` parameter should be
parsed (default True). Passed as False:
- internally to prevent double parsing.
- by tests for efficiency.
"""
_LEFT_SIDES = ["left", "both"]
_RIGHT_SIDES = ["right", "both"]
def __init__(
self,
start: Date | None = None,
end: Date | None = None,
side: str | None = None,
):
side = side if side is not None else self.default_side()
if side not in self.valid_sides():
raise ValueError(
f"`side` must be in {self.valid_sides()} although received as {side}."
)
self._side = side
if start is None:
start = self.default_start
else:
start = parse_date(start, "start", raise_oob=False)
if self.bound_start is not None and start < self.bound_start:
raise ValueError(self._bound_start_error_msg(start))
if end is None:
end = self.default_end
else:
end = parse_date(end, "end", raise_oob=False)
if self.bound_end is not None and end > self.bound_end:
raise ValueError(self._bound_end_error_msg(end))
if start >= end:
raise ValueError(
"`start` must be earlier than `end` although `start` parsed as"
f" '{start}' and `end` as '{end}'."
)
# Midnight in UTC for each trading day.
_all_days = date_range(start, end, freq=self.day, tz=UTC)
if _all_days.empty:
raise errors.NoSessionsError(calendar_name=self.name, start=start, end=end)
# `DatetimeIndex`s of standard opens/closes for each day.
self._opens = _group_times(
_all_days,
self.open_times,
self.tz,
self.open_offset,
)
self._break_starts = _group_times(
_all_days,
self.break_start_times,
self.tz,
)
self._break_ends = _group_times(
_all_days,
self.break_end_times,
self.tz,
)
self._closes = _group_times(
_all_days,
self.close_times,
self.tz,
self.close_offset,
)
# Apply any special offsets first
self.apply_special_offsets(_all_days, start, end)
# Series mapping sessions with nonstandard opens/closes.
_special_opens = self._calculate_special_opens(start, end)
_special_closes = self._calculate_special_closes(start, end)
# Overwrite the special opens and closes on top of the standard ones.
_overwrite_special_dates(_all_days, self._opens, _special_opens)
_overwrite_special_dates(_all_days, self._closes, _special_closes)
_remove_breaks_for_special_dates(
_all_days,
self._break_starts,
_special_closes,
)
_remove_breaks_for_special_dates(
_all_days,
self._break_ends,
_special_closes,
)
if self._break_starts is None:
break_starts = None
else:
break_starts = self._break_starts.tz_localize(None)
if self._break_ends is None:
break_ends = None
else:
break_ends = self._break_ends.tz_localize(None)
self.schedule = DataFrame(
index=_all_days,
data=collections.OrderedDict(
[
("market_open", self._opens.tz_localize(None)),
("break_start", break_starts),
("break_end", break_ends),
("market_close", self._closes.tz_localize(None)),
]
),
dtype="datetime64[ns]",
)
self.opens_nanos = self.schedule.market_open.values.astype(np.int64)
self.break_starts_nanos = self.schedule.break_start.values.astype(np.int64)
self.break_ends_nanos = self.schedule.break_end.values.astype(np.int64)
self.closes_nanos = self.schedule.market_close.values.astype(np.int64)
_check_breaks_match(self.break_starts_nanos, self.break_ends_nanos)
self._late_opens = _special_opens.index
self._early_closes = _special_closes.index
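    # Illustrative sketch (hypothetical subclass, not part of this module):
    # a minimal concrete calendar only needs to supply `name`, `tz` and the
    # open/close times; the constructor above derives the full schedule from
    # them together with any holidays/special times the subclass declares.
    #
    #     class ExampleCalendar(ExchangeCalendar):
    #         name = "EXMP"
    #         tz = UTC                                      # pytz timezone
    #         open_times = ((None, datetime.time(9, 30)),)
    #         close_times = ((None, datetime.time(16, 0)),)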
# Methods and properties that define calendar and which should be
    # overridden or extended, if and as required, by subclass.
@property
@abstractmethod
def name(self) -> str:
raise NotImplementedError()
@property
def bound_start(self) -> pd.Timestamp | None:
"""Earliest date from which calendar can be constructed.
Returns
-------
pd.Timestamp or None
Earliest date from which calendar can be constructed. Must have
tz as "UTC". None if no limit.
Notes
-----
To impose a constraint on the earliest date from which a calendar
can be constructed subclass should override this method and
optionally override `_bound_start_error_msg`.
"""
return None
@property
def bound_end(self) -> pd.Timestamp | None:
"""Latest date to which calendar can be constructed.
Returns
-------
pd.Timestamp or None
Latest date to which calendar can be constructed. Must have tz
as "UTC". None if no limit.
Notes
-----
To impose a constraint on the latest date to which a calendar can
be constructed subclass should override this method and optionally
override `_bound_end_error_msg`.
"""
return None
def _bound_start_error_msg(self, start: pd.Timestamp) -> str:
"""Return error message to handle `start` being out-of-bounds.
See Also
--------
bound_start
"""
return (
f"The earliest date from which calendar {self.name} can be"
f" evaluated is {self.bound_start}, although received `start` as"
f" {start}."
)
def _bound_end_error_msg(self, end: pd.Timestamp) -> str:
"""Return error message to handle `end` being out-of-bounds.
See Also
--------
bound_end
"""
return (
f"The latest date to which calendar {self.name} can be evaluated"
f" is {self.bound_end}, although received `end` as {end}."
)
@property
def default_start(self) -> pd.Timestamp:
if self.bound_start is None:
return GLOBAL_DEFAULT_START
else:
return max(GLOBAL_DEFAULT_START, self.bound_start)
@property
def default_end(self) -> pd.Timestamp:
if self.bound_end is None:
return GLOBAL_DEFAULT_END
else:
return min(GLOBAL_DEFAULT_END, self.bound_end)
@property
@abstractmethod
def tz(self):
raise NotImplementedError()
@property
@abstractmethod
def open_times(self) -> Sequence[tuple[pd.Timestamp | None, datetime.time]]:
"""Local open time(s).
Returns
-------
Sequence[tuple[pd.Timestamp | None, datetime.time]]:
Sequence of tuples representing (start_date, open_time) where:
start_date: date from which `open_time` applies. None for
first item.
open_time: exchange's local open time.
Notes
-----
Examples for concreting `open_times` on a subclass.
Example where open time is constant throughout period covered by
calendar:
open_times = ((None, datetime.time(9)),)
Example where open times have varied over period covered by
calendar:
open_times = (
(None, time(9, 30)),
(pd.Timestamp("1978-04-01"), datetime.time(10, 0)),
(pd.Timestamp("1986-04-01"), datetime.time(9, 40)),
(pd.Timestamp("1995-01-01"), datetime.time(9, 30)),
(pd.Timestamp("1998-12-07"), datetime.time(9, 0)),
)
"""
raise NotImplementedError()
@property
def break_start_times(
self,
) -> None | Sequence[tuple[pd.Timestamp | None, datetime.time]]:
"""Local break start time(s).
As `close_times` although times represent the close of the morning
subsession. None if exchange does not observe a break.
"""
return None
@property
def break_end_times(
self,
) -> None | Sequence[tuple[pd.Timestamp | None, datetime.time]]:
"""Local break end time(s).
As `open_times` although times represent the open of the afternoon
subsession. None if exchange does not observe a break.
"""
return None
@property
@abstractmethod
def close_times(self) -> Sequence[tuple[pd.Timestamp | None, datetime.time]]:
"""Local close time(s).
Returns
-------
Sequence[tuple[pd.Timestamp | None, datetime.time]]:
Sequence of tuples representing (start_date, close_time) where:
start_date: date from which `close_time` applies. None for
first item.
close_time: exchange's local close time.
Notes
-----
Examples for concreting `close_times` on a subclass.
Example where close time is constant throughout period covered by
calendar:
close_times = ((None, time(17, 30)),)
Example where close times have varied over period covered by
calendar:
close_times = (
(None, datetime.time(17, 30)),
(pd.Timestamp("1986-04-01"), datetime.time(17, 20)),
(pd.Timestamp("1995-01-01"), datetime.time(17, 0)),
(pd.Timestamp("2016-08-01"), datetime.time(17, 30)),
)
"""
raise NotImplementedError()
@property
def weekmask(self) -> str:
"""Indicator of weekdays on which the exchange is open.
Default is '1111100' (i.e. Monday-Friday).
See Also
--------
numpy.busdaycalendar
"""
return "1111100"
@property
def open_offset(self) -> int:
"""Day offset of open time(s) relative to session.
Returns
-------
int
0 if the date components of local open times are as the
corresponding session labels.
-1 if the date components of local open times are the day
before the corresponding session labels.
"""
return 0
@property
def close_offset(self) -> int:
"""Day offset of close time(s) relative to session.
Returns
-------
int
0 if the date components of local close times are as the
corresponding session labels.
1 if the date components of local close times are the day
after the corresponding session labels.
"""
return 0
@property
def regular_holidays(self) -> HolidayCalendar | None:
"""Holiday calendar representing calendar's regular holidays."""
return None
@property
def adhoc_holidays(self) -> list[pd.Timestamp]:
"""List of non-regular holidays.
Returns
-------
list[pd.Timestamp]
List of tz-naive timestamps representing non-regular holidays.
"""
return []
@property
def special_opens(self) -> list[tuple[datetime.time, HolidayCalendar]]:
"""Regular non-standard open times.
Example of what would be defined as a special open:
"EVERY YEAR on national lie-in day the exchange opens
at 13:00 rather than the standard 09:00".
Returns
-------
list[tuple[datetime.time, HolidayCalendar]]:
list of tuples each describing a regular non-standard open
time:
[0] datetime.time: regular non-standard open time.
                [1] HolidayCalendar: holiday calendar describing occurrence.
"""
return []
@property
def special_opens_adhoc(
self,
) -> list[tuple[datetime.time, pd.Timestamp | list[pd.Timestamp]]]:
"""Adhoc non-standard open times.
Defines non-standard open times that cannot be otherwise codified
        within `special_opens`.
Example of an event to define as an adhoc special open:
"On 2022-02-14 due to a typhoon the exchange opened at 13:00,
rather than the standard 09:00".
Returns
-------
list[tuple[datetime.time, pd.Timestamp | list[pd.Timestamp]]]:
List of tuples each describing an adhoc non-standard open time:
[0] datetime.time: non-standard open time.
[1] pd.Timestamp | list[pd.Timestamp]: date or dates
corresponding with the non-standard open time.
"""
return []
@property
def special_closes(self) -> list[tuple[datetime.time, HolidayCalendar]]:
"""Regular non-standard close times.
Example of what would be defined as a special close:
"On christmas eve the exchange closes at 14:00 rather than
the standard 17:00".
Returns
-------
list[tuple[datetime.time, HolidayCalendar]]:
list of tuples each describing a regular non-standard close
time:
[0] datetime.time: regular non-standard close time.
                [1] HolidayCalendar: holiday calendar describing occurrence.
"""
return []
@property
def special_closes_adhoc(
self,
) -> list[tuple[datetime.time, pd.Timestamp | list[pd.Timestamp]]]:
"""Adhoc non-standard close times.
Defines non-standard close times that cannot be otherwise codified
        within `special_closes`.
Example of an event to define as an adhoc special close:
"On 2022-02-19 due to a typhoon the exchange closed at 12:00,
rather than the standard 16:00".
Returns
-------
list[tuple[datetime.time, pd.Timestamp | list[pd.Timestamp]]]:
List of tuples each describing an adhoc non-standard close
time:
[0] datetime.time: non-standard close time.
[1] pd.Timestamp | list[pd.Timestamp]: date or dates
corresponding with the non-standard close time.
"""
return []
def apply_special_offsets(self, _all_days, start, end) -> None:
"""Hook for subclass to apply changes.
        Method executed by constructor prior to overwriting special dates.
Notes
-----
Incorporated to provide hook to `exchange_calendar_xkrx`.
"""
return None
# ------------------------------------------------------------------
    # -- NO method below this line should be overridden on a subclass! --
# ------------------------------------------------------------------
# Methods and properties that define calendar (continued...).
@lazyval
def day(self):
return CustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
weekmask=self.weekmask,
)
@classmethod
def valid_sides(cls) -> list[str]:
"""List of valid `side` options."""
if cls.close_times == cls.open_times:
return ["left", "right"]
else:
return ["both", "left", "right", "neither"]
@classmethod
def default_side(cls) -> str:
"""Default `side` option."""
if cls.close_times == cls.open_times:
return "right"
else:
return "both"
@property
def side(self) -> str:
"""Side on which sessions are closed.
Returns
-------
str
"left" - Session open and break_start are trading minutes.
Session close and break_end are not trading minutes.
"right" - Session close and break_end are trading minutes,
                Session open and break_start are not trading minutes.
"both" - Session open, session close, break_start and
break_end are all trading minutes.
"neither" - Session open, session close, break_start and
break_end are all not trading minutes.
Notes
-----
Subclasses should NOT override this method.
"""
return self._side
# Properties covering all sessions.
@property
def sessions(self) -> pd.DatetimeIndex:
"""All calendar sessions."""
return self.schedule.index
@functools.lru_cache(maxsize=1)
def _sessions_nanos(self) -> np.ndarray:
return self.sessions.values.astype("int64")
@property
def sessions_nanos(self) -> np.ndarray:
"""All calendar sessions as nano seconds."""
return self._sessions_nanos()
@property
def opens(self) -> pd.Series:
"""Open time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Open time of corresponding session. NB Times are UTC
although dtype is timezone-naive.
"""
return self.schedule.market_open
@property
def closes(self) -> pd.Series:
"""Close time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Close time of corresponding session. NB Times are UTC
although dtype is timezone-naive.
"""
return self.schedule.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Break-start time of corresponding session. NB Times are UTC
although dtype is timezone-naive. Value is missing
(pd.NaT) for any session that does not have a break.
"""
return self.schedule.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Break-end time of corresponding session. NB Times are UTC
although dtype is timezone-naive. Value is missing
(pd.NaT) for any session that does not have a break.
"""
return self.schedule.break_end
@functools.lru_cache(maxsize=1)
def _first_minutes_nanos(self) -> np.ndarray:
if self.side in self._LEFT_SIDES:
return self.opens_nanos
else:
return one_minute_later(self.opens_nanos)
@property
def first_minutes_nanos(self) -> np.ndarray:
return self._first_minutes_nanos()
@functools.lru_cache(maxsize=1)
def _last_minutes_nanos(self) -> np.ndarray:
if self.side in self._RIGHT_SIDES:
return self.closes_nanos
else:
return one_minute_earlier(self.closes_nanos)
@property
def last_minutes_nanos(self) -> np.ndarray:
return self._last_minutes_nanos()
@functools.lru_cache(maxsize=1)
def _last_am_minutes_nanos(self) -> np.ndarray:
if self.side in self._RIGHT_SIDES:
return self.break_starts_nanos
else:
return one_minute_earlier(self.break_starts_nanos)
@property
def last_am_minutes_nanos(self) -> np.ndarray:
return self._last_am_minutes_nanos()
@functools.lru_cache(maxsize=1)
def _first_pm_minutes_nanos(self) -> np.ndarray:
if self.side in self._LEFT_SIDES:
return self.break_ends_nanos
else:
return one_minute_later(self.break_ends_nanos)
@property
def first_pm_minutes_nanos(self) -> np.ndarray:
return self._first_pm_minutes_nanos()
def _minutes_as_series(self, nanos: np.ndarray, name: str) -> pd.Series:
"""Convert trading minute nanos to pd.Series."""
ser = pd.Series(pd.DatetimeIndex(nanos, tz=UTC), index=self.sessions)
ser.name = name
return ser
@property
def first_minutes(self) -> pd.Series:
"""First trading minute of each session."""
return self._minutes_as_series(self.first_minutes_nanos, "first_minutes")
@property
def last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._minutes_as_series(self.last_minutes_nanos, "last_minutes")
@property
def last_am_minutes(self) -> pd.Series:
"""Last am trading minute of each session."""
return self._minutes_as_series(self.last_am_minutes_nanos, "last_am_minutes")
@property
def first_pm_minutes(self) -> pd.Series:
"""First pm trading minute of each session."""
return self._minutes_as_series(self.first_pm_minutes_nanos, "first_pm_minutes")
# Properties covering all minutes.
def _minutes(self, side: str) -> pd.DatetimeIndex:
return pd.DatetimeIndex(
compute_minutes(
self.opens_nanos,
self.break_starts_nanos,
self.break_ends_nanos,
self.closes_nanos,
side,
),
tz=UTC,
)
@lazyval
def minutes(self) -> pd.DatetimeIndex:
"""All trading minutes."""
return self._minutes(self.side)
@lazyval
def minutes_nanos(self) -> np.ndarray:
"""All trading minutes as nanoseconds."""
return self.minutes.values.astype(np.int64)
# Calendar properties.
@property
def first_session(self) -> pd.Timestamp:
"""First calendar session."""
return self.sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last calendar session."""
return self.sessions[-1]
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of calendar's first session."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of calendar's last session."""
return self.closes[-1]
@property
def first_minute(self) -> pd.Timestamp:
"""Calendar's first trading minute."""
return pd.Timestamp(self.minutes_nanos[0], tz=UTC)
@property
def last_minute(self) -> pd.Timestamp:
"""Calendar's last trading minute."""
return pd.Timestamp(self.minutes_nanos[-1], tz=UTC)
@property
def has_break(self) -> bool:
"""Query if any calendar session has a break."""
return self.sessions_has_break(
self.first_session, self.last_session, _parse=False
)
@property
def late_opens(self) -> pd.DatetimeIndex:
"""Sessions that open later than the prevailing normal open.
NB. Prevailing normal open as defined by `open_times`.
"""
return self._late_opens
@property
def early_closes(self) -> pd.DatetimeIndex:
"""Sessions that close earlier than the prevailing normal close.
NB. Prevailing normal close as defined by `close_times`.
"""
return self._early_closes
# Methods that interrogate a given session.
def _get_session_idx(self, session: Date, _parse=True) -> int:
"""Index position of a session."""
session_ = parse_session(self, session) if _parse else session
if TYPE_CHECKING:
assert isinstance(session_, pd.Timestamp)
return self.sessions_nanos.searchsorted(session_.value, side="left")
def session_open(self, session_label: Session, _parse: bool = True) -> pd.Timestamp:
"""Return open time for a given session."""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return self.schedule.at[session_label, "market_open"].tz_localize(UTC)
def session_close(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return close time for a given session."""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return self.schedule.at[session_label, "market_close"].tz_localize(UTC)
def session_break_start(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT:
"""Return break-start time for a given session.
Returns pd.NaT if no break.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
break_start = self.schedule.at[session_label, "break_start"]
if not pd.isnull(break_start):
break_start = break_start.tz_localize(UTC)
return break_start
def session_break_end(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT:
"""Return break-end time for a given session.
Returns pd.NaT if no break.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
break_end = self.schedule.at[session_label, "break_end"]
if not pd.isnull(break_end):
break_end = break_end.tz_localize(UTC)
return break_end
def session_open_close(
self, session: Session, _parse: bool = True
) -> tuple[pd.Timestamp, pd.Timestamp]:
"""Return open and close times for a given session.
Parameters
----------
session
Session for which require open and close.
Returns
-------
tuple[pd.Timestamp, pd.Timestamp]
[0] Open time of `session`.
[1] Close time of `session`.
"""
if _parse:
session = parse_session(self, session)
return self.session_open(session), self.session_close(session)
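    # Usage sketch (illustrative; calendar name and date are assumptions):
    #
    #     import exchange_calendars as xcals
    #     cal = xcals.get_calendar("XLON")
    #     open_, close_ = cal.session_open_close("2022-06-15")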
def session_break_start_end(
self, session: Session, _parse: bool = True
) -> tuple[pd.Timestamp | pd.NaT, pd.Timestamp | pd.NaT]:
"""Return break-start and break-end times for a given session.
Parameters
----------
session
Session for which require break-start and break-end.
Returns
-------
tuple[pd.Timestamp | pd.NaT, pd.Timestamp | pd.NaT]
[0] Break-start time of `session`, or pd.NaT if no break.
            [1] Break-end time of `session`, or pd.NaT if no break.
"""
if _parse:
session = parse_session(self, session)
return self.session_break_start(session), self.session_break_end(session)
def _get_session_minute_from_nanos(
self, session: Session, nanos: np.ndarray, _parse: bool
) -> pd.Timestamp:
idx = self._get_session_idx(session, _parse=_parse)
return pd.Timestamp(nanos[idx], tz=UTC)
def session_first_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return first trading minute of a given session."""
nanos = self.first_minutes_nanos
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_last_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return last trading minute of a given session."""
nanos = self.last_minutes_nanos
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_last_am_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT: # Literal[pd.NaT] - when move to min 3.8
"""Return last trading minute of am subsession of a given session."""
nanos = self.last_am_minutes_nanos
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_first_pm_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT: # Literal[pd.NaT] - when move to min 3.8
"""Return first trading minute of pm subsession of a given session."""
nanos = self.first_pm_minutes_nanos
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_first_last_minute(
self,
session: Session,
_parse: bool = True,
    ) -> tuple[pd.Timestamp, pd.Timestamp]:
"""Return first and last trading minutes of a given session."""
idx = self._get_session_idx(session, _parse=_parse)
first = pd.Timestamp(self.first_minutes_nanos[idx], tz=UTC)
last = pd.Timestamp(self.last_minutes_nanos[idx], tz=UTC)
return (first, last)
def session_has_break(self, session: Session, _parse: bool = True) -> bool:
"""Query if a given session has a break.
Parameters
----------
session
Session to query.
Returns
-------
bool
True if `session` has a break, false otherwise.
"""
if _parse:
session = parse_session(self, session)
return pd.notna(self.session_break_start(session))
def next_session(self, session: Session, _parse: bool = True) -> pd.Timestamp:
"""Return session that immediately follows a given session.
Parameters
----------
session
Session whose next session is desired.
Raises
------
ValueError
If `session` is the last calendar session.
See Also
--------
date_to_session
"""
idx = self._get_session_idx(session, _parse=_parse)
try:
return self.schedule.index[idx + 1]
except IndexError as err:
if idx == len(self.schedule.index) - 1:
raise ValueError(
"There is no next session as this is the end"
" of the exchange calendar."
) from err
else:
raise
def previous_session(self, session: Session, _parse: bool = True) -> pd.Timestamp:
"""Return session that immediately preceeds a given session.
Parameters
----------
session
Session whose previous session is desired.
Raises
------
ValueError
If `session` is the first calendar session.
See Also
--------
date_to_session
"""
idx = self._get_session_idx(session, _parse=_parse)
if idx == 0:
raise ValueError(
"There is no previous session as this is the"
" beginning of the exchange calendar."
)
return self.schedule.index[idx - 1]
def session_minutes(
self, session: Session, _parse: bool = True
) -> pd.DatetimeIndex:
"""Return trading minutes corresponding to a given session.
Parameters
----------
session
Session for which require trading minutes.
Returns
-------
pd.DateTimeIndex
Trading minutes for `session`.
"""
first, last = self.session_first_last_minute(session, _parse=_parse)
return self.minutes_in_range(start_minute=first, end_minute=last)
def session_offset(
self, session: Session, count: int, _parse: bool = True
) -> pd.Timestamp:
"""Offset a given session by a number of sessions.
Parameters
----------
session
Session from which to offset.
count
Number of sessions to offset `session`. Positive to offset
forwards, negative to offset backwards.
Returns
-------
pd.Timestamp
Offset session.
Raises
------
exchange_calendars.errors.RequestedSessionOutOfBounds
If offset session would be either before the calendar's first
session or after the calendar's last session.
"""
idx = self._get_session_idx(session, _parse=_parse) + count
if idx >= len(self.sessions):
raise errors.RequestedSessionOutOfBounds(self, too_early=False)
elif idx < 0:
raise errors.RequestedSessionOutOfBounds(self, too_early=True)
return self.sessions[idx]
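    # Usage sketch (illustrative, `cal` assumed to be an ExchangeCalendar):
    #
    #     cal.session_offset("2022-06-15", 2)    # two sessions after 2022-06-15
    #     cal.session_offset("2022-06-15", -2)   # two sessions before 2022-06-15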
# Methods that interrogate a date.
def _get_date_idx(self, date: Date, _parse=True) -> int:
"""Index position of a date.
Returns
-------
Index position of session if `date` represents a session,
otherwise index position of session that immediately
follows `date`.
"""
date_ = parse_date(date, "date", self) if _parse else date
if TYPE_CHECKING:
assert isinstance(date_, pd.Timestamp)
return self.sessions_nanos.searchsorted(date_.value, side="left")
def _date_oob(self, date: pd.Timestamp) -> bool:
"""Is `date` out-of-bounds."""
return (
date.value < self.sessions_nanos[0] or date.value > self.sessions_nanos[-1]
)
def is_session(self, dt: Date, _parse: bool = True) -> bool:
"""Query if a date is a valid session.
Parameters
----------
dt
Date to be queried.
Return
------
bool
True if `dt` is a session, False otherwise.
"""
if _parse:
dt = parse_date(dt, "dt", self)
idx = self._get_date_idx(dt, _parse=False)
return bool(self.sessions_nanos[idx] == dt.value) # convert from np.bool_
def date_to_session(
self,
date: Date,
direction: str = "none", # when min 3.8, Literal["none", "previous", "next"]
_parse: bool = True,
) -> pd.Timestamp:
"""Return a session corresponding to a given date.
Parameters
----------
date
Date for which require session. Can be a date that does not
represent an actual session (see `direction`).
direction : default: "none"
Defines behaviour if `date` does not represent a session:
"next" - return first session following `date`.
"previous" - return first session prior to `date`.
"none" - raise ValueError.
See Also
--------
next_session
previous_session
"""
if _parse:
date = parse_date(date, calendar=self)
if self.is_session(date, _parse=False):
return date
elif direction in ["next", "previous"]:
idx = self._get_date_idx(date, _parse=False)
if direction == "previous":
idx -= 1
return self.sessions[idx]
elif direction == "none":
raise ValueError(
f"`date` '{date}' does not represent a session. Consider passing"
" a `direction`."
)
else:
raise ValueError(
f"'{direction}' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
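    # Usage sketch (illustrative, `cal` assumed; 2022-06-18 was a Saturday):
    #
    #     cal.date_to_session("2022-06-18", direction="next")      # following session
    #     cal.date_to_session("2022-06-18", direction="previous")  # preceding session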
# Methods that interrogate a given minute (trading or non-trading).
def _get_minute_idx(self, minute: Minute, _parse=True) -> int:
"""Index position of a minute.
Returns
-------
Index position of trading minute if `minute` represents a
trading minute, otherwise index position of trading
minute that immediately follows `minute`.
"""
if _parse:
minute = parse_timestamp(minute, "minute", self)
return self.minutes_nanos.searchsorted(minute.value, side="left")
def _minute_oob(self, minute: Minute) -> bool:
"""Is `minute` out-of-bounds."""
return (
minute.value < self.minutes_nanos[0]
or minute.value > self.minutes_nanos[-1]
)
def is_trading_minute(self, minute: Minute, _parse: bool = True) -> bool:
"""Query if a given minute is a trading minute.
Minutes during breaks are not considered trading minutes.
Note: `self.side` determines whether exchange will be considered
open or closed on session open, session close, break start and
break end.
Parameters
----------
minute
Minute being queried.
Returns
-------
bool
            Boolean indicating if `minute` is a trading minute.
See Also
--------
is_open_on_minute
"""
if _parse:
minute = parse_timestamp(minute, calendar=self)
idx = self._get_minute_idx(minute, _parse=False)
# convert from np.bool_
return bool(self.minutes_nanos[idx] == minute.value)
def is_break_minute(self, minute: Minute, _parse: bool = True) -> bool:
"""Query if a given minute is within a break.
Note: `self.side` determines whether either, both or one of break
start and break end are treated as break minutes.
Parameters
----------
minute
Minute being queried.
Returns
-------
bool
            Boolean indicating if `minute` is a break minute.
"""
if _parse:
minute = parse_timestamp(minute, calendar=self)
session_idx = np.searchsorted(self.first_minutes_nanos, minute.value) - 1
break_start = self.last_am_minutes_nanos[session_idx]
break_end = self.first_pm_minutes_nanos[session_idx]
        # NaT comparisons evaluate as False
numpy_bool = break_start < minute.value < break_end
return bool(numpy_bool)
def is_open_on_minute(
self, dt: Minute, ignore_breaks: bool = False, _parse: bool = True
) -> bool:
"""Query if exchange is open on a given minute.
Note: `self.side` determines whether exchange will be considered
open or closed on session open, session close, break start and
break end.
Parameters
----------
dt
Minute being queried.
ignore_breaks
Should exchange be considered open during any break?
True - treat exchange as open during any break.
False - treat exchange as closed during any break.
Returns
-------
bool
            Boolean indicating if exchange is open on `dt`.
See Also
--------
is_trading_minute
"""
if _parse:
dt = parse_timestamp(dt, "dt", self)
is_trading_minute = self.is_trading_minute(dt, _parse=False)
if is_trading_minute or not ignore_breaks:
return is_trading_minute
else:
# not a trading minute although should return True if in break
return self.is_break_minute(dt, _parse=False)
def next_open(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return next open that follows a given minute.
If `dt` is a session open, the next session's open will be
returned.
Parameters
----------
dt
Minute for which to get the next open.
Returns
-------
pd.Timestamp
UTC timestamp of the next open.
"""
if _parse:
dt = parse_timestamp(dt, "dt", self)
try:
idx = next_divider_idx(self.opens_nanos, dt.value)
except IndexError:
if dt.tz_convert(None) >= self.opens[-1]:
raise ValueError(
"Minute cannot be the last open or later (received `dt`"
f" parsed as '{dt}'.)"
) from None
else:
raise
return pd.Timestamp(self.opens_nanos[idx], tz=UTC)
def next_close(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return next close that follows a given minute.
If `dt` is a session close, the next session's close will be
returned.
Parameters
----------
dt
Minute for which to get the next close.
Returns
-------
pd.Timestamp
UTC timestamp of the next close.
"""
if _parse:
dt = parse_timestamp(dt, "dt", self)
try:
idx = next_divider_idx(self.closes_nanos, dt.value)
except IndexError:
if dt.tz_convert(None) == self.closes[-1]:
raise ValueError(
"Minute cannot be the last close (received `dt` parsed as"
f" '{dt}'.)"
) from None
else:
raise
return pd.Timestamp(self.closes_nanos[idx], tz=UTC)
def previous_open(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return previous open that preceeds a given minute.
If `dt` is a session open, the previous session's open will be
returned.
Parameters
----------
dt
Minute for which to get the previous open.
Returns
-------
pd.Timestamp
UTC timestamp of the previous open.
"""
if _parse:
dt = parse_timestamp(dt, "dt", self)
try:
idx = previous_divider_idx(self.opens_nanos, dt.value)
except ValueError:
if dt.tz_convert(None) == self.opens[0]:
raise ValueError(
"Minute cannot be the first open (received `dt` parsed as"
f" '{dt}'.)"
) from None
else:
raise
return pd.Timestamp(self.opens_nanos[idx], tz=UTC)
def previous_close(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return previous close that preceeds a given minute.
If `dt` is a session close, the previous session's close will be
returned.
Parameters
----------
dt
Minute for which to get the previous close.
Returns
-------
pd.Timestamp
UTC timestamp of the previous close.
"""
if _parse:
dt = parse_timestamp(dt, "dt", self)
try:
idx = previous_divider_idx(self.closes_nanos, dt.value)
except ValueError:
if dt.tz_convert(None) <= self.closes[0]:
raise ValueError(
"Minute cannot be the first close or earlier (received"
f" `dt` parsed as '{dt}'.)"
) from None
else:
raise
return pd.Timestamp(self.closes_nanos[idx], tz=UTC)
def next_minute(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return trading minute that immediately follows a given minute.
Parameters
----------
dt
Minute for which to get next trading minute. Minute can be a
trading or a non-trading minute.
Returns
-------
pd.Timestamp
UTC timestamp of the next minute.
"""
if _parse:
dt = parse_timestamp(dt, "dt", self)
try:
idx = next_divider_idx(self.minutes_nanos, dt.value)
except IndexError:
# dt > last_minute handled via parsing
if dt == self.last_minute:
raise ValueError(
"Minute cannot be the last trading minute or later"
f" (received `dt` parsed as '{dt}'.)"
) from None
return self.minutes[idx]
def previous_minute(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return trading minute that immediately preceeds a given minute.
Parameters
----------
dt
Minute for which to get previous trading minute. Minute can be
a trading or a non-trading minute.
Returns
-------
pd.Timestamp
UTC timestamp of the previous minute.
"""
if _parse:
dt = parse_timestamp(dt, "dt", self)
try:
idx = previous_divider_idx(self.minutes_nanos, dt.value)
except ValueError:
# dt < first_minute handled via parsing
if dt == self.first_minute:
raise ValueError(
"Minute cannot be the first trading minute or earlier"
f" (received `dt` parsed as '{dt}'.)"
) from None
return self.minutes[idx]
# NOTE: when min to 3.8, direction annotation to Literal["next", "previous", "none"]
def minute_to_session(
self,
minute: Minute,
direction: str = "next",
_parse: bool = True,
) -> pd.Timestamp:
"""Get session corresponding with a trading or break minute.
Parameters
----------
minute
Minute for which require corresponding session.
direction
How to resolve session in event that `minute` is not a trading
or break minute:
"next" (default) - return first session subsequent to
`minute`.
"previous" - return first session prior to `minute`.
"none" - raise ValueError.
Returns
-------
pd.Timestamp
Corresponding session label.
Raises
------
ValueError
If `minute` is not a trading minute and `direction` is "none".
See Also
--------
minute_to_past_session
minute_to_future_session
session_offset
"""
if _parse:
minute = parse_timestamp(minute, calendar=self)
if minute.value < self.minutes_nanos[0]:
# Resolve call here.
if direction == "next":
return self.first_session
else:
raise ValueError(
"Received `minute` as '{0}' although this is earlier than the"
" calendar's first trading minute ({1}). Consider passing"
" `direction` as 'next' to get first session.".format(
minute, self.first_minute
)
)
if minute.value > self.minutes_nanos[-1]:
# Resolve call here.
if direction == "previous":
return self.last_session
else:
raise ValueError(
"Received `minute` as '{0}' although this is later than the"
" calendar's last trading minute ({1}). Consider passing"
" `direction` as 'previous' to get last session.".format(
minute, self.last_minute
)
)
idx = np.searchsorted(self.last_minutes_nanos, minute.value)
current_or_next_session = self.schedule.index[idx]
if direction == "next":
return current_or_next_session
elif direction == "previous":
if not self.is_open_on_minute(minute, ignore_breaks=True, _parse=False):
return self.schedule.index[idx - 1]
elif direction == "none":
if not self.is_open_on_minute(minute, ignore_breaks=True, _parse=False):
# if the exchange is closed, blow up
raise ValueError(
f"`minute` '{minute}' is not a trading minute. Consider passing"
" `direction` as 'next' or 'previous'."
)
else:
# invalid direction
raise ValueError("Invalid direction parameter: " "{0}".format(direction))
return current_or_next_session
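    # Usage sketch (illustrative, `cal` assumed to be an ExchangeCalendar):
    #
    #     minute = pd.Timestamp("2022-06-15 10:30", tz="UTC")
    #     cal.minute_to_session(minute)                         # session of/after `minute`
    #     cal.minute_to_session(minute, direction="previous")   # session of/before `minute`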
def minute_to_past_session(
self, minute: Minute, count: int = 1, _parse: bool = True
) -> pd.Timestamp:
"""Get a session that closed before a given minute.
Parameters
----------
minute
Minute for which to return a previous session. Can be a
trading minute or non-trading minute.
Note: if `minute` is a trading minute then returned session
will not be the session of which `minute` is a trading minute,
but rather a session that closed before `minute`.
count : default: 1
Number of sessions prior to `minute` for which require session.
Returns
-------
        pd.Timestamp
Session that is `count` full sessions before `minute`.
See Also
--------
minute_to_session
minute_to_future_session
session_offset
"""
if _parse:
minute = parse_timestamp(minute, calendar=self)
if count <= 0:
raise ValueError("`count` must be higher than 0.")
if self.is_open_on_minute(minute, ignore_breaks=True, _parse=False):
current_session = self.minute_to_session(minute, _parse=False)
if current_session == self.first_session:
raise errors.RequestedSessionOutOfBounds(self, too_early=True)
base_session = self.previous_session(current_session, _parse=False)
else:
base_session = self.minute_to_session(minute, "previous", _parse=False)
count -= 1
return self.session_offset(base_session, -count, _parse=False)
def minute_to_future_session(
self,
minute: Minute,
count: int = 1,
_parse: bool = True,
) -> pd.Timestamp:
"""Get a session that opens after a given minute.
Parameters
----------
minute
Minute for which to return a future session. Can be a trading
minute or non-trading minute.
Note: if `minute` is a trading minute then returned session
will not be the session of which `minute` is a trading minute,
but rather a session that opens after `minute`.
count : default: 1
Number of sessions following `minute` for which require
session.
Returns
-------
        pd.Timestamp
Session that is `count` full sessions after `minute`.
See Also
--------
minute_to_session
minute_to_past_session
session_offset
"""
if _parse:
minute = parse_timestamp(minute, calendar=self)
if count <= 0:
raise ValueError("`count` must be higher than 0.")
if self.is_open_on_minute(minute, ignore_breaks=True, _parse=False):
current_session = self.minute_to_session(minute, _parse=False)
if current_session == self.last_session:
raise errors.RequestedSessionOutOfBounds(self, too_early=False)
base_session = self.next_session(current_session, _parse=False)
else:
base_session = self.minute_to_session(minute, "next", _parse=False)
count -= 1
return self.session_offset(base_session, count, _parse=False)
# NOTE: when min to 3.8, direction annotation to Literal["next", "previous", "none"]
def minute_to_trading_minute(
self, minute: Minute, direction: str = "none", _parse: bool = True
) -> pd.Timestamp:
"""Resolve a minute to a trading minute.
Differs from `previous_minute` and `next_minute` by returning
`minute` unchanged if `minute` is a trading minute.
Parameters
----------
minute
Timestamp to be resolved to a trading minute.
direction:
            How to resolve `minute` if it does not represent a trading minute:
'next' - return trading minute that immediately follows
`minute`.
'previous' - return trading minute that immediately
                    precedes `minute`.
                'none' - raise ValueError
Returns
-------
pd.Timestamp
Returns `minute` if `minute` is a trading minute otherwise
first trading minute that, in accordance with `direction`,
            either immediately follows or precedes `minute`.
Raises
------
ValueError
            If `minute` is not a trading minute and `direction` is 'none'.
See Also
--------
        next_minute
previous_minute
"""
if _parse:
minute = parse_timestamp(minute, calendar=self)
if self.is_trading_minute(minute, _parse=False):
return minute
elif direction == "next":
return self.next_minute(minute, _parse=False)
elif direction == "previous":
return self.previous_minute(minute, _parse=False)
else:
raise ValueError(
f"`minute` '{minute}' is not a trading minute. Consider passing"
" `direction` as 'next' or 'previous'."
)
def minute_offset(
self, minute: TradingMinute, count: int, _parse: bool = True
) -> pd.Timestamp:
"""Offset a given trading minute by a number of trading minutes.
Parameters
----------
minute
Trading minute from which to offset.
count
Number of trading minutes to offset `minute`. Positive to
offset forwards, negative to offset backwards.
Returns
-------
        pd.Timestamp
Offset trading minute.
Raises
------
ValueError
If offset minute would be either before the calendar's first
trading minute or after the calendar's last trading minute.
"""
if _parse:
minute = parse_trading_minute(self, minute)
idx = self._get_minute_idx(minute) + count
if idx >= len(self.minutes_nanos):
raise errors.RequestedMinuteOutOfBounds(self, too_early=False)
elif idx < 0:
raise errors.RequestedMinuteOutOfBounds(self, too_early=True)
return self.minutes[idx]
def minute_offset_by_sessions(
self,
minute: TradingMinute,
count: int = 1,
_parse: bool = True,
) -> pd.Timestamp:
"""Offset trading minute by a given number of sessions.
If trading minute is not represented in target session (due to a late
open for example) then offset minute will be rolled (with respect to
the target session):
- forwards to first session minute, if offset minute otherwise
falls earlier than first session minute.
- back to last session minute, if offset minute otherwise falls
later than last session minute.
- back to last minute before break, if offset otherwise
falls in session break.
Parameters
----------
minute
Trading minute to be offset.
count
Number of sessions by which to offset trading minute. Negative
to offset to an earlier session.
"""
if _parse:
minute = parse_trading_minute(self, minute)
if not count:
return minute
if count > 0:
try:
target_session = self.minute_to_future_session(minute, abs(count))
except errors.RequestedSessionOutOfBounds:
raise errors.RequestedMinuteOutOfBounds(self, too_early=False)
else:
try:
target_session = self.minute_to_past_session(minute, abs(count))
except errors.RequestedSessionOutOfBounds:
raise errors.RequestedMinuteOutOfBounds(self, too_early=True)
base_session = self.minute_to_session(minute)
day_offset = (minute.normalize() - base_session.normalize()).days
minute = target_session.replace(hour=minute.hour, minute=minute.minute)
minute += pd.Timedelta(days=day_offset)
if self._minute_oob(minute):
if minute.value < self.minutes_nanos[0]:
                raise errors.RequestedMinuteOutOfBounds(self, too_early=True)
if minute.value > self.minutes_nanos[-1]:
raise errors.RequestedMinuteOutOfBounds(self, too_early=False)
if self.is_trading_minute(minute, _parse=False):
# this guard is necessary as minute can be for a different session than the
# intended if the gap between sessions is less than any difference in the
# open or close times (i.e. only relevant if base and target sessions have
            # different open/close times).
if self.minute_to_session(minute, _parse=False) == target_session:
return minute
first_minute = self.session_first_minute(target_session, _parse=False)
if minute < first_minute:
return first_minute
last_minute = self.session_last_minute(target_session, _parse=False)
if minute > last_minute:
return last_minute
elif self.is_break_minute(minute, _parse=False):
return self.session_last_am_minute(target_session, _parse=False)
assert False, "offset minute should have resolved!"
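    # Usage sketch (illustrative, values assumed): offset a trading minute by
    # one session, rolling to the target session's first/last minute (or the
    # last pre-break minute) if the same wall-clock minute is not traded there.
    #
    #     cal.minute_offset_by_sessions(pd.Timestamp("2022-06-15 14:30", tz="UTC"), 1)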
# Methods that evaluate or interrogate a range of minutes.
def _get_minutes_slice(self, start: Minute, end: Minute, _parse=True) -> slice:
"""Slice representing a range of trading minutes."""
if _parse:
start = parse_timestamp(start, "start", self)
end = parse_timestamp(end, "end", self)
slice_start = self.minutes_nanos.searchsorted(start.value, side="left")
slice_end = self.minutes_nanos.searchsorted(end.value, side="right")
return slice(slice_start, slice_end)
def minutes_in_range(
self, start_minute: Minute, end_minute: Minute, _parse: bool = True
) -> pd.DatetimeIndex:
"""Return all trading minutes between given minutes.
Parameters
----------
start_minute
Minute representing start of desired range. Can be a trading
minute or non-trading minute.
end_minute
Minute representing end of desired range. Can be a trading
minute or non-trading minute.
"""
slc = self._get_minutes_slice(start_minute, end_minute, _parse)
return self.minutes[slc]
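    # Usage sketch (illustrative; `cal` and the endpoints are assumptions):
    #
    #     cal.minutes_in_range(
    #         pd.Timestamp("2022-06-15 14:30", tz="UTC"),
    #         pd.Timestamp("2022-06-15 15:00", tz="UTC"),
    #     )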
def minutes_window(
self, start_dt: TradingMinute, count: int, _parse: bool = True
) -> pd.DatetimeIndex:
"""Return block of given size of consecutive trading minutes.
Parameters
----------
start_dt
Minute representing the first (if `count` positive) or last
(if `count` negative) minute of minutes window.
count
            Number of minutes to include in window in addition to
`start_dt` (i.e. 0 will return block of length 1 with
`start_dt` as only value).
Positive to return block of minutes from `start_dt`
Negative to return block of minutes to `start_dt`.
"""
if _parse:
start_dt = parse_trading_minute(self, start_dt, "start_dt")
start_idx = self._get_minute_idx(start_dt, _parse=False)
end_idx = start_idx + count
if end_idx < 0:
raise ValueError(
f"Minutes window cannot begin before the calendar's first"
f" trading minute ({self.first_minute}). `count`"
f" cannot be lower than {count - end_idx} for `start`"
f" '{start_dt}'."
)
elif end_idx >= len(self.minutes_nanos):
raise ValueError(
f"Minutes window cannot end after the calendar's last"
f" trading minute ({self.last_minute}). `count`"
f" cannot be higher than"
f" {count - (end_idx - len(self.minutes_nanos) + 1)} for"
f" `start` '{start_dt}'."
)
return self.minutes[min(start_idx, end_idx) : max(start_idx, end_idx) + 1]
def minutes_distance(self, start: Minute, end: Minute, _parse: bool = True) -> int:
"""Return the number of minutes in a range.
Parameters
----------
start
Start of minute range (range inclusive of `start`).
end
End of minute range (range inclusive of `end`).
Returns
-------
int
            Number of minutes in minute range. If `start` is later than
`end` then return will be negated.
"""
if _parse:
start = parse_timestamp(start, "start", self)
end = parse_timestamp(end, "end", self)
negate = end < start
if negate:
start, end = end, start
slc = self._get_minutes_slice(start, end, _parse=False)
return slc.start - slc.stop if negate else slc.stop - slc.start
def minutes_to_sessions(self, minutes: pd.DatetimeIndex) -> pd.DatetimeIndex:
"""Return sessions corresponding to multiple trading minutes.
For the purpose of this method trading minutes are considered as:
- Trading minutes as determined by `self.side`.
- All minutes of any breaks.
Parameters
----------
minutes
Sorted DatetimeIndex representing market minutes for which to get
corresponding sessions.
Returns
-------
pd.DatetimeIndex
Sessions corresponding to `minutes`.
Raises
------
ValueError
            If any element of `minutes` is not a trading minute.
"""
if not minutes.is_monotonic_increasing:
raise ValueError("`index` must be ordered.")
# Find the indices of the previous first session minute and the next
# last session minute for each minute.
index_nanos = minutes.values.astype(np.int64)
first_min_nanos = self.first_minutes_nanos
last_min_nanos = self.last_minutes_nanos
prev_first_mins_idxs = (
first_min_nanos.searchsorted(index_nanos, side="right") - 1
)
next_last_mins_idxs = last_min_nanos.searchsorted(index_nanos, side="left")
# If they don't match, the minute is outside the trading day. Barf.
mismatches = prev_first_mins_idxs != next_last_mins_idxs
if mismatches.any():
# Show the first bad minute in the error message.
bad_ix = np.flatnonzero(mismatches)[0]
example = minutes[bad_ix]
prev_session_idx = prev_first_mins_idxs[bad_ix]
prev_first_min = pd.Timestamp(first_min_nanos[prev_session_idx], tz=UTC)
prev_last_min = pd.Timestamp(last_min_nanos[prev_session_idx], tz=UTC)
next_first_min = pd.Timestamp(first_min_nanos[prev_session_idx + 1], tz=UTC)
next_last_min = pd.Timestamp(last_min_nanos[prev_session_idx + 1], tz=UTC)
raise ValueError(
f"{mismatches.sum()} non-trading minutes in"
f" minutes_to_sessions:\nFirst Bad Minute: {example}\n"
f"Previous Session: {prev_first_min} -> {prev_last_min}\n"
f"Next Session: {next_first_min} -> {next_last_min}"
)
return self.schedule.index[prev_first_mins_idxs]
# Methods that evaluate or interrogate a range of sessions.
def _parse_start_end_dates(
self, start: Date, end: Date, _parse: bool
) -> tuple[pd.Timestamp, pd.Timestamp]:
if not _parse:
return start, end
return parse_date(start, "start", self), parse_date(end, "end", self)
def _get_sessions_slice(self, start: Date, end: Date, _parse=True) -> slice:
"""Slice representing a range of sessions."""
start, end = self._parse_start_end_dates(start, end, _parse)
slice_start = self.sessions_nanos.searchsorted(start.value, side="left")
slice_end = self.sessions_nanos.searchsorted(end.value, side="right")
return slice(slice_start, slice_end)
def sessions_in_range(
self, start_session_label: Date, end_session_label: Date, _parse: bool = True
) -> pd.DatetimeIndex:
"""Return sessions within a given range.
Parameters
----------
start_session_label
Start of session range (range inclusive of `start`).
end_session_label
End of session range (range inclusive of `end`).
Returns
-------
pd.DatetimeIndex
Sessions from `start_session_label` through `end_session_label`.
"""
slc = self._get_sessions_slice(start_session_label, end_session_label, _parse)
return self.sessions[slc]
def sessions_has_break(self, start: Date, end: Date, _parse: bool = True) -> bool:
"""Query if at least one session in a session range has a break.
Parameters
----------
start
Start of session range (range inclusive of `start`).
end
End of session range (range inclusive of `end`).
Returns
-------
bool
True if any session in session range has a break, False otherwise.
"""
slc = self._get_sessions_slice(start, end, _parse)
return self.break_starts[slc].notna().any()
def sessions_window(
self, session_label: Session, count: int, _parse: bool = True
) -> pd.DatetimeIndex:
"""Return block of given size of consecutive sessions.
Parameters
----------
session_label
Session representing the first (if `count` positive) or last
(if `count` negative) session of session window.
count
Number of sessions to include in window in addition to
`session_label` (i.e. 0 will return window of length 1 with
`session_label` as only value).
Positive to return window of sessions from `session_label`
Negative to return window of sessions to `session_label`.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
start_idx = self._get_session_idx(session_label, _parse=False)
end_idx = start_idx + count
if end_idx < 0:
raise ValueError(
f"Sessions window cannot begin before the first calendar session"
f" ({self.first_session}). `count` cannot be lower than"
f" {count - end_idx} for `session` '{session_label}'."
)
elif end_idx >= len(self.sessions):
raise ValueError(
f"Sessions window cannot end after the last calendar session"
f" ({self.last_session}). `count` cannot be higher than"
f" {count - (end_idx - len(self.sessions) + 1)} for"
f" `session` '{session_label}'."
)
return self.sessions[min(start_idx, end_idx) : max(start_idx, end_idx) + 1]
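    # Illustrative usage (a sketch; `cal` and the session below are hypothetical):
    #     cal.sessions_window("2021-06-15", 2)   # session plus the two that follow
    #     cal.sessions_window("2021-06-15", -2)  # session plus the two that precede
    #     cal.sessions_window("2021-06-15", 0)   # window of length 1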
def sessions_distance(self, start: Date, end: Date, _parse: bool = True) -> int:
"""Return the number of sessions in a range.
Parameters
----------
start
Start of session range (range inclusive of `start`).
end
End of session range (range inclusive of `end`).
Returns
-------
int
            Number of sessions in session range. If `start` is later than
`end` then return will be negated.
"""
start, end = self._parse_start_end_dates(start, end, _parse)
negate = end < start
if negate:
start, end = end, start
slc = self._get_sessions_slice(start, end, _parse=False)
return slc.start - slc.stop if negate else slc.stop - slc.start
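    # Illustrative usage (a sketch; `cal` and the dates below are hypothetical):
    #     cal.sessions_distance("2021-06-14", "2021-06-18")  # e.g. 5 for a Mon-Fri week
    #     cal.sessions_distance("2021-06-18", "2021-06-14")  # same magnitude, negated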
def sessions_minutes(
self, start: Date, end: Date, _parse: bool = True
) -> pd.DatetimeIndex:
"""Return trading minutes over a sessions range.
Parameters
----------
start
Start of session range (range inclusive of `start`).
end
End of session range (range inclusive of `end`).
Returns
-------
pd.DatetimeIndex
Trading minutes for sessions in range.
"""
start, end = self._parse_start_end_dates(start, end, _parse)
start = self.date_to_session(start, "next", _parse=False)
end = self.date_to_session(end, "previous", _parse=False)
first_minute = self.session_first_minute(start)
last_minute = self.session_last_minute(end)
return self.minutes_in_range(first_minute, last_minute)
def sessions_opens(self, start: Date, end: Date, _parse: bool = True) -> pd.Series:
"""Return UTC open time by session for sessions in given range.
Parameters
----------
start
Start of session range (range inclusive of `start`).
end
End of session range (range inclusive of `end`).
Returns
-------
pd.Series
index:
Sessions from `start` through `end` (inclusive of both).
values:
UTC open times for corresponding sessions.
"""
start, end = self._parse_start_end_dates(start, end, _parse)
return self.schedule.loc[start:end, "market_open"].dt.tz_localize(UTC)
def sessions_closes(self, start: Date, end: Date, _parse: bool = True) -> pd.Series:
"""Return UTC close time by session for sessions in given range.
Parameters
----------
start
Start of session range (range inclusive of `start`).
end
End of session range (range inclusive of `end`).
Returns
-------
pd.Series
index:
Sessions from `start` through `end` (inclusive of both).
values:
UTC close times for corresponding sessions.
"""
start, end = self._parse_start_end_dates(start, end, _parse)
return self.schedule.loc[start:end, "market_close"].dt.tz_localize(UTC)
def sessions_minutes_count(
self, start: Date, end: Date, _parse: bool = True
) -> int:
"""Return number of trading minutes in a range of sessions.
Parameters
----------
start
Start of session range (range inclusive of `start`).
end
End of session range (range inclusive of `end`).
Returns
-------
int
Total number of trading minutes in sessions range.
"""
slc = self._get_sessions_slice(start, end, _parse)
session_diff = self.last_minutes_nanos[slc] - self.first_minutes_nanos[slc]
session_diff += NANOSECONDS_PER_MINUTE
break_diff = self.first_pm_minutes_nanos[slc] - self.last_am_minutes_nanos[slc]
break_diff[break_diff != 0] -= NANOSECONDS_PER_MINUTE
nanos = session_diff - break_diff
return (nanos // NANOSECONDS_PER_MINUTE).sum()
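    # Worked example of the arithmetic above (hypothetical times, side "left"):
    # for a 09:30-16:00 session with a 12:00-13:00 break,
    #     session_diff = (15:59 - 09:30) + 1 min = 390 minutes
    #     break_diff   = (13:00 - 11:59) - 1 min =  60 minutes
    # giving 390 - 60 = 330 trading minutes (150 in the am and 180 in the pm
    # subsession).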
def trading_index(
self,
start: Date,
end: Date,
period: pd.Timedelta | str,
intervals: bool = True,
closed: str = "left", # when move to min 3.8 Literal["left", "right", "both", "neither"]
force_close: bool = False,
force_break_close: bool = False,
force: bool | None = None,
curtail_overlaps: bool = False,
ignore_breaks: bool = False,
parse: bool = True,
) -> pd.DatetimeIndex | pd.IntervalIndex:
"""Create a trading index.
Create a trading index of given `period` over a given range of
dates.
NB. Which minutes the calendar treats as trading minutes, according
to `self.side`, is irrelevant in the evaluation of the trading
index.
NB. Execution time is related to the number of indices created. The
longer the range of dates covered and/or the shorter the period
(i.e. higher the frequency), the longer the execution. Whilst an
index with 4000 indices might be created in a couple of
        milliseconds, a high frequency index with 2 million indices might
take a second or two.
Parameters
----------
start
Start of session range over which to create index.
end
End of session range over which to create index.
period
If `intervals` is True, the length of each interval. If
`intervals` is False, the distance between indices. Period
should be passed as a pd.Timedelta or a str that's acceptable
as a single input to pd.Timedelta. `period` cannot be greater
than 1 day.
Examples of valid `period` input:
pd.Timedelta(minutes=15), pd.Timedelta(minutes=15, hours=2)
'15min', '15T', '1H', '4h', '1d', '30s', '2s', '500ms'.
Examples of invalid `period` input:
'15minutes', '2d'.
intervals : default: True
True to return trading index as a pd.IntervalIndex with indices
representing explicit intervals.
False to return trading index as a pd.DatetimeIndex with
            indices that implicitly represent a period according to
`closed`.
If `period` is '1d' then trading index will be returned as a
pd.DatetimeIndex.
closed : {"left", "right", "both", "neither"}
(ignored if `period` is '1d'.)
If `intervals` is True, the side that intervals should be
closed on. Must be either "left" or "right" (any time during a
session must belong to one interval and one interval only).
If `intervals` is False, the side of each period that an
indice should be defined. The first and last indices of each
(sub)session will be defined according to:
"left" - include left side of first period, do not include
right side of last period.
"right" - do not include left side of first period, include
right side of last period.
"both" - include both left side of first period and right
side of last period.
"neither" - do not include either left side of first period
or right side of last period.
NB if `period` is not a factor of the (sub)session length then
"right" or "both" will result in an indice being defined after
the (sub)session close. See `force_close` and
`force_break_close`.
force_close : default: False
(ignored if `force` is passed.)
(ignored if `period` is '1d')
(irrelevant if `intervals` is False and `closed` is "left" or
"neither")
Defines behaviour if right side of a session's last period
falls after the session close.
If True, defines right side of this period as session close.
If False, defines right side of this period after the session
close. In this case the represented period will include a
non-trading period.
force_break_close : default: False
(ignored if `force` is passed.)
(ignored if `period` is '1d'.)
            (irrelevant if `intervals` is False and `closed` is "left" or
            "neither".)
Defines behaviour if right side of last pre-break period falls
after the start of the break.
If True, defines right side of this period as break start.
If False, defines right side of this period after the break
start. In this case the represented period will include a
non-trading period.
force : optional
(ignored if `period` is '1d'.)
            (irrelevant if `intervals` is False and `closed` is "left" or
            "neither".)
Convenience option to set both `force_close` and
            `force_break_close`. If passed then values passed to
`force_close` and `force_break_close` will be ignored.
curtail_overlaps : default: False
(ignored if `period` is '1d')
(irrelevant if (`intervals` is False) or (`force_close` and
`force_break_close` are both True).)
Defines action to take if a period ends after the start of the
next period. (This can occur if `period` is longer
than a break or the gap between one session's close and the
next session's open.)
If True, the right of the earlier of two overlapping
periods will be curtailed to the left of the latter period.
(NB consequently the period length will not be constant for
all periods.)
If False, will raise IntervalsOverlapError.
ignore_breaks : default: False
(ignored if `period` is '1d'.)
(irrelevant if no session has a break)
Defines whether trading index should respect session breaks.
If False, treat sessions with breaks as comprising independent
morning and afternoon subsessions.
If True, treat all sessions as continuous, ignoring any
breaks.
parse : default: True
Determines if `start` and `end` values are parsed. If these
arguments are passed as pd.Timestamp with no time component
and tz as UTC then can pass `parse` as False to save around
500µs on the execution.
Returns
-------
pd.IntervalIndex or pd.DatetimeIndex
Trading index.
If `intervals` is False or `period` is '1d' then returned as a
pd.DatetimeIndex.
If `intervals` is True (default) returned as pd.IntervalIndex.
Raises
------
exchange_calendars.errors.IntervalsOverlapError
If `intervals` is True and right side of one or more indices
would fall after the left of the subsequent indice. This can
occur if `period` is longer than a break or the gap between one
session's close and the next session's open.
exchange_calendars.errors.IntervalsOverlapError
If `intervals` is False and an indice would otherwise fall to
the right of the subsequent indice. This can occur if `period`
is longer than a break or the gap between one session's close
and the next session's open.
Credit to @Stryder-Git at pandas_market_calendars for showing the
way with a vectorised solution to creating trading indices (a
variation of which is employed within the underlying _TradingIndex
class).
"""
start, end = self._parse_start_end_dates(start, end, parse)
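        # Illustrative usage (a sketch; the calendar instance and dates are
        # hypothetical, not from the source):
        #     cal.trading_index("2021-06-14", "2021-06-18", "30T",
        #                       intervals=True, closed="left", force_close=True)
        #     # -> pd.IntervalIndex of 30-minute intervals, with each
        #     #    (sub)session's final interval curtailed to the close.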
if not isinstance(period, pd.Timedelta):
try:
period = pd.Timedelta(period)
except ValueError:
            msg = (
                f"`period` received as '{period}' although takes type"
                " 'pd.Timedelta' or a type 'str' that is valid as a single input"
                " to 'pd.Timedelta'. Examples of valid input: pd.Timedelta('15T'),"
                " '15min', '15T', '1H', '4h', '1d', '5s', '500ms'."
)
raise ValueError(msg) from None
if period > | pd.Timedelta(1, "D") | pandas.Timedelta |
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
df_strategy,
categoricaldf_strategy,
)
from janitor.functions import expand_grid
@given(df=df_strategy())
def test_others_not_dict(df):
"""Raise Error if `others` is not a dictionary."""
with pytest.raises(TypeError):
df.expand_grid("frame", others=[2, 3])
@given(df=df_strategy())
def test_others_none(df):
"""Return DataFrame if no `others`, and df exists."""
assert_frame_equal(df.expand_grid("df"), df)
def test_others_empty():
"""Return None if no `others`."""
    assert expand_grid() is None
@given(df=df_strategy())
def test_df_key(df):
"""Raise error if df exists and df_key is not supplied."""
with pytest.raises(KeyError):
expand_grid(df, others={"y": [5, 4, 3, 2, 1]})
@given(df=df_strategy())
def test_df_key_hashable(df):
"""Raise error if df exists and df_key is not Hashable."""
with pytest.raises(TypeError):
expand_grid(df, df_key=["a"], others={"y": [5, 4, 3, 2, 1]})
def test_numpy_zero_d():
"""Raise ValueError if numpy array dimension is zero."""
with pytest.raises(ValueError):
expand_grid(others={"x": np.array([], dtype=int)})
def test_numpy_gt_2d():
"""Raise ValueError if numpy array dimension is greater than 2."""
with pytest.raises(ValueError):
expand_grid(others={"x": np.array([[[2, 3]]])})
def test_series_empty():
"""Raise ValueError if Series is empty."""
with pytest.raises(ValueError):
expand_grid(others={"x": pd.Series([], dtype=int)})
def test_dataframe_empty():
"""Raise ValueError if DataFrame is empty."""
with pytest.raises(ValueError):
expand_grid(others={"x": pd.DataFrame([])})
def test_index_empty():
"""Raise ValueError if Index is empty."""
with pytest.raises(ValueError):
expand_grid(others={"x": pd.Index([], dtype=int)})
@settings(deadline=None)
@given(df=df_strategy())
def test_series(df):
"""Test expand_grid output for Series input."""
A = df["a"]
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_series_dataframe(df):
"""Test expand_grid output for Series and DataFrame inputs."""
A = df["a"]
B = df.iloc[:, [1, 2]]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_series_multiindex_dataframe(df):
"""
Test expand_grid output
if the DataFrame's columns is a MultiIndex.
"""
A = df["a"]
B = df.iloc[:, [1, 2]]
B.columns = pd.MultiIndex.from_arrays([["C", "D"], B.columns])
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B.columns = B.columns.map("_".join)
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_numpy_1d(df):
"""Test expand_grid output for a 1D numpy array."""
A = df["a"].to_numpy()
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]].rename(columns={"a": 0})
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=categoricaldf_strategy())
def test_numpy_2d(df):
"""Test expand_grid output for a 2D numpy array"""
A = df["names"]
base = df.loc[:, ["numbers"]].assign(num=df.numbers * 4)
B = base.to_numpy(dtype=int)
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["names"]]
B = base.set_axis([0, 1], axis=1)
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_index(df):
"""Test expand_grid output for a pandas Index that has a name."""
A = pd.Index(df["a"])
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_index_name_none(df):
"""Test expand_grid output for a pandas Index without a name."""
A = pd.Index(df["a"].array, name=None)
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays([["A", "B"], [0, "cities"]])
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=categoricaldf_strategy())
def test_multiindex(df):
"""Test expand_grid output for a pandas MultiIndex with a name."""
A = df["names"]
base = df.loc[:, ["numbers"]].assign(num=df.numbers * 4)
B = pd.MultiIndex.from_frame(base)
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["names"]]
B = base.copy()
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=categoricaldf_strategy())
def test_multiindex_names_none(df):
"""Test expand_grid output for a pandas MultiIndex without a name."""
A = df["names"]
base = df.loc[:, ["numbers"]].assign(num=df.numbers * 4)
B = pd.MultiIndex.from_frame(base, names=[None, None])
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["names"]]
B = base.copy()
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], ["names", 0, 1]]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_pandas_extension_array(df):
"""Test expand_grid output for a pandas array."""
A = df["a"]
B = df["cities"].astype("string").array
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]].astype("string").set_axis([0], axis=1)
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
| assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
# coding=utf-8
# Author: <NAME>
# Date: Sept 02, 2019
#
# Description: Builds a MultiLayer network (HS, MM & DM) based on genes found by DGE with StringDB edges.
#
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
| pd.set_option('display.max_columns', 500) | pandas.set_option |
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
class TestTZConvert:
def test_tz_convert(self, frame_or_series):
rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern")
obj = DataFrame({"a": 1}, index=rng)
if frame_or_series is not DataFrame:
obj = obj["a"]
result = obj.tz_convert("Europe/Berlin")
expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin"))
if frame_or_series is not DataFrame:
expected = expected["a"]
assert result.index.tz.zone == "Europe/Berlin"
tm.assert_equal(result, expected)
def test_tz_convert_axis1(self):
rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern")
obj = DataFrame({"a": 1}, index=rng)
obj = obj.T
result = obj.tz_convert("Europe/Berlin", axis=1)
assert result.columns.tz.zone == "Europe/Berlin"
expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin"))
tm.assert_equal(result, expected.T)
def test_tz_convert_naive(self, frame_or_series):
# can't convert tz-naive
rng = date_range("1/1/2011", periods=200, freq="D")
ts = Series(1, index=rng)
ts = frame_or_series(ts)
with pytest.raises(TypeError, match="Cannot convert tz-naive"):
ts.tz_convert("US/Eastern")
@pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"])
def test_tz_convert_and_localize(self, fn):
l0 = date_range("20140701", periods=5, freq="D")
l1 = date_range("20140701", periods=5, freq="D")
int_idx = Index(range(5))
if fn == "tz_convert":
l0 = l0.tz_localize("UTC")
l1 = l1.tz_localize("UTC")
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)("US/Pacific")
l1_expected = getattr(idx, fn)("US/Pacific")
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)("US/Pacific")
tm.assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
# freq is not preserved in MultiIndex construction
l1_expected = l1_expected._with_freq(None)
l0_expected = l0_expected._with_freq(None)
l1 = l1._with_freq(None)
l0 = l0._with_freq(None)
df3 = getattr(df2, fn)("US/Pacific", level=0)
assert not df3.index.levels[0].equals(l0)
tm.assert_index_equal(df3.index.levels[0], l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)("US/Pacific", level=1)
| tm.assert_index_equal(df3.index.levels[0], l0) | pandas._testing.assert_index_equal |
# Copyright 2019 <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Plot Service will make use of appropriately decorated functions in this module.
import datetime
import logging
import re
import time
from collections import namedtuple
from enum import auto
from numbers import Real
from dateutil import tz
import cachetools.func
import numpy as np
import pandas as pd
from pandas import Series
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, USMemorialDay, USLaborDay, USThanksgivingDay, \
nearest_workday
from gs_quant.api.gs.assets import GsIdType
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.fields import Fields
from gs_quant.datetime.gscalendar import GsCalendar
from gs_quant.datetime.point import relative_days_add
from gs_quant.errors import MqTypeError, MqValueError
from gs_quant.markets.securities import *
from gs_quant.markets.securities import Asset, AssetIdentifier, SecurityMaster
from gs_quant.target.common import AssetClass, FieldFilterMap, AssetType, Currency
from gs_quant.timeseries.helper import log_return, plot_measure
GENERIC_DATE = Union[datetime.date, str]
TD_ONE = datetime.timedelta(days=1)
_logger = logging.getLogger(__name__)
MeasureDependency: namedtuple = namedtuple("MeasureDependency", ["id_provider", "query_type"])
# TODO: get NERC Calendar from SecDB
class NercCalendar(AbstractHolidayCalendar):
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMemorialDay,
| Holiday('July 4th', month=7, day=4, observance=nearest_workday) | pandas.tseries.holiday.Holiday |
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from math import ceil
import numpy as np
import pandas as pd
from pytorch_transformers import RobertaConfig, RobertaTokenizer, RobertaModel
from sklearn.preprocessing import MinMaxScaler
import sklearn.metrics as mt
from sklearn.model_selection import KFold
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from model import MatchArchitecture
from data_utils import MatchingDataset
RANDOM_SEED = 117
SEQ_LEN = 10
RNN_DIM = 64
LINEAR_DIM = 64
CLASSES = 1
ROBERTA_FEAT_SIZE = 768
ADDITIONAL_FEAT_SIZE = 0
F1_POS_THRESHHOLD = .3
epsilon = 1e-8
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
train_size = 8870
oof_preds = np.zeros((train_size, 1))
oof_preds2 = np.zeros((train_size, 1))
oof_labels = np.zeros((train_size, 1))
cur_oof_inx = 0
for fold in range(5):
VERSION = '1.1_fold_{}'.format(fold)
SAVE_DIR = '/ssd-1/clinical/clinical-abbreviations/checkpoints/{}.pt'.format(VERSION)
train_data_path = '/ssd-1/clinical/clinical-abbreviations/code/data/Train1_train.csv'
val_data_path = '/ssd-1/clinical/clinical-abbreviations/code/data/Train1_val.csv'
features_path = '/ssd-1/clinical/clinical-abbreviations/data/full_train.csv'
load_data = True
if load_data:
path = '/ssd-1/clinical/clinical-abbreviations/code/data/'
positives = pd.read_csv(path + 'Train1.csv', sep='|')
negatives = pd.read_csv(path + 'Train2.csv', sep='|')
train_strings = | pd.concat((positives, negatives), axis=0) | pandas.concat |
'''Python script to generate CAC'''
'''Authors - <NAME>
'''
import numpy as np
import pandas as pd
from datetime import datetime
import collections
from .helpers import *
class CAC:
def __init__(self, fin_perf, oper_metrics, oth_metrics):
print("INIT CAC")
self.fin_perf = | pd.DataFrame(fin_perf) | pandas.DataFrame |
# coding=utf-8
import platform
import time
import socket
import zlib
import threading
import json
from os import getenv
import six
import msgpack
import requests
import pandas as pd
from thriftpy2 import transport, protocol
from thriftpy2.rpc import make_client
try:
from urllib.parse import quote as urlquote
except ImportError:
from urllib import quote as urlquote
from .utils import classproperty, isatty, get_mac_address
from .version import __version__ as current_version
from .compat import pickle_compat as pc
from .thriftclient import thrift
from .api import * # noqa
if platform.system().lower() != "windows":
socket_error = (transport.TTransportException, socket.error, protocol.cybin.ProtocolError)
else:
socket_error = (transport.TTransportException, socket.error)
AUTH_API_URL = "https://dataapi.joinquant.com/apis"  # URL used to fetch the auth token
class JQDataClient(object):
_threading_local = threading.local()
_auth_params = {}
_default_host = "172.16.31.10"
_default_port = 7000
request_timeout = 300
request_attempt_count = 3
@classproperty
    def _local_socket_timeout(cls):
        """Local socket timeout.
        Since the network has some latency, this is set slightly longer than the
        server-side timeout; otherwise the server may finish processing normally
        while the client has already timed out and disconnected.
        """
return cls.request_timeout + 5
@classmethod
def instance(cls):
_instance = getattr(cls._threading_local, '_instance', None)
if _instance is None:
if not cls._auth_params:
username = getenv("JQDATA_USERNAME")
password = getenv("<PASSWORD>")
if username and password:
cls._auth_params = {
"username": username,
"password": password,
"host": getenv("JQDATA_HOST") or cls._default_host,
"port": getenv("JQDATA_PORT") or cls._default_port,
"version": current_version,
}
if cls._auth_params:
_instance = JQDataClient(**cls._auth_params)
cls._threading_local._instance = _instance
return _instance
def __init__(self, host, port, username="", password="", token="", version=""):
self.host = host or self._default_host
self.port = int(port or self._default_port)
self.username = username
self.password = password
self.token = token
self.version = version
assert self.host, "host is required"
assert self.port, "port is required"
assert self.username or self.token, "username is required"
assert self.password or self.token, "password is required"
self.client = None
self.inited = False
self.not_auth = True
self.compress = True
self.data_api_url = ""
self._http_token = ""
@classmethod
def set_request_params(cls, **params):
if "request_timeout" in params:
request_timeout = params["request_timeout"]
if not request_timeout:
cls.request_timeout = None
else:
request_timeout = float(request_timeout)
cls.request_timeout = (
request_timeout if request_timeout > 0 else None
)
instance = cls.instance()
if instance and instance.inited and instance.client:
try:
try:
sock = instance.client._iprot.trans._trans.sock
except AttributeError:
sock = instance.client._iprot.trans.sock
sock.settimeout(cls.request_timeout)
except Exception:
pass
if "request_attempt_count" in params:
request_attempt_count = int(params["request_attempt_count"])
if request_attempt_count > 10:
                raise Exception("The number of request attempts cannot be greater than 10")
cls.request_attempt_count = request_attempt_count
@classmethod
def set_auth_params(cls, **params):
if params != cls._auth_params and cls.instance():
cls.instance()._reset()
cls.instance()._threading_local._instance = None
cls._auth_params = params
cls.instance().ensure_auth()
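    # Illustrative usage (a sketch; the credentials are placeholders, not from
    # the source): either export JQDATA_USERNAME (plus the corresponding password
    # variable) and optionally JQDATA_HOST/JQDATA_PORT before use, or call
    #     JQDataClient.set_auth_params(username="user", password="***")
    # which resets any existing instance and re-authenticates via ensure_auth().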
def _create_client(self):
self.client = make_client(
thrift.JqDataService,
self.host,
self.port,
timeout=(self.request_timeout * 1000)
)
return self.client
def ensure_auth(self):
if not self.inited:
if not self.username and not self.token:
raise RuntimeError("not inited")
self._create_client()
self.inited = True
if self.username:
error, response = None, None
for _ in range(self.request_attempt_count):
try:
response = self.client.auth(
self.username,
self.password,
self.compress,
get_mac_address(),
self.version
)
break
except socket_error as ex:
error = ex
time.sleep(0.5)
self.client.close()
self._create_client()
continue
else:
if error and not response:
raise error
if response and response.error:
self.data_api_url = response.error
else:
self.data_api_url = AUTH_API_URL
else:
response = self.client.auth_by_token(self.token)
auth_message = response.msg
if not isatty():
auth_message = ""
if not response.status:
self._threading_local._instance = None
raise self.get_error(response)
else:
if self.not_auth:
print("auth success %s" % auth_message)
self.not_auth = False
def _reset(self):
if self.client:
self.client.close()
self.client = None
self.inited = False
self.http_token = ""
def logout(self):
self._reset()
self._threading_local._instance = None
self.__class__._auth_params = {}
        print("Logged out")
def get_error(self, response):
err = None
if six.PY2:
system = platform.system().lower()
if system == "windows":
err = Exception(response.error.encode("gbk"))
else:
err = Exception(response.error.encode("utf-8"))
else:
err = Exception(response.error)
return err
@classmethod
def convert_message(cls, msg):
if isinstance(msg, dict):
data_type = msg.get("data_type", None)
data_value = msg.get("data_value", None)
if data_type is not None and data_value is not None:
params = data_value
if data_type.startswith("pandas"):
data_index_type = params.pop("index_type", None)
if data_index_type == "Index":
params["index"] = | pd.Index(params["index"]) | pandas.Index |
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be roundtriped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000, 'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000, 'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1, 'D')))
self.assertEqual( | ct('-1d') | pandas.tseries.timedeltas._coerce_scalar_to_timedelta_type |
# This script is used to get the Hindi URLs and dates for the 'World' topic on the 'ZEEBIZ' website.
# We have to change the 'lin' variable (URL) for every topic in Hindi ZEEBIZ.
# Some of the topics available in Hindi ZEEBIZ:
# world, small-business, technology, banking, india, etc.
# The following libraries should be pre-installed on the system.
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import tqdm
import pandas as pd
def get_hind_urls():
all_links=[]
date_time=[]
    # We need to change this 'range(35)' for every new topic based on the number of pages available for that particular topic.
for page_num in tqdm.tqdm(range(35)):
try:
lin='https://www.zeebiz.com/hindi/small-business?page='+str(page_num)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
reg_url =lin
req = Request(url=reg_url, headers=headers)
content = urlopen(req).read()
soup = BeautifulSoup(content, 'lxml')
table = soup.findAll('div',attrs={"class":"views-element-container"})
one=table[2].find_all('div', {'class': 'mostrecent12'})
for i in range(10):
all_links.append('https://www.zeebiz.com'+one[i].a['href'])
date_time.append(one[i].text.split('\n')[-3].strip())
except:
            print('something went wrong with page number: ' + str(page_num))
return all_links , date_time
if __name__=='__main__':
links,dates=get_hind_urls()
df= | pd.DataFrame() | pandas.DataFrame |
"""MovieLens dataset"""
import numpy as np
import os
import re
import pandas as pd
import scipy.sparse as sp
import torch as th
import scipy.sparse
from scipy.sparse import coo_matrix
import dgl
from dgl.data.utils import download, extract_archive, get_download_dir
from utils import to_etype_name
import pickle as pkl
import h5py
import pdb
import random
from scipy.sparse import linalg
from data_utils import load_data, map_data, download_dataset
from sklearn.metrics import mean_squared_error
from math import sqrt
from bidict import bidict
_urls = {
'ml-100k' : 'http://files.grouplens.org/datasets/movielens/ml-100k.zip',
'ml-1m' : 'http://files.grouplens.org/datasets/movielens/ml-1m.zip',
'ml-10m' : 'http://files.grouplens.org/datasets/movielens/ml-10m.zip',
}
_paths = {
'flixster' : './raw_data/flixster/training_test_dataset.mat',
'douban' : './raw_data/douban/training_test_dataset.mat',
'yahoo_music' : './raw_data/yahoo_music/training_test_dataset.mat',
'ml-100k' : './raw_data/ml-100k/',
'ml-1m' : './raw_data/ml-1m/',
'ml-10m' : './raw_data/ml-10M100K/',
'Tmall':'./raw_data/Tmall/tzzs_data.csv',
'Tmall_small':'./raw_data/Tmall_small/Tmall_small.rating',
'Tmall_0_4000_20_4000':'./raw_data/Tmall_small/Tmall_0_4000_20_4000.rating',
'Tmall_20_4000_20_4000':'./raw_data/Tmall_small/Tmall_20_4000_20_4000.rating',
'Tmall_40_4000_40_4000':'./raw_data/Tmall_small/Tmall_40_4000_40_4000.rating',
"taobao_10_2":'./raw_data/taobao_10_2/',
"taobao_15_5":'./raw_data/taobao_15_5/',
"taobao_8_3":'./raw_data/taobao_8_3/',
"taobao":'./raw_data/Taobao1/',
"Beibei":'./raw_data/Beibei/'
#'Tmall_small':'./raw_data/Tmall_small/tzzs_data.csv'
}
READ_DATASET_PATH = get_download_dir()
GENRES_ML_100K =\
['unknown', 'Action', 'Adventure', 'Animation',
'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
'Thriller', 'War', 'Western']
GENRES_ML_1M = GENRES_ML_100K[1:]
GENRES_ML_10M = GENRES_ML_100K + ['IMAX']
def load_data(csv_file):
tp = pd.read_csv(csv_file, sep='\t')
return tp
def get_count(tp, id):
playcount_groupbyid = tp[[id]].groupby(id, as_index=False)
count = playcount_groupbyid.size()
return count
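# Illustrative usage of get_count (a sketch; the frame below is hypothetical):
#     tp = pd.DataFrame({"user_id": [0, 0, 1], "movie_id": [5, 7, 5]})
#     get_count(tp, "user_id")  # per-user interaction counts via groupby().size()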
class DataSetLoader(object):
def __init__(self, name, device, mix_cpu_gpu=False,
use_one_hot_fea=True, symm=True,
test_ratio=0.1, valid_ratio=0.1,sparse_ratio = 0, sample_rate = 3):
self._name = name
self._device = device
self._symm = symm
self._test_ratio = test_ratio
self._valid_ratio = valid_ratio
print("_paths[self._name]:",_paths[self._name])
self._dir = os.path.join(_paths[self._name])
self.sample_rate = sample_rate
print(self._name[0:5])
if self._name in ['ml-100k', 'ml-1m', 'ml-10m']:
# download and extract
download_dir = get_download_dir()
print("download_dir: ", download_dir)
zip_file_path = '{}/{}.zip'.format(download_dir, name)
download(_urls[name], path=zip_file_path)
extract_archive(zip_file_path, '{}/{}'.format(download_dir, name))
if name == 'ml-10m':
root_folder = 'ml-10M100K'
else:
root_folder = name
self._dir = os.path.join(download_dir, name, root_folder)
print("Starting processing {} ...".format(self._name))
self._load_raw_user_info()
self._load_raw_movie_info()
print('......')
if self._name == 'ml-100k':
self.all_train_rating_info = self._load_raw_rates(os.path.join(self._dir, 'u1.base'), '\t')
self.test_rating_info = self._load_raw_rates(os.path.join(self._dir, 'u1.test'), '\t')
self.all_rating_info = pd.concat([self.all_train_rating_info, self.test_rating_info])
elif self._name == 'ml-1m' or self._name == 'ml-10m':
self.all_rating_info = self._load_raw_rates(os.path.join(self._dir, 'ratings.dat'), '::')
num_test = int(np.ceil(self.all_rating_info.shape[0] * self._test_ratio))
shuffled_idx = np.random.permutation(self.all_rating_info.shape[0])
self.test_rating_info = self.all_rating_info.iloc[shuffled_idx[: num_test]]
self.all_train_rating_info = self.all_rating_info.iloc[shuffled_idx[num_test: ]]
else:
raise NotImplementedError
print('......')
num_valid = int(np.ceil(self.all_train_rating_info.shape[0] * self._valid_ratio))
shuffled_idx = np.random.permutation(self.all_train_rating_info.shape[0])
self.valid_rating_info = self.all_train_rating_info.iloc[shuffled_idx[: num_valid]]
self.train_rating_info = self.all_train_rating_info.iloc[shuffled_idx[num_valid: ]]
self.possible_rating_values = np.append(np.unique(self.train_rating_info["rating"].values) ,0)
elif self._name in ['Tmall', 'Tmall_small'] or self._name[0:5] == 'Tmall':
#self.all_rating_info, M = self._load_tmall(os.path.join(_paths[self._name]))
#print(self._name[0:5])
self.all_rating_info = self._load_raw_rates_Tmall(os.path.join(_paths[self._name]), ' ')
#print(self.all_rating_info)
num_test = int(np.ceil(self.all_rating_info.shape[0] * (1 - self._test_ratio)))
shuffled_idx = np.random.permutation(self.all_rating_info.shape[0])
#self.test_rating_info = self.all_rating_info.iloc[shuffled_idx[: num_test]]
#self.all_train_rating_info = self.all_rating_info.iloc[shuffled_idx[num_test: ]]
self.test_rating_info = self.all_rating_info.iloc[num_test:]
self.all_train_rating_info = self.all_rating_info.iloc[: num_test]
#print("self.all_train_rating_info")
#print(self.all_train_rating_info)
user_list = pd.unique(self.all_rating_info["user_id"].values)
item_list = pd.unique(self.all_rating_info["movie_id"].values)
#print("*******", user_list)
user_nodes, item_nodes = user_list, item_list
print('......')
num_valid = int(np.ceil(self.all_train_rating_info.shape[0] * self._valid_ratio))
#shuffled_idx = np.random.permutation(self.all_train_rating_info.shape[0])
#self.valid_rating_info = self.all_train_rating_info.iloc[shuffled_idx[: num_valid]]
#self.train_rating_info = self.all_train_rating_info.iloc[shuffled_idx[num_valid: ]]
self.valid_rating_info = self.all_train_rating_info.iloc[: num_valid]
self.train_rating_info = self.all_train_rating_info.iloc[num_valid: ]
shuffled_idx = np.random.permutation(self.train_rating_info.shape[0])
self.train_rating_info = self.train_rating_info.iloc[shuffled_idx]
self.possible_rating_values = np.append(np.unique(self.train_rating_info["rating"].values) ,0)
#print(self.possible_rating_values)
elif self._name in ['taobao', 'Beibei']:
tp_test = load_data(os.path.join(self._dir, 'buy.test.txt'))
tp_train = load_data(os.path.join(self._dir, 'buy.train.txt'))
tp_view = load_data(os.path.join(self._dir, 'pv.csv'))
tp_cart = load_data(os.path.join(self._dir, 'cart.csv'))
tp_train.insert(tp_train.shape[1], 'rating', 1)
tp_test.insert(tp_test.shape[1], 'rating', 1)
tp_view = tp_view.drop(columns='time')
tp_view = tp_view.drop(columns='count')
tp_view.insert(tp_view.shape[1], 'rating', 1)
tp_cart = tp_cart.drop(columns='time')
tp_cart = tp_cart.drop(columns='count')
tp_cart.insert(tp_cart.shape[1], 'rating', 2)
# tp_train = tp_train[0:4429]
# tp_test = tp_test[0:1000]
# tp_view = tp_view[0:33084]
# tp_cart = tp_cart[0:4329]
colum = ['user_id','movie_id','rating']
tp_train.columns = colum
tp_test.columns = colum
tp_view.columns = colum
tp_cart.columns = colum
tp_all = tp_train.append(tp_test)
usercount, itemcount = get_count(tp_all, 'user_id'), get_count(tp_all, 'movie_id')
n_users, n_items = usercount.shape[0], itemcount.shape[0]
# n_users, n_items = usercount.shape[0], 39493
# test buy data
self.test_rating_info = tp_test
self.test_rating_info.columns = colum
#shuffled_idx = np.random.permutation(self.test_rating_info.shape[0])
#self.test_rating_info = self.test_rating_info.iloc[shuffled_idx]
#### valid buy data
# data_list = tp_train.values
# result = []
# for i in range(0, data_list.shape[0]):
# if data_list[i][0] == data_list[i-1][0]+1:
# result.append(data_list[i-1])
# result = np.squeeze(result)
# self.valid_rating_info = pd.DataFrame(result)
# self.valid_rating_info.columns = colum
# shuffled_idx = np.random.permutation(self.valid_rating_info.shape[0])
# self.valid_rating_info = self.valid_rating_info.iloc[shuffled_idx]
#### train buy data
#data_list = tp_train.values
# result = []
# for i in range(0, data_list.shape[0]-1):
# if data_list[i+1][0] == data_list[i][0]:
# result.append(data_list[i])
# result = np.squeeze(result)
# tp_train = pd.DataFrame(result)
# tp_train = pd.DataFrame(data_list)
# buy data add cart and view
#frames = []
# colum = ['user_id','movie_id','rating']
# tp_train.columns = colum
# tp_test.columns = colum
# tp_view.columns = colum
# tp_cart.columns = colum
            # all data
#self.all_train_rating_info = pd.concat([tp_train, tp_cart, tp_view],axis = 0)
self.all_train_rating_info = pd.concat([tp_train],axis = 0)
#self.all_train_rating_info = pd.concat([tp_train, tp_cart],axis = 0)
#self.all_train_rating_info = tp_train
self.all_train_rating_info.columns = colum
#shuffled_idx = np.random.permutation(self.all_train_rating_info.shape[0])
#self.all_train_rating_info = self.all_train_rating_info.iloc[shuffled_idx]
# self.all_train_rating_info = self._load_raw_rates_taobao(os.path.join(self._dir, 'taobao_train.rating'), ' ')
# print("rating:",self.all_train_rating_info)
# self.test_rating_info = self._load_raw_rates_taobao(os.path.join(self._dir, 'taobao_test.rating'), ' ')
# self.valid_rating_info = self._load_raw_rates_taobao(os.path.join(self._dir, 'taobao_valid.rating'), ' ')
# self.all_rating_info = pd.concat([self.all_train_rating_info, self.test_rating_info, self.valid_rating_info])
self.all_rating_info = pd.concat([self.all_train_rating_info, self.test_rating_info])
#print("self.all_train_rating_info:",self.all_train_rating_info[0:10])
print('......')
self.train_rating_info = self.all_train_rating_info
self.possible_rating_values = np.append(np.unique(self.train_rating_info["rating"].values) ,0)
#self.possible_rating_values = np.unique(self.train_rating_info["rating"].values)
user_list = pd.unique(self.all_rating_info["user_id"].values)
item_list = pd.unique(self.all_rating_info["movie_id"].values)
user_nodes, item_nodes = user_list, item_list
u_train = np.array(tp_train['user_id'], dtype=np.int32)
i_train = np.array(tp_train['movie_id'], dtype=np.int32)
u_test = np.array(tp_test['user_id'], dtype=np.int32)
i_test = np.array(tp_test['movie_id'], dtype=np.int32)
u_view = np.array(tp_view['user_id'], dtype=np.int32)
i_view = np.array(tp_view['movie_id'], dtype=np.int32)
u_cart = np.array(tp_cart['user_id'], dtype=np.int32)
i_cart = np.array(tp_cart['movie_id'], dtype=np.int32)
print(u_train)
count = np.ones(len(u_train))
print("(count, (u_train, i_train):",(count.shape, (u_train.shape, i_train.shape)))
print("(n_users, n_items):",(n_users, n_items))
train_m = scipy.sparse.csr_matrix((count, (u_train, i_train)), dtype=np.int16, shape=(n_users, n_items))
print("train_m:",train_m.shape)
count = np.ones(len(u_test))
test_m = scipy.sparse.csr_matrix((count, (u_test, i_test)), dtype=np.int16, shape=(n_users, n_items))
print("test_m:",test_m.shape)
tset = {}
for i in range(len(u_test)):
if u_test[i] in tset:
#if tset.has_key(u_test[i]):
tset[u_test[i]].append(i_test[i])
else:
tset[u_test[i]] = [i_test[i]]
self.tset = tset
self.train_m = train_m
self.test_m = test_m
print('......')
else:
raise NotImplementedError
self.user_poll = set(pd.unique(self.all_rating_info["user_id"].values))
self.item_poll = set(pd.unique(self.all_rating_info["movie_id"].values))
self.negatives = []
#self.negatives = self.sample_negative(self.train_rating_info, self.sample_rate, random_number=1)
print("All rating pairs : {}".format(self.all_rating_info.shape[0]))
print("\tAll train rating pairs : {}".format(self.all_train_rating_info.shape[0]))
print("\t\tTrain rating pairs : {}".format(self.train_rating_info.shape[0]))
#print("\t\tValid rating pairs : {}".format(self.valid_rating_info.shape[0]))
print("\tTest rating pairs : {}".format(self.test_rating_info.shape[0]))
if self._name in ['ml-100k', 'ml-1m', 'ml-10m']:
self.user_info = self._drop_unseen_nodes(orign_info=self.user_info,
cmp_col_name="id",
reserved_ids_set=set(self.all_rating_info["user_id"].values),
label="user")
self.movie_info = self._drop_unseen_nodes(orign_info=self.movie_info,
cmp_col_name="id",
reserved_ids_set=set(self.all_rating_info["movie_id"].values),
label="movie")
# Map user/movie to the global id
self.global_user_id_map = {ele: i for i, ele in enumerate(self.user_info['id'])}
self.global_movie_id_map = {ele: i for i, ele in enumerate(self.movie_info['id'])}
elif self._name in ['flixster', 'douban', 'yahoo_music','Tmall','Tmall_small','taobao','Beibei'] or self._name[0:5] == 'Tmall' or self._name[0:6] == 'taobao':
self.global_user_id_map = bidict({})
self.global_movie_id_map = bidict({})
# max_uid = 0
# max_vid = 0
print("user and item number:")
# print(user_nodes)
# print(item_nodes)
for i in range(len(user_nodes)):
self.global_user_id_map[user_nodes[i]] = i
for i in range(len(item_nodes)):
self.global_movie_id_map[item_nodes[i]] = i
else:
raise NotImplementedError
print('Total user number = {}, movie number = {}'.format(len(self.global_user_id_map),
len(self.global_movie_id_map)))
self._num_user = len(self.global_user_id_map)
self._num_movie = len(self.global_movie_id_map)
### Generate features
if use_one_hot_fea:
self.user_feature = None
self.movie_feature = None
else:
raise NotImplementedError
# if self.user_feature is None:
# self.user_feature_shape = (self.num_user, self.num_user + self.num_movie + 3)
# self.movie_feature_shape = (self.num_movie, self.num_user + self.num_movie + 3)
# if mix_cpu_gpu:
# self.user_feature = th.cat([th.Tensor(list(range(3, self.num_user+3))).reshape(-1, 1), th.zeros([self.num_user, 1])+1, th.zeros([self.num_user, 1])], 1)
# self.movie_feature = th.cat([th.Tensor(list(range(3, self.num_movie+3))).reshape(-1, 1), th.ones([self.num_movie, 1])+1, th.zeros([self.num_movie, 1])], 1)
# # self.movie_feature = th.cat([th.Tensor(list(range(self.num_user+3, self.num_user + self.num_movie + 3))).reshape(-1, 1), th.ones([self.num_movie, 1])+1, th.zeros([self.num_movie, 1])], 1)
# else:
# self.user_feature = th.cat([th.Tensor(list(range(3, self.num_user+3))).reshape(-1, 1), th.zeros([self.num_user, 1])+1, th.zeros([self.num_user, 1])], 1).to(self._device)
# self.movie_feature = th.cat([th.Tensor(list(range(self.num_user+3, self.num_user + self.num_movie + 3))).reshape(-1, 1), th.ones([self.num_movie, 1])+1, th.zeros([self.num_movie, 1])], 1).to(self._device)
# else:
# raise NotImplementedError
if self.user_feature is None:
self.user_feature_shape = (self.num_user, self.num_user + self.num_movie + 3)
self.movie_feature_shape = (self.num_movie, self.num_user + self.num_movie + 3)
if mix_cpu_gpu:
self.user_feature = th.cat([th.Tensor(list(range(3, self.num_user+3))).reshape(-1, 1)], 1)
self.movie_feature = th.cat([th.Tensor(list(range(3, self.num_movie+3))).reshape(-1, 1)], 1)
# self.movie_feature = th.cat([th.Tensor(list(range(self.num_user+3, self.num_user + self.num_movie + 3))).reshape(-1, 1), th.ones([self.num_movie, 1])+1, th.zeros([self.num_movie, 1])], 1)
else:
self.user_feature = th.cat([th.Tensor(list(range(3, self.num_user+3))).reshape(-1, 1)], 1).to(self._device)
self.movie_feature = th.cat([th.Tensor(list(range(self.num_user+3, self.num_user + self.num_movie + 3))).reshape(-1, 1)], 1).to(self._device)
else:
raise NotImplementedError
# print(self.user_feature.shape)
info_line = "Feature dim: "
info_line += "\nuser: {}".format(self.user_feature_shape)
info_line += "\nmovie: {}".format(self.movie_feature_shape)
print(info_line)
#print(self.valid_rating_info)
all_train_rating_pairs, all_train_rating_values = self._generate_pair_value(self.all_train_rating_info)
train_rating_pairs, train_rating_values = self._generate_pair_value(self.train_rating_info)
#valid_rating_pairs, valid_rating_values = self._generate_pair_value(self.valid_rating_info)
test_rating_pairs, test_rating_values = self._generate_pair_value(self.test_rating_info)
def _make_labels(ratings):
labels = th.LongTensor(np.searchsorted(self.possible_rating_values, ratings)).to(device)
return labels
print("train_rating_values:",train_rating_values)
self.train_enc_graph = self._generate_enc_graph(train_rating_pairs, train_rating_values, add_support=True)
self.train_dec_graph = self._generate_dec_graph(train_rating_pairs)
self.train_labels = _make_labels(train_rating_values)
self.train_truths = th.FloatTensor(train_rating_values).to(device)
#self.valid_enc_graph = self.train_enc_graph
#self.valid_dec_graph = self._generate_dec_graph(valid_rating_pairs)
#self.valid_labels = _make_labels(valid_rating_values)
#self.valid_truths = th.FloatTensor(valid_rating_values).to(device)
self.test_enc_graph = self._generate_enc_graph(all_train_rating_pairs, all_train_rating_values, add_support=True)
self.test_dec_graph = self._generate_dec_graph(test_rating_pairs)
self.test_labels = _make_labels(test_rating_values)
self.test_truths = th.FloatTensor(test_rating_values).to(device)
# Build data for evaluating recall on the test set
self.test_recall_labels = _make_labels(self.test_rating_info["rating"].values)
#valid_recall_pair, valid_rating_matrix = self._generate_pair_value_for_recall(self.valid_rating_info)
#self.valid_rating_matrix = th.FloatTensor(valid_rating_matrix).to(device)
#self.valid_recall_dec_graph = self._generate_dec_graph(valid_recall_pair)
#test_recall_pair, test_rating_matrix = self._generate_pair_value_for_recall_new(user_list, item_len)
#test_recall_pair = self._generate_pair_value_for_recall_new(user_list, item_len)
#self.test_rating_matrix = th.FloatTensor(test_rating_matrix).to(device)
#self.test_recall_dec_graph = self._generate_dec_graph(test_recall_pair)
def _npairs(graph):
rst = 0
for r in self.possible_rating_values:
r = to_etype_name(r)
rst += graph.number_of_edges(str(r))
return rst
print("Train enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
self.train_enc_graph.number_of_nodes('user'), self.train_enc_graph.number_of_nodes('movie'),
_npairs(self.train_enc_graph)))
print("Train dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
self.train_dec_graph.number_of_nodes('user'), self.train_dec_graph.number_of_nodes('movie'),
self.train_dec_graph.number_of_edges()))
# print("Valid enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
# self.valid_enc_graph.number_of_nodes('user'), self.valid_enc_graph.number_of_nodes('movie'),
# _npairs(self.valid_enc_graph)))
# print("Valid dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
# self.valid_dec_graph.number_of_nodes('user'), self.valid_dec_graph.number_of_nodes('movie'),
# self.valid_dec_graph.number_of_edges()))
# print("Test enc graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
# self.test_enc_graph.number_of_nodes('user'), self.test_enc_graph.number_of_nodes('movie'),
# _npairs(self.test_enc_graph)))
# print("Test dec graph: \t#user:{}\t#movie:{}\t#pairs:{}".format(
# self.test_dec_graph.number_of_nodes('user'), self.test_dec_graph.number_of_nodes('movie'),
# self.test_dec_graph.number_of_edges()))
def sample_negative(self, ratings, sample_rate, random_number):
#"""return all negative items & 100 sampled negative items"""
random.seed(random_number)
interact_status = ratings.groupby('user_id')['movie_id'].apply(set).reset_index().rename(columns={'itemId': 'interacted_items'})
#print(interact_status)
#item_list = set(item_list)
interact_status['negative_items'] = interact_status['movie_id'].apply(lambda x: self.item_poll - x)
#print(interact_status['negative_items'])
interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(list(x), sample_rate))  # random.sample needs a sequence, not a set
return interact_status[['user_id', 'negative_items', 'negative_samples']]
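# Usage sketch (hypothetical call; mirrors the commented-out call in __init__ above):
#   negs = self.sample_negative(self.train_rating_info, self.sample_rate, random_number=1)
#   negs.columns -> ['user_id', 'negative_items', 'negative_samples']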
def negative_all(self, ratings):
#"""return all negative items """
interact_status = ratings.groupby('user_id')['movie_id'].apply(set).reset_index().rename(columns={'itemId': 'interacted_items'})
#print(interact_status)
#item_list = set(item_list)
interact_status['negative_items'] = interact_status['movie_id'].apply(lambda x: list(self.item_poll - x))
#print(interact_status['negative_items'])
return interact_status[['user_id', 'negative_items']]
def _generate_pair_value_for_zero(self,train_ratings, negatives):
train_ratings = pd.merge(self.all_train_rating_info, negatives[['user_id', 'negative_samples']], on='user_id')
import utils
import pandas as pd
import numpy as np
import logging
import datetime
def data_station_year(dirname='Data/training/',year=2016):
logger = logging.getLogger(__name__)
logger.info("load all data")
x=utils.load_data(dirname=dirname,year=year)
print(" start loading chimere")
t1 = x['chimeres']['NO2']
t1.drop(columns='param', inplace=True)
t1['date'] = pd.to_datetime(t1['date'])
t1.set_index(['idPolair', 'date'], inplace=True)
t1.columns = ['NO2_chimere']
t2 = x['chimeres']['O3']
t2.drop(columns='param', inplace=True)
t2['date'] = pd.to_datetime(t2['date'])
t2.set_index(['idPolair', 'date'], inplace=True)
t2.columns = ['O3_chimere']
t3 = x['chimeres']['PM10']
t3.drop(columns='param', inplace=True)
t3['date'] = pd.to_datetime(t3['date'])
t3.set_index(['idPolair', 'date'], inplace=True)
t3.columns = ['PM10_chimere']
t4 = x['chimeres']['PM25']
t4.drop(columns='param', inplace=True)
t4['date'] = pd.to_datetime(t4['date'])
t4.set_index(['idPolair', 'date'], inplace=True)
t4.columns = ['PM25_chimere']
t5 = pd.merge(t1,pd.merge(t2, pd.merge(t3, t4, left_index=True, right_index=True,how='outer'), left_index=True, right_index=True,how='outer'),left_index=True, right_index=True,how='outer')
t5.sort_index(inplace=True)
#t5.interpolate(inplace=True)
#print(" remove nan")
#t5 = t5.groupby(t5.columns, axis=1).transform(lambda x: x.fillna(x.mean()))
#t5.fillna(t5.mean(), inplace=True)
#print(" set zeros")
#t5.fillna(0, inplace=True)
print(" start loading meteo")
meteo = x['meteo']
meteo['date'] = pd.to_datetime(meteo['date'])
meteo.set_index(['idPolair', 'date'], inplace=True)
meteo.sort_index(inplace=True)
#meteo.interpolate(inplace=True)
#print(" remove nan")
#meteo = meteo.groupby(meteo.columns, axis=1).transform(lambda x: x.fillna(x.mean()))
#meteo.fillna(meteo.mean(), inplace=True)
#print(" set zeros")
#meteo.fillna(0, inplace=True)
allData = pd.merge(meteo, t5, left_index=True, right_index=True,how='outer')
print(" start loading geops")
geops = x['geops']
keys = np.fromiter(geops.keys(), dtype=float)
z = pd.concat(geops.values(), keys=keys)
z['date'] = pd.to_datetime(z['date'])
z.set_index(['idPolair', 'date'], inplace=True)
z.sort_index(inplace=True)
#z.interpolate(inplace=True)
#print(" remove nan")
#z = z.groupby(z.columns, axis=1).transform(lambda x: x.fillna(x.mean()))
#z.fillna(z.mean(), inplace=True)
#print(" set zeros")
#z.fillna(0, inplace=True)
allData = pd.merge(allData, z, left_index=True, right_index=True,how='outer')
print(" start loading target values")
t6 = x['concentrations']['NO2']
t6.drop(columns=['Organisme','Station','Mesure'], inplace=True)
t6['date'] = pd.to_datetime(t6['date'])
t6['idPolair']=pd.to_numeric( t6['idPolair'])
t6.set_index(['idPolair', 'date'], inplace=True)
t6.columns = ['NO2']
t7 = x['concentrations']['O3']
t7.drop(columns=['Organisme','Station','Mesure'], inplace=True)
t7['date'] = pd.to_datetime(t7['date'])
t7['idPolair'] = pd.to_numeric(t7['idPolair'])
t7.set_index(['idPolair', 'date'], inplace=True)
t7.columns = ['O3']
t8 = x['concentrations']['PM10']
t8.drop(columns=['Organisme','Station','Mesure'], inplace=True)
t8['date'] = pd.to_datetime(t8['date'])
t8['idPolair'] = pd.to_numeric(t8['idPolair'])
t8.set_index(['idPolair', 'date'], inplace=True)
t8.columns = ['PM10']
t9 = x['concentrations']['PM25']
t9.drop(columns=['Organisme','Station','Mesure'], inplace=True)
t9['date'] = pd.to_datetime(t9['date'])
t9['idPolair'] = pd.to_numeric(t9['idPolair'])
t9.set_index(['idPolair', 'date'], inplace=True)
t9.columns = ['PM25']
# merge the four concentration tables, mirroring the chimere merge above
t10 = pd.merge(t6,pd.merge(t7, pd.merge(t8, t9, left_index=True, right_index=True,how='outer'), left_index=True, right_index=True,how='outer'),left_index=True, right_index=True,how='outer')
import copy
import logging
import os
import time
from collections import defaultdict
from typing import List, Union, Tuple
import networkx as nx
import numpy as np
import pandas as pd
import psutil
from .utils import process_hyperparameters
from ..augmentation.distill_utils import format_distillation_labels, augment_data
from ..constants import AG_ARGS, BINARY, MULTICLASS, REGRESSION, REFIT_FULL_NAME, REFIT_FULL_SUFFIX
from ..models import AbstractModel, BaggedEnsembleModel, StackerEnsembleModel, WeightedEnsembleModel, GreedyWeightedEnsembleModel, SimpleWeightedEnsembleModel
from ..features.feature_metadata import FeatureMetadata
from ..scheduler.scheduler_factory import scheduler_factory
from ..utils import default_holdout_frac, get_pred_from_proba, generate_train_test_split, infer_eval_metric, compute_permutation_feature_importance, extract_column, compute_weighted_metric
from ..utils.exceptions import TimeLimitExceeded, NotEnoughMemoryError, NoValidFeatures, NoGPUError
from ..utils.loaders import load_pkl
from ..utils.savers import save_json, save_pkl
from ..utils.feature_selection import FeatureSelector
logger = logging.getLogger(__name__)
# FIXME: Below is a major defect!
# Weird interaction for metrics like AUC during bagging.
# If kfold = 5, scores are 0.9, 0.85, 0.8, 0.75, and 0.7, the score is not 0.8! It is much lower because probs are combined together and AUC is recalculated
# Do we want this to happen? Should we compute 5 separate scores and average them instead?
# TODO: Dynamic model loading for ensemble models during prediction, only load more models if prediction is uncertain. This dynamically reduces inference time.
# TODO: Try midstack Semi-Supervised. Just take final models and re-train them, use bagged preds for SS rows. This would be very cheap and easy to try.
# TODO: Move to autogluon.core
class AbstractTrainer:
trainer_file_name = 'trainer.pkl'
trainer_info_name = 'info.pkl'
trainer_info_json_name = 'info.json'
distill_stackname = 'distill' # name of stack-level for distilled student models
def __init__(self, path: str, problem_type: str, eval_metric=None,
num_classes=None, quantile_levels=None, low_memory=False, feature_metadata=None, k_fold=0, n_repeats=1,
sample_weight=None, weight_evaluation=False, save_data=False, random_state=0, verbosity=2):
self.path = path
self.problem_type = problem_type
self.feature_metadata = feature_metadata
self.save_data = save_data
self.random_state = random_state # Integer value added to the stack level to get the random_state for kfold splits or the train/val split if bagging is disabled
self.verbosity = verbosity
self.sample_weight = sample_weight # TODO: consider redesign where Trainer doesn't need sample_weight column name and weights are separate from X
self.weight_evaluation = weight_evaluation
if eval_metric is not None:
self.eval_metric = eval_metric
else:
self.eval_metric = infer_eval_metric(problem_type=self.problem_type)
logger.log(25, f"AutoGluon will gauge predictive performance using evaluation metric: '{self.eval_metric.name}'")
if not (self.eval_metric.needs_pred or self.eval_metric.needs_quantile):
logger.log(25, "\tThis metric expects predicted probabilities rather than predicted class labels, so you'll need to use predict_proba() instead of predict()")
logger.log(20, "\tTo change this, specify the eval_metric argument of fit()")
self.num_classes = num_classes
self.quantile_levels = quantile_levels
self.feature_prune = False # will be set to True if feature-pruning is turned on.
self.low_memory = low_memory
self.bagged_mode = True if k_fold >= 2 else False
if self.bagged_mode:
self.k_fold = k_fold # int number of folds to do model bagging, < 2 means disabled
self.n_repeats = n_repeats
else:
self.k_fold = 0
self.n_repeats = 1
self.model_best = None
self.models = {} # Dict of model name -> model object. A key, value pair only exists if a model is persisted in memory. # TODO: v0.1 Rename and consider making private
self.model_graph = nx.DiGraph() # Directed Acyclic Graph (DAG) of model interactions. Describes how certain models depend on the predictions of certain other models. Contains numerous metadata regarding each model.
self.model_full_dict = {} # Dict of normal model -> FULL model. FULL models are produced by self.refit_single_full() and self.refit_ensemble_full().
self._model_full_dict_val_score = {} # Dict of FULL model -> normal model validation score in case the normal model had been deleted.
self.reset_paths = False
self._time_limit = None # Internal float of the total time limit allowed for a given fit call. Used in logging statements.
self._time_train_start = None # Internal timestamp of the time training started for a given fit call. Used in logging statements.
self._num_rows_train = None
self._num_cols_train = None
self.is_data_saved = False
self._X_saved = False
self._y_saved = False
self._X_val_saved = False
self._y_val_saved = False
self._groups = None # custom split indices
self._regress_preds_asprobas = False # whether to treat regression predictions as class-probabilities (during distillation)
self._extra_banned_names = set() # Names which are banned but are not used by a trained model.
# self._exceptions_list = [] # TODO: Keep exceptions list for debugging during benchmarking.
# path_root is the directory containing learner.pkl
@property
def path_root(self) -> str:
return self.path.rsplit(os.path.sep, maxsplit=2)[0] + os.path.sep
@property
def path_utils(self) -> str:
return self.path_root + 'utils' + os.path.sep
@property
def path_data(self) -> str:
return self.path_utils + 'data' + os.path.sep
def load_X(self):
if self._X_saved:
path = self.path_data + 'X.pkl'
return load_pkl.load(path=path)
return None
def load_X_val(self):
if self._X_val_saved:
path = self.path_data + 'X_val.pkl'
return load_pkl.load(path=path)
return None
def load_y(self):
if self._y_saved:
path = self.path_data + 'y.pkl'
return load_pkl.load(path=path)
return None
def load_y_val(self):
if self._y_val_saved:
path = self.path_data + 'y_val.pkl'
return load_pkl.load(path=path)
return None
def load_data(self):
X = self.load_X()
y = self.load_y()
X_val = self.load_X_val()
y_val = self.load_y_val()
return X, y, X_val, y_val
def save_X(self, X, verbose=True):
path = self.path_data + 'X.pkl'
save_pkl.save(path=path, object=X, verbose=verbose)
self._X_saved = True
def save_X_val(self, X, verbose=True):
path = self.path_data + 'X_val.pkl'
save_pkl.save(path=path, object=X, verbose=verbose)
self._X_val_saved = True
def save_y(self, y, verbose=True):
path = self.path_data + 'y.pkl'
save_pkl.save(path=path, object=y, verbose=verbose)
self._y_saved = True
def save_y_val(self, y, verbose=True):
path = self.path_data + 'y_val.pkl'
save_pkl.save(path=path, object=y, verbose=verbose)
self._y_val_saved = True
def get_model_names(self, stack_name: Union[List[str], str] = None, level: Union[List[int], int] = None, can_infer: bool = None, models: List[str] = None) -> List[str]:
if models is None:
models = list(self.model_graph.nodes)
if stack_name is not None:
if not isinstance(stack_name, list):
stack_name = [stack_name]
node_attributes: dict = self.get_models_attribute_dict(attribute='stack_name')
models = [model_name for model_name in models if node_attributes[model_name] in stack_name]
if level is not None:
if not isinstance(level, list):
level = [level]
node_attributes: dict = self.get_models_attribute_dict(attribute='level')
models = [model_name for model_name in models if node_attributes[model_name] in level]
# TODO: can_infer is technically more complicated: if an ancestor can't infer, then the model can't infer.
if can_infer is not None:
node_attributes = self.get_models_attribute_dict(attribute='can_infer')
models = [model for model in models if node_attributes[model] == can_infer]
return models
def get_max_level(self, stack_name: str = None, models: List[str] = None) -> int:
models = self.get_model_names(stack_name=stack_name, models=models)
models_attribute_dict = self.get_models_attribute_dict(attribute='level', models=models)
if models_attribute_dict:
return max(list(models_attribute_dict.values()))
else:
return -1
def construct_model_templates(self, hyperparameters: dict, **kwargs) -> Tuple[List[AbstractModel], dict]:
"""Constructs a list of unfit models based on the hyperparameters dict."""
raise NotImplementedError
def construct_model_templates_distillation(self, hyperparameters: dict, **kwargs) -> Tuple[List[AbstractModel], dict]:
"""Constructs a list of unfit models based on the hyperparameters dict for softclass distillation."""
raise NotImplementedError
def get_model_level(self, model_name: str) -> int:
return self.get_model_attribute(model=model_name, attribute='level')
def set_contexts(self, path_context):
self.path, model_paths = self.create_contexts(path_context)
for model, path in model_paths.items():
self.set_model_attribute(model=model, attribute='path', val=path)
def create_contexts(self, path_context: str) -> (str, dict):
path = path_context
model_paths = self.get_models_attribute_dict(attribute='path')
for model, prev_path in model_paths.items():
model_local_path = prev_path.split(self.path, 1)[1]
new_path = path + model_local_path
model_paths[model] = new_path
return path, model_paths
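# Illustrative example of the remapping done by create_contexts (hypothetical paths):
#   self.path = 'old_root/ag_models/', a model path = 'old_root/ag_models/models/LightGBM/'
#   create_contexts('new_root/ag_models/') -> model path 'new_root/ag_models/models/LightGBM/'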
def fit(self, X, y, hyperparameters: dict, X_val=None, y_val=None, **kwargs):
raise NotImplementedError
# TODO: Enable easier re-mapping of trained models -> hyperparameters input (They don't share a key since name can change)
def train_multi_levels(self, X, y, hyperparameters: dict, X_val=None, y_val=None, X_unlabeled=None, base_model_names: List[str] = None,
core_kwargs: dict = None, aux_kwargs: dict = None, level_start=1, level_end=1, time_limit=None, name_suffix: str = None,
relative_stack=True, level_time_modifier=0.333) -> List[str]:
"""
Trains a multi-layer stack ensemble using the input data on the hyperparameters dict input.
hyperparameters is used to determine the models used in each stack layer.
If continuing a stack ensemble with level_start>1, ensure that base_model_names is set to the appropriate base models that will be used by the level_start level models.
Trains both core and aux models.
core models are standard models which are fit on the data features. Core models will also use model predictions if base_model_names was specified or if level != 1.
aux models are ensemble models which only use the predictions of core models as features. These models never use the original features.
level_time_modifier : float, default 0.333
The amount of extra time given to early stack levels relative to later stack levels.
If 0, then all stack levels are given 100%/L of the time, where L is the number of stack levels.
If 1, then all stack levels are given 100% of the time, meaning if the first level uses all of the time given to it, the other levels won't train.
Time given to a level = remaining_time / remaining_levels * (1 + level_time_modifier), capped by total remaining time.
Returns a list of the model names that were trained from this method call, in order of fit.
"""
self._time_limit = time_limit
self._time_train_start = time.time()
time_train_start = self._time_train_start
hyperparameters = self._process_hyperparameters(hyperparameters=hyperparameters)
if relative_stack:
if level_start != 1:
raise AssertionError(f'level_start must be 1 when `relative_stack=True`. (level_start = {level_start})')
level_add = 0
if base_model_names:
max_base_model_level = self.get_max_level(models=base_model_names)
level_start = max_base_model_level + 1
level_add = level_start - 1
level_end += level_add
if level_start != 1:
hyperparameters_relative = {}
for key in hyperparameters:
if isinstance(key, int):
hyperparameters_relative[key+level_add] = hyperparameters[key]
else:
hyperparameters_relative[key] = hyperparameters[key]
hyperparameters = hyperparameters_relative
core_kwargs = {} if core_kwargs is None else core_kwargs.copy()
aux_kwargs = {} if aux_kwargs is None else aux_kwargs.copy()
model_names_fit = []
if level_start != level_end:
logger.log(20, f'AutoGluon will fit {level_end - level_start + 1} stack levels (L{level_start} to L{level_end}) ...')
for level in range(level_start, level_end + 1):
core_kwargs_level = core_kwargs.copy()
aux_kwargs_level = aux_kwargs.copy()
if time_limit is not None:
time_train_level_start = time.time()
levels_left = level_end - level + 1
time_left = time_limit - (time_train_level_start - time_train_start)
time_limit_for_level = min(time_left / levels_left * (1 + level_time_modifier), time_left)
time_limit_core = time_limit_for_level
time_limit_aux = max(time_limit_for_level * 0.1, min(time_limit, 360)) # Allows aux to go over time_limit, but only by a small amount
core_kwargs_level['time_limit'] = core_kwargs_level.get('time_limit', time_limit_core)
aux_kwargs_level['time_limit'] = aux_kwargs_level.get('time_limit', time_limit_aux)
base_model_names, aux_models = self.stack_new_level(
X=X, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled,
models=hyperparameters, level=level, base_model_names=base_model_names,
core_kwargs=core_kwargs_level, aux_kwargs=aux_kwargs_level, name_suffix=name_suffix,
)
model_names_fit += base_model_names + aux_models
self._time_limit = None
self.save()
return model_names_fit
def stack_new_level(self, X, y, models: Union[List[AbstractModel], dict], X_val=None, y_val=None, X_unlabeled=None, level=1, base_model_names: List[str] = None,
core_kwargs: dict = None, aux_kwargs: dict = None, name_suffix: str = None) -> (List[str], List[str]):
"""
Similar to calling self.stack_new_level_core, except auxiliary models will also be trained via a call to self.stack_new_level_aux, with the models trained from self.stack_new_level_core used as base models.
"""
if base_model_names is None:
base_model_names = []
if level < 1:
raise AssertionError(f'Stack level must be >= 1, but level={level}.')
elif not base_model_names and level > 1:
logger.log(30, f'Warning: Training models at stack level {level}, but no base models were specified.')
elif base_model_names and level == 1:
raise AssertionError(f'Stack level 1 models cannot have base models, but base_model_names={base_model_names}.')
core_kwargs = {} if core_kwargs is None else core_kwargs.copy()
aux_kwargs = {} if aux_kwargs is None else aux_kwargs.copy()
if name_suffix:
core_kwargs['name_suffix'] = core_kwargs.get('name_suffix', '') + name_suffix
aux_kwargs['name_suffix'] = aux_kwargs.get('name_suffix', '') + name_suffix
core_models = self.stack_new_level_core(X=X, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled, models=models,
level=level, base_model_names=base_model_names, **core_kwargs)
if X_val is None:
aux_models = self.stack_new_level_aux(X=X, y=y, base_model_names=core_models, level=level+1, **aux_kwargs)
else:
aux_models = self.stack_new_level_aux(X=X_val, y=y_val, fit=False, base_model_names=core_models, level=level+1, **aux_kwargs)
return core_models, aux_models
def stack_new_level_core(self, X, y, models: Union[List[AbstractModel], dict], X_val=None, y_val=None, X_unlabeled=None,
level=1, base_model_names: List[str] = None, stack_name='core',
ag_args=None, ag_args_fit=None, ag_args_ensemble=None, excluded_model_types=None, ensemble_type=StackerEnsembleModel,
name_suffix: str = None, get_models_func=None, refit_full=False, **kwargs) -> List[str]:
"""
Trains all models using the data provided.
If level > 1, then the models will use base model predictions as additional features.
The base models used can be specified via base_model_names.
If self.bagged_mode, then models will be trained as StackerEnsembleModels.
The data provided in this method should not contain stack features, as they will be automatically generated if necessary.
"""
if get_models_func is None:
get_models_func = self.construct_model_templates
if base_model_names is None:
base_model_names = []
if not self.bagged_mode and level != 1:
raise ValueError('Stack Ensembling is not valid for non-bagged mode.')
if isinstance(models, dict):
get_models_kwargs = dict(
level=level,
name_suffix=name_suffix,
ag_args=ag_args,
ag_args_fit=ag_args_fit,
excluded_model_types=excluded_model_types,
)
if self.bagged_mode:
if level == 1:
(base_model_names, base_model_paths, base_model_types) = (None, None, None)
elif level > 1:
base_model_names, base_model_paths, base_model_types = self._get_models_load_info(model_names=base_model_names)
if len(base_model_names) == 0:
logger.log(20, 'No base models to train on, skipping stack level...')
return []
else:
raise AssertionError(f'Stack level cannot be less than 1! level = {level}')
ensemble_kwargs = {
'base_model_names': base_model_names,
'base_model_paths_dict': base_model_paths,
'base_model_types_dict': base_model_types,
'random_state': level + self.random_state,
}
get_models_kwargs.update(dict(
ag_args_ensemble=ag_args_ensemble,
ensemble_type=ensemble_type,
ensemble_kwargs=ensemble_kwargs,
))
models, model_args_fit = get_models_func(hyperparameters=models, **get_models_kwargs)
if model_args_fit:
hyperparameter_tune_kwargs = {
model_name: model_args_fit[model_name]['hyperparameter_tune_kwargs']
for model_name in model_args_fit if 'hyperparameter_tune_kwargs' in model_args_fit[model_name]
}
kwargs['hyperparameter_tune_kwargs'] = hyperparameter_tune_kwargs
logger.log(20, f'Fitting {len(models)} L{level} models ...')
X_init = self.get_inputs_to_stacker(X, base_models=base_model_names, fit=True)
if X_val is not None:
X_val = self.get_inputs_to_stacker(X_val, base_models=base_model_names, fit=False)
if refit_full and X_val is not None:
X_init = pd.concat([X_init, X_val])
import numpy as np
import pandas as pd
import sys,os
#from random import choices
import random
from datetime import datetime as dt
import json
from ast import literal_eval
import time
from scipy import stats
#from joblib import Parallel, delayed
from libs.lib_job_thread import *
import logging
class SimX:
def __init__(self,*args):
self.platform=args[0]
self.domain=args[1]
self.scenario=args[2]
self.model_identifier=args[3]
self.pool=ThreadPool(32)
self.sim_outputs=[]
self.data_level_degree_list=None
self.data_delay_level_degree_root_list=None
self.data_user_list=None
self.data_user_followers=None
self.data_acts_list=None
self.data_acts_list_indexed=None
self.data_level_content_list=None
# self.logger = logging.getLogger(__name__)
# logPath='./logs/run_simulation_prob_HYDRA_%s_%s_S20001.log'%(self.platform,self.domain)
# handler = logging.FileHandler(logPath,mode='w+')
# handler.setLevel(logging.INFO)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# self.logger.addHandler(handler)
def set_metadata(self):#,degree_list,delay_list):
print("[Degree by level] loading..")
self.data_level_degree_list=pd.read_pickle("./metadata/probs/%s-%s/degree_cond_level.pkl.gz"%(self.platform,self.domain))
print("[Delay sequences by size] loading..")
self.data_delay_level_degree_root_list=pd.read_pickle("./metadata/probs/%s-%s/delay_cond_size.pkl.gz"%(self.platform,self.domain))
# self.data_level_degree_list=degree_list
# self.data_delay_level_degree_root_list=delay_list
def set_user_metadata(self):#,user_list,user_followers):
print("[User probability] loading..")
self.data_user_list=pd.read_pickle("./metadata/probs/%s-%s/user_diffusion.pkl.gz"%(self.platform,self.domain))
self.data_acts_list=self.data_user_list.groupby("pnodeUserID").sum()["no_responses"].reset_index(name="# acts")
# print("[User followers] loading..")
# self.data_user_followers=pd.read_pickle("./metadata/probs/%s-%s/user_followers.pkl.gz"%(self.platform,self.domain))
# x=np.array(self.data_user_followers["user.followers_count"])
# x_ranked=stats.rankdata(x, "average")/len(x)
# self.data_user_followers['percentile']=x_ranked
#self.data_acts_list_indexed=self.data_acts_list.set_index("pnodeUserID")
self.data_user_list.set_index("pnodeUserID",inplace=True)
def set_simulation_metadata(self,content_list):
self.data_level_content_list=content_list
def doSanity(self):
# ## Given any level, return a node with degree X
level=200
b = self._get_degree(level)
print("[sanity] Level: %d, Sampled Degree: %d"%(level,b))
## Given any size of the cascade, return a vector of delays
size=10000
dV = self._get_recorrected_delayV(size)
print("[sanity] Expected: %d, Returned: %d Sampled Delay Vector: "%(size,dV.shape[0]),dV)
# ## Given any degree in the first level, return an arbitrary cascade tree
root_degree=3
ctree=self._gen_cascade_tree(root_degree)
print("[sanity] generated cascade tree")
print(ctree)
def _get_random_id(self):
hash = random.getrandbits(64)
return "%16x"%hash
def _get_random_user_id(self):
try:
random_user_id=self.data_acts_list.sample(n=1,weights="# acts",replace=True).iloc[0]["pnodeUserID"]
except KeyError as ke:
random_user_id=self._get_random_id()
##print("new user: ",random_user_id)
return random_user_id
def _get_neighbor_user_id(self,user_id):
try:
###random_user_id=self.data_user_list.loc[user_id].sample(n=1,weights="prob",replace=True).iloc[0]["source_author"]
##print(self.data_user_list[self.data_user_list['target_author']==user_id])
neighbors=self.data_user_list.loc[user_id]#self.data_user_list[self.data_user_list['pnodeUserID']==user_id]
if neighbors.shape[0]>0:
random_user_id=neighbors.sample(n=1,weights="prob",replace=True).iloc[0]["nodeUserID"]
else:
random_user_id=self._get_random_user_id()
except:
random_user_id=self._get_random_user_id()
return random_user_id
def _get_random_users(self,size):
return [self._get_random_id() for i in range(size)]
def write_output(self,output,scenario,platform,domain,version):
scenario=str(scenario)
version=str(version)
print("version %s"%version)
#output_location="./output/%s/%s/%s/%s-%s_v%s.json"%(platform, domain, version, platform, domain, version)
output_location="./output_%s/%s/%s/scenario_%s_domain_%s-%s_v%s.json"% (self.model_identifier,platform,domain,scenario,platform,domain,version)
output_file = open(output_location, 'w', encoding='utf-8')
output_records=output.to_dict('records')
for d in output_records:
output_file.write(json.dumps(d) + '\n')
def _get_degree(self,level,pa=False):
sampled_degree=0
ulevels=set(self.data_level_degree_list.index.get_level_values('level'))
flag=False;
while not flag:
flag=(level in ulevels)
if(flag==False):
level-=1
degreeList=np.array(self.data_level_degree_list.loc[level]['udegreeV'])
degreeProbList=np.array(self.data_level_degree_list.loc[level]["probV"])
if pa:
degreeProbList=1/degreeProbList
degreeProbList=degreeProbList/np.sum(degreeProbList)
##print(level,degreeList,degreeProbList)
if len(degreeList)>0:
sampled_degree = np.random.choice(a=degreeList, p=degreeProbList)
return sampled_degree
# def _get_pa_degree(self,level):
# sampled_degree=0
# ulevels=set(self.data_level_degree_list.index.get_level_values('level'))
# flag=False;
# while not flag:
# flag=(level in ulevels)
# if(flag==False):
# level-=1
# degreeList=np.array(self.data_level_degree_list.loc[level]['udegreeV'])
# degreeProbList=np.array(1-self.data_level_degree_list.loc[level]["probV"])
# if len(degreeList)>0:
# sampled_degree = np.random.choice(a=degreeList, p=degreeProbList)
# return sampled_degree
def _get_delayV(self,size):
sample_delays=self.data_delay_level_degree_root_list[self.data_delay_level_degree_root_list["size"]==size]
no_records=sample_delays.shape[0]
if no_records>0:
sample_delay=sample_delays.sample(n=1, replace=False)
dV=np.array(list(sample_delay["delayV"]))[0]
return dV
else:
max_size=self.data_delay_level_degree_root_list["size"].max()
if(size>max_size):
return self._get_delayV(max_size)
else:
return self._get_delayV(size+1)
def _get_recorrected_delayV(self,size):
dV = self._get_delayV(size)
if(dV.shape[0]>size):
dV=dV[:size]
else:
max_ldelay=np.max(dV)
for n in range(len(dV), size):
dV=np.append(dV,max_ldelay)
return dV
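# Example of the recorrection above: with size=5 and a sampled delay vector [0, 2, 7],
# the vector is padded with its max delay to [0, 2, 7, 7, 7]; a sampled vector longer
# than 5 would instead be truncated to its first 5 entries.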
# def _get_contentV(self,level):
# ulevels=set(self.data_level_content_list.index.get_level_values('level'))
# flag=False;
# while not flag:
# flag=(level in ulevels)
# if(flag==False):
# level-=1
# contentV=self.data_level_content_list.iloc[level]['contentV']
# sampled_contentV = contentV[np.random.randint(0,len(contentV))]
# return sampled_contentV[1:]
def _get_synthetic_tree_recursive(self,level,pdegree,cascade_tree_matrix,nlist):
if(cascade_tree_matrix is None):
cascade_tree_matrix=[]
cascade_tree_matrix.append(nlist)
children=pdegree
while(children>0):
mid=self._get_random_id()
pid=nlist[2]
puser_id=nlist[4]
nuser_id=self._get_neighbor_user_id(puser_id)
###nuser_id=self._get_random_id()
ndegree=self._get_degree(level)
klist=[level,ndegree,mid,pid,nuser_id]
cascade_tree_matrix.append(klist)
self._get_synthetic_tree_recursive(level+1,ndegree,cascade_tree_matrix,klist)
children-=1
return cascade_tree_matrix
def _gen_cascade_tree(self,pid=None,puser_id=None,pdegree=None):
level=0
## post id
if pid is None:
pid=self._get_random_id()
## post user id
if puser_id is None:
puser_id=self._get_random_user_id()
## post degree
if pdegree is None:
pdegree=self._get_degree(level)
## level, my degree, my id, my parent id
nlist=[level,pdegree,pid,pid,puser_id]
cascade_tree_matrix=self._get_synthetic_tree_recursive(level+1,pdegree,None,nlist)
cascade_tree=pd.DataFrame(cascade_tree_matrix,columns=["level","degree","nodeID","parentID","nodeUserID"])
return cascade_tree
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from .... import dataframe as md
from ....core.operand import OperandStage
from ....tests.core import assert_groupby_equal, require_cudf
from ....utils import arrow_array_to_objects
from ..aggregation import DataFrameGroupByAgg
class MockReduction1(md.CustomReduction):
def agg(self, v1):
return v1.sum()
class MockReduction2(md.CustomReduction):
def pre(self, value):
return value + 1, value * 2
def agg(self, v1, v2):
return v1.sum(), v2.min()
def post(self, v1, v2):
return v1 + v2
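# Conceptually, for a single partition with values [1, 2, 3], MockReduction2 yields:
# pre -> v1=[2, 3, 4], v2=[2, 4, 6]; agg -> (9, 2); post -> 9 + 2 = 11
# (actual execution may compute per-chunk intermediates and combine them).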
def test_groupby(setup):
rs = np.random.RandomState(0)
data_size = 100
data_dict = {'a': rs.randint(0, 10, size=(data_size,)),
'b': rs.randint(0, 10, size=(data_size,)),
'c': rs.choice(list('abcd'), size=(data_size,))}
# test groupby with DataFrames and RangeIndex
df1 = pd.DataFrame(data_dict)
mdf = md.DataFrame(df1, chunk_size=13)
grouped = mdf.groupby('b')
assert_groupby_equal(grouped.execute().fetch(),
df1.groupby('b'))
# test groupby with string index with duplications
df2 = pd.DataFrame(data_dict, index=['i' + str(i % 3) for i in range(data_size)])
mdf = md.DataFrame(df2, chunk_size=13)
grouped = mdf.groupby('b')
assert_groupby_equal(grouped.execute().fetch(),
df2.groupby('b'))
# test groupby with DataFrames by series
grouped = mdf.groupby(mdf['b'])
assert_groupby_equal(grouped.execute().fetch(),
df2.groupby(df2['b']))
# test groupby with DataFrames by multiple series
grouped = mdf.groupby(by=[mdf['b'], mdf['c']])
assert_groupby_equal(grouped.execute().fetch(),
df2.groupby(by=[df2['b'], df2['c']]))
# test groupby with DataFrames with MultiIndex
df3 = pd.DataFrame(data_dict,
index=pd.MultiIndex.from_tuples(
[(i % 3, 'i' + str(i)) for i in range(data_size)]))
mdf = md.DataFrame(df3, chunk_size=13)
grouped = mdf.groupby(level=0)
assert_groupby_equal(grouped.execute().fetch(),
df3.groupby(level=0))
# test groupby with DataFrames by integer columns
df4 = pd.DataFrame(list(data_dict.values())).T
mdf = md.DataFrame(df4, chunk_size=13)
grouped = mdf.groupby(0)
assert_groupby_equal(grouped.execute().fetch(),
df4.groupby(0))
series1 = pd.Series(data_dict['a'])
ms1 = md.Series(series1, chunk_size=13)
grouped = ms1.groupby(lambda x: x % 3)
assert_groupby_equal(grouped.execute().fetch(),
series1.groupby(lambda x: x % 3))
# test groupby series
grouped = ms1.groupby(ms1)
assert_groupby_equal(grouped.execute().fetch(),
series1.groupby(series1))
series2 = pd.Series(data_dict['a'],
index=['i' + str(i) for i in range(data_size)])
ms2 = md.Series(series2, chunk_size=13)
grouped = ms2.groupby(lambda x: int(x[1:]) % 3)
assert_groupby_equal(grouped.execute().fetch(),
series2.groupby(lambda x: int(x[1:]) % 3))
def test_groupby_getitem(setup):
rs = np.random.RandomState(0)
data_size = 100
raw = pd.DataFrame({'a': rs.randint(0, 10, size=(data_size,)),
'b': rs.randint(0, 10, size=(data_size,)),
'c': rs.choice(list('abcd'), size=(data_size,))},
index=pd.MultiIndex.from_tuples([(i % 3, 'i' + str(i)) for i in range(data_size)]))
mdf = md.DataFrame(raw, chunk_size=13)
r = mdf.groupby(level=0)[['a', 'b']]
assert_groupby_equal(r.execute().fetch(),
raw.groupby(level=0)[['a', 'b']], with_selection=True)
for method in ('tree', 'shuffle'):
r = mdf.groupby(level=0)[['a', 'b']].sum(method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby(level=0)[['a', 'b']].sum().sort_index())
r = mdf.groupby(level=0)[['a', 'b']].apply(lambda x: x + 1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby(level=0)[['a', 'b']].apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']]
assert_groupby_equal(r.execute().fetch(),
raw.groupby('b')[['a', 'b']], with_selection=True)
r = mdf.groupby('b')[['a', 'c']]
assert_groupby_equal(r.execute().fetch(),
raw.groupby('b')[['a', 'c']], with_selection=True)
for method in ('tree', 'shuffle'):
r = mdf.groupby('b')[['a', 'b']].sum(method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].sum().sort_index())
r = mdf.groupby('b')[['a', 'b']].agg(['sum', 'count'], method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].agg(['sum', 'count']).sort_index())
r = mdf.groupby('b')[['a', 'c']].agg(['sum', 'count'], method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'c']].agg(['sum', 'count']).sort_index())
r = mdf.groupby('b')[['a', 'b']].apply(lambda x: x + 1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']].transform(lambda x: x + 1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].transform(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']].cumsum()
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].cumsum().sort_index())
r = mdf.groupby('b').a
assert_groupby_equal(r.execute().fetch(),
raw.groupby('b').a, with_selection=True)
for method in ('shuffle', 'tree'):
r = mdf.groupby('b').a.sum(method=method)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.sum().sort_index())
r = mdf.groupby('b').a.agg(['sum', 'mean', 'var'], method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.agg(['sum', 'mean', 'var']).sort_index())
r = mdf.groupby('b', as_index=False).a.sum(method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values('b', ignore_index=True),
raw.groupby('b', as_index=False).a.sum().sort_values('b', ignore_index=True))
r = mdf.groupby('b', as_index=False).b.count(method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values('b', ignore_index=True),
raw.groupby('b', as_index=False).b.count().sort_values('b', ignore_index=True))
r = mdf.groupby('b', as_index=False).b.agg({'cnt': 'count'}, method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values('b', ignore_index=True),
raw.groupby('b', as_index=False).b.agg({'cnt': 'count'}).sort_values('b', ignore_index=True))
r = mdf.groupby('b').a.apply(lambda x: x + 1)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b').a.transform(lambda x: x + 1)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.transform(lambda x: x + 1).sort_index())
r = mdf.groupby('b').a.cumsum()
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.cumsum().sort_index())
# special test for selection key == 0
raw = pd.DataFrame(rs.rand(data_size, 10))
raw[0] = 0
mdf = md.DataFrame(raw, chunk_size=13)
r = mdf.groupby(0, as_index=False)[0].agg({'cnt': 'count'}, method='tree')
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby(0, as_index=False)[0].agg({'cnt': 'count'}))
def test_dataframe_groupby_agg(setup):
agg_funs = ['std', 'mean', 'var', 'max', 'count', 'size', 'all', 'any', 'skew', 'kurt', 'sem']
rs = np.random.RandomState(0)
raw = pd.DataFrame({'c1': np.arange(100).astype(np.int64),
'c2': rs.choice(['a', 'b', 'c'], (100,)),
'c3': rs.rand(100)})
mdf = md.DataFrame(raw, chunk_size=13)
for method in ['tree', 'shuffle']:
r = mdf.groupby('c2').agg('size', method=method)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg('size').sort_index())
for agg_fun in agg_funs:
if agg_fun == 'size':
continue
r = mdf.groupby('c2').agg(agg_fun, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(agg_fun).sort_index())
r = mdf.groupby('c2').agg(agg_funs, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(agg_funs).sort_index())
agg = OrderedDict([('c1', ['min', 'mean']), ('c3', 'std')])
r = mdf.groupby('c2').agg(agg, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(agg).sort_index())
agg = OrderedDict([('c1', 'min'), ('c3', 'sum')])
r = mdf.groupby('c2').agg(agg, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(agg).sort_index())
r = mdf.groupby('c2').agg({'c1': 'min', 'c3': 'min'}, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg({'c1': 'min', 'c3': 'min'}).sort_index())
r = mdf.groupby('c2').agg({'c1': 'min'}, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg({'c1': 'min'}).sort_index())
# test groupby series
r = mdf.groupby(mdf['c2']).sum(method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby(raw['c2']).sum().sort_index())
r = mdf.groupby('c2').size(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
raw.groupby('c2').size())
# test inserted kurt method
r = mdf.groupby('c2').kurtosis(method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.groupby('c2').kurtosis())
for agg_fun in agg_funs:
if agg_fun == 'size' or callable(agg_fun):
continue
r = getattr(mdf.groupby('c2'), agg_fun)(method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
getattr(raw.groupby('c2'), agg_fun)())
# test as_index=False
for method in ['tree', 'shuffle']:
r = mdf.groupby('c2', as_index=False).agg('mean', method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values('c2', ignore_index=True),
raw.groupby('c2', as_index=False).agg('mean').sort_values('c2', ignore_index=True))
assert r.op.groupby_params['as_index'] is False
r = mdf.groupby(['c1', 'c2'], as_index=False).agg('mean', method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values(['c1', 'c2'], ignore_index=True),
raw.groupby(['c1', 'c2'], as_index=False).agg('mean').sort_values(['c1', 'c2'], ignore_index=True))
assert r.op.groupby_params['as_index'] is False
# test as_index=False takes no effect
r = mdf.groupby(['c1', 'c2'], as_index=False).agg(['mean', 'count'])
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.groupby(['c1', 'c2'], as_index=False).agg(['mean', 'count']))
assert r.op.groupby_params['as_index'] is True
r = mdf.groupby('c2').agg(['cumsum', 'cumcount'])
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(['cumsum', 'cumcount']).sort_index())
r = mdf[['c1', 'c3']].groupby(mdf['c2']).agg(MockReduction2())
pd.testing.assert_frame_equal(r.execute().fetch(),
raw[['c1', 'c3']].groupby(raw['c2']).agg(MockReduction2()))
r = mdf.groupby('c2').agg(sum_c1=md.NamedAgg('c1', 'sum'), min_c1=md.NamedAgg('c1', 'min'),
mean_c3=md.NamedAgg('c3', 'mean'), method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.groupby('c2').agg(sum_c1=md.NamedAgg('c1', 'sum'),
min_c1=md.NamedAgg('c1', 'min'),
mean_c3=md.NamedAgg('c3', 'mean')))
def test_series_groupby_agg(setup):
rs = np.random.RandomState(0)
series1 = pd.Series(rs.rand(10))
ms1 = md.Series(series1, chunk_size=3)
agg_funs = ['std', 'mean', 'var', 'max', 'count', 'size', 'all', 'any', 'skew', 'kurt', 'sem']
for method in ['tree', 'shuffle']:
for agg_fun in agg_funs:
r = ms1.groupby(lambda x: x % 2).agg(agg_fun, method=method)
pd.testing.assert_series_equal(r.execute().fetch(),
series1.groupby(lambda x: x % 2).agg(agg_fun))
r = ms1.groupby(lambda x: x % 2).agg(agg_funs, method=method)
pd.testing.assert_frame_equal(r.execute().fetch(),
series1.groupby(lambda x: x % 2).agg(agg_funs))
# test groupby series
r = ms1.groupby(ms1).sum(method=method)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
series1.groupby(series1).sum().sort_index())
r = ms1.groupby(ms1).sum(method=method)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
series1.groupby(series1).sum().sort_index())
# test inserted kurt method
r = ms1.groupby(ms1).kurtosis(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
series1.groupby(series1).kurtosis())
for agg_fun in agg_funs:
r = getattr(ms1.groupby(lambda x: x % 2), agg_fun)(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
getattr(series1.groupby(lambda x: x % 2), agg_fun)())
r = ms1.groupby(lambda x: x % 2).agg(['cumsum', 'cumcount'], method='tree')
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
series1.groupby(lambda x: x % 2).agg(['cumsum', 'cumcount']).sort_index())
r = ms1.groupby(lambda x: x % 2).agg(MockReduction2(name='custom_r'), method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
series1.groupby(lambda x: x % 2).agg(MockReduction2(name='custom_r')))
r = ms1.groupby(lambda x: x % 2).agg(col_var='var', col_skew='skew', method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
series1.groupby(lambda x: x % 2).agg(col_var='var', col_skew='skew'))
def test_groupby_agg_auto_method(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame({'c1': rs.randint(20, size=100),
'c2': rs.choice(['a', 'b', 'c'], (100,)),
'c3': rs.rand(100)})
mdf = md.DataFrame(raw, chunk_size=20)
def _disallow_reduce(ctx, op):
assert op.stage != OperandStage.reduce
op.execute(ctx, op)
r = mdf.groupby('c2').agg('sum')
operand_executors = {DataFrameGroupByAgg: _disallow_reduce}
result = r.execute(extra_config={'operand_executors': operand_executors,
'check_all': False}).fetch()
pd.testing.assert_frame_equal(result.sort_index(),
raw.groupby('c2').agg('sum'))
def _disallow_combine_and_agg(ctx, op):
assert op.stage not in (OperandStage.combine, OperandStage.agg)
op.execute(ctx, op)
r = mdf.groupby('c1').agg('sum')
operand_executors = {DataFrameGroupByAgg: _disallow_combine_and_agg}
result = r.execute(extra_config={'operand_executors': operand_executors,
'check_all': False}).fetch()
pd.testing.assert_frame_equal(result.sort_index(),
raw.groupby('c1').agg('sum'))
def test_groupby_agg_str_cat(setup):
agg_fun = lambda x: x.str.cat(sep='_', na_rep='NA')
rs = np.random.RandomState(0)
raw_df = pd.DataFrame({'a': rs.choice(['A', 'B', 'C'], size=(100,)),
'b': rs.choice([None, 'alfa', 'bravo', 'charlie'], size=(100,))})
mdf = md.DataFrame(raw_df, chunk_size=13)
r = mdf.groupby('a').agg(agg_fun, method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
raw_df.groupby('a').agg(agg_fun))
raw_series = pd.Series(rs.choice([None, 'alfa', 'bravo', 'charlie'], size=(100,)))
ms = md.Series(raw_series, chunk_size=13)
r = ms.groupby(lambda x: x % 2).agg(agg_fun, method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
raw_series.groupby(lambda x: x % 2).agg(agg_fun))
@require_cudf
def test_gpu_groupby_agg(setup_gpu):
rs = np.random.RandomState(0)
df1 = pd.DataFrame({'a': rs.choice([2, 3, 4], size=(100,)),
'b': rs.choice([2, 3, 4], size=(100,))})
mdf = md.DataFrame(df1, chunk_size=13).to_gpu()
r = mdf.groupby('a').sum()
pd.testing.assert_frame_equal(r.execute().fetch().to_pandas(),
df1.groupby('a').sum())
r = mdf.groupby('a').kurt()
pd.testing.assert_frame_equal(r.execute().fetch().to_pandas(),
df1.groupby('a').kurt())
r = mdf.groupby('a').agg(['sum', 'var'])
pd.testing.assert_frame_equal(r.execute().fetch().to_pandas(),
df1.groupby('a').agg(['sum', 'var']))
rs = np.random.RandomState(0)
idx = pd.Index(np.where(rs.rand(10) > 0.5, 'A', 'B'))
series1 = pd.Series(rs.rand(10), index=idx)
ms = md.Series(series1, index=idx, chunk_size=3).to_gpu().to_gpu()
r = ms.groupby(level=0).sum()
pd.testing.assert_series_equal(r.execute().fetch().to_pandas(),
series1.groupby(level=0).sum())
r = ms.groupby(level=0).kurt()
pd.testing.assert_series_equal(r.execute().fetch().to_pandas(),
series1.groupby(level=0).kurt())
r = ms.groupby(level=0).agg(['sum', 'var'])
pd.testing.assert_frame_equal(r.execute().fetch().to_pandas(),
series1.groupby(level=0).agg(['sum', 'var']))
def test_groupby_apply(setup):
df1 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')})
def apply_df(df, ret_series=False):
df = df.sort_index()
df.a += df.b
if len(df.index) > 0:
if not ret_series:
df = df.iloc[:-1, :]
else:
df = df.iloc[-1, :]
return df
def apply_series(s, truncate=True):
s = s.sort_index()
if truncate and len(s.index) > 0:
s = s.iloc[:-1]
return s
mdf = md.DataFrame(df1, chunk_size=3)
applied = mdf.groupby('b').apply(lambda df: None)
pd.testing.assert_frame_equal(applied.execute().fetch(),
df1.groupby('b').apply(lambda df: None))
applied = mdf.groupby('b').apply(apply_df)
pd.testing.assert_frame_equal(applied.execute().fetch().sort_index(),
df1.groupby('b').apply(apply_df).sort_index())
applied = mdf.groupby('b').apply(apply_df, ret_series=True)
pd.testing.assert_frame_equal(applied.execute().fetch().sort_index(),
df1.groupby('b').apply(apply_df, ret_series=True).sort_index())
applied = mdf.groupby('b').apply(lambda df: df.a, output_type='series')
pd.testing.assert_series_equal(applied.execute().fetch().sort_index(),
df1.groupby('b').apply(lambda df: df.a).sort_index())
applied = mdf.groupby('b').apply(lambda df: df.a.sum())
pd.testing.assert_series_equal(applied.execute().fetch().sort_index(),
df1.groupby('b').apply(lambda df: df.a.sum()).sort_index())
series1 = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3])
ms1 = md.Series(series1, chunk_size=3)
applied = ms1.groupby(lambda x: x % 3).apply(lambda df: None)
pd.testing.assert_series_equal(applied.execute().fetch(),
series1.groupby(lambda x: x % 3).apply(lambda df: None))
applied = ms1.groupby(lambda x: x % 3).apply(apply_series)
pd.testing.assert_series_equal(applied.execute().fetch().sort_index(),
series1.groupby(lambda x: x % 3).apply(apply_series).sort_index())
sindex2 = pd.MultiIndex.from_arrays([list(range(9)), list('ABCDEFGHI')])
series2 = pd.Series(list('CDECEDABC'), index=sindex2)
ms2 = md.Series(series2, chunk_size=3)
applied = ms2.groupby(lambda x: x[0] % 3).apply(apply_series)
pd.testing.assert_series_equal(applied.execute().fetch().sort_index(),
series2.groupby(lambda x: x[0] % 3).apply(apply_series).sort_index())
def test_groupby_transform(setup):
df1 = pd.DataFrame({
'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce'),
'd': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'e': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'f': list('aabaaddce'),
})
def transform_series(s, truncate=True):
s = s.sort_index()
if truncate and len(s.index) > 1:
s = s.iloc[:-1].reset_index(drop=True)
return s
mdf = md.DataFrame(df1, chunk_size=3)
r = mdf.groupby('b').transform(transform_series, truncate=False)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b').transform(transform_series, truncate=False).sort_index())
if pd.__version__ != '1.1.0':
r = mdf.groupby('b').transform(['cummax', 'cumsum'], _call_agg=True)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b').agg(['cummax', 'cumsum']).sort_index())
agg_list = ['cummax', 'cumsum']
r = mdf.groupby('b').transform(agg_list, _call_agg=True)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b').agg(agg_list).sort_index())
agg_dict = OrderedDict([('d', 'cummax'), ('b', 'cumsum')])
r = mdf.groupby('b').transform(agg_dict, _call_agg=True)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b').agg(agg_dict).sort_index())
agg_list = ['sum', lambda s: s.sum()]
r = mdf.groupby('b').transform(agg_list, _call_agg=True)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b').agg(agg_list).sort_index())
series1 = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3])
ms1 = md.Series(series1, chunk_size=3)
r = ms1.groupby(lambda x: x % 3).transform(lambda x: x + 1)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
series1.groupby(lambda x: x % 3).transform(lambda x: x + 1).sort_index())
r = ms1.groupby(lambda x: x % 3).transform('cummax', _call_agg=True)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
series1.groupby(lambda x: x % 3).agg('cummax').sort_index())
agg_list = ['cummax', 'cumcount']
r = ms1.groupby(lambda x: x % 3).transform(agg_list, _call_agg=True)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
series1.groupby(lambda x: x % 3).agg(agg_list).sort_index())
def test_groupby_cum(setup):
df1 = pd.DataFrame({'a': [3, 5, 2, 7, 1, 2, 4, 6, 2, 4],
'b': [8, 3, 4, 1, 8, 2, 2, 2, 2, 3],
'c': [1, 8, 8, 5, 3, 5, 0, 0, 5, 4]})
mdf = md.DataFrame(df1, chunk_size=3)
for fun in ['cummin', 'cummax', 'cumprod', 'cumsum']:
r1 = getattr(mdf.groupby('b'), fun)()
pd.testing.assert_frame_equal(r1.execute().fetch().sort_index(),
getattr(df1.groupby('b'), fun)().sort_index())
r2 = getattr(mdf.groupby('b'), fun)(axis=1)
pd.testing.assert_frame_equal(r2.execute().fetch().sort_index(),
getattr(df1.groupby('b'), fun)(axis=1).sort_index())
r3 = mdf.groupby('b').cumcount()
pd.testing.assert_series_equal(r3.execute().fetch().sort_index(),
df1.groupby('b').cumcount().sort_index())
series1 = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3])
ms1 = md.Series(series1, chunk_size=3)
for fun in ['cummin', 'cummax', 'cumprod', 'cumsum', 'cumcount']:
r1 = getattr(ms1.groupby(lambda x: x % 2), fun)()
pd.testing.assert_series_equal(r1.execute().fetch().sort_index(),
getattr(series1.groupby(lambda x: x % 2), fun)().sort_index())
def test_groupby_head(setup):
df1 = pd.DataFrame({'a': [3, 5, 2, 7, 1, 2, 4, 6, 2, 4],
'b': [8, 3, 4, 1, 8, 2, 2, 2, 2, 3],
'c': [1, 8, 8, 5, 3, 5, 0, 0, 5, 4],
'd': [9, 7, 6, 3, 6, 3, 2, 1, 5, 8]})
# test single chunk
mdf = md.DataFrame(df1)
r = mdf.groupby('b').head(1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b').head(1))
r = mdf.groupby('b').head(-1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b').head(-1))
r = mdf.groupby('b')['a', 'c'].head(1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b')['a', 'c'].head(1))
# test multiple chunks
mdf = md.DataFrame(df1, chunk_size=3)
r = mdf.groupby('b').head(1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b').head(1))
# test head with selection
r = mdf.groupby('b')['a', 'd'].head(1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b')['a', 'd'].head(1))
r = mdf.groupby('b')['c', 'a', 'd'].head(1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
df1.groupby('b')['c', 'a', 'd'].head(1))
r = mdf.groupby('b')['c'].head(1)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
df1.groupby('b')['c'].head(1))
# test single chunk
series1 = | pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3]) | pandas.Series |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import re
import os
from tqdm.auto import tqdm
def read_extraction_result_dat(filename):
"""extract integrated data. The file to be read must be the result of
    an extraction of a .stat file.
    Parameters
    ----------
    filename : str
        path to the extraction-result .dat file (its commented header lines give the column names)
    Returns
    -------
    pandas.DataFrame
        one column per extracted quantity
"""
# read header
with open(filename) as f:
list_cols = []
line_content = f.readline()
while line_content[0] == "#":
list_cols.append(line_content)
line_content = f.readline()
nrows_header = len(list_cols)
cols_names = ''.join(list_cols).replace("#", "").replace("\n", "").split(" ")
cols_names = [col for col in cols_names if len(col) > 6]
cols_names = [col[1:] if col[0] == " " else col for col in cols_names]
# read rest of file
df = pd.read_csv(
filename, header=0, names=cols_names,
engine="python", skiprows=nrows_header, sep=" "
)
return df
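# Minimal usage sketch for the reader above; the path is hypothetical and only illustrates
# the expected input (a .dat file produced by the .stat extraction):
# df = read_extraction_result_dat("./data/extraction_result/example_extraction.dat")
# print(df.columns)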
def read_dat_file(filename):
first_lines = []
num_lines_to_skip = 3
with open(f"./data/extraction_result/{filename}") as f:
for _ in range(num_lines_to_skip + 1):
line = f.readline()
while line[0] == "#":
first_lines.append(line)
line = f.readline()
num_lines_to_skip += 1
    # drop the last two header lines
first_lines = first_lines[:-2]
    # strip the leading '#' and the trailing line-ending characters
first_lines = [line[1:-2] for line in first_lines]
cols = re.compile(r"\s+\[\d+\]").split(' '.join(first_lines))
cols[0] = cols[0][3:]
    # if two or more columns share the same name, suffix them with their index
a,b = np.unique(cols, return_counts=True)
if a[b >= 2].size > 0:
for elt in a[b >= 2]:
cols = [f"{col}_{i}" if col == elt else col for i,col in enumerate(cols)]
grid = pd.read_csv(
f"./data/extraction_result/{filename}",
sep=" ", skiprows=num_lines_to_skip, engine='python', header=0, names=cols)
return grid
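# Worked sketch of the header convention assumed by read_dat_file (column names are made up):
# >>> re.compile(r"\s+\[\d+\]").split("[1]AV [2]n(H) [3]Tgas")
# ['[1]AV', 'n(H)', 'Tgas']   # cols[0][3:] then strips the leading '[1]' tag -> 'AV'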
def extract_simulation_parameters(filename):
p = re.compile("[nrA]")
params = filename.split('_')[1]
params_str = p.split(params)[1:]
params_float = [float(param.replace('p', '.')) for param in params_str]
return params_float # return [nH, G0, Avmax]
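# Worked sketch, assuming file names follow the "prefix_n<nH>r<G0>A<Avmax>_..." pattern used below:
# >>> extract_simulation_parameters("sim_n1e2r1p5A1e1_a_20.pop")
# [100.0, 1.5, 10.0]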
def is_increasing(df_inter, idx_col):
diffs = np.diff(df_inter.iloc[:,idx_col], 1)
return np.all(diffs > -1e-8)
def attains_lim(df_inter, idx_col, lim=1):
return np.max(df_inter.iloc[:,idx_col]) >= lim
def where_attains_lim(df_inter, idx_col, lim=1):
idx_attains = np.argmin(np.abs(df_inter.iloc[:, idx_col] - lim))
return df_inter.index[idx_attains]
def scrape_all_files(Avmax="1e1"):
list_files = os.listdir("./data/extraction_result")
list_opticaldepth_files = [filename for filename in list_files if f"A{Avmax}_a_20.pop" in filename]
list_results = []
for filename_pop in tqdm(list_opticaldepth_files):
simulation_params = extract_simulation_parameters(filename_pop)
df_simulation_pop = read_dat_file(filename_pop)
filename_optdepth = filename_pop.replace(".pop", ".OptDepth")
df_simulation_optdepth = read_dat_file(filename_optdepth)
df_simulation = | pd.merge(df_simulation_pop, df_simulation_optdepth, on='AV') | pandas.merge |
# coding:utf-8
#
# Build the CNN from "Deep Learning from Scratch" (ゼロから作るDeep Learning) with Keras
#
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import pandas as pd
import matplotlib.pyplot as plt
batch_size = 32
num_classes = 10
epochs = 20
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape the data depending on whether the channel axis comes first or last
print(K.image_data_format())
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
print(x_train.shape)
# prints (60000, 28, 28, 1)
# normalize the training data to the 0-1 range
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary matrices
# i.e. convert the labels to one-hot form
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
#
# Build the model from here:
# Conv-ReLU-Pooling -- Affine-ReLU -- Affine-Softmax
#
model = Sequential()
model.add(Conv2D(30, kernel_size=(5, 5), strides=1,
activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
# add Dropout to try to curb overfitting
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
# add Dropout before the Softmax layer as well
model.add(Dropout(0.25))
model.add(Dense(num_classes, activation='softmax'))
# loss function: mean categorical cross-entropy
# optimizer: Adadelta
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
# train the model
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
# plot learning history
plt.style.use("ggplot")
df = | pd.DataFrame(history.history) | pandas.DataFrame |
import pandas as pd
import datetime as dt
from pathlib import Path
import os
######################################### WRANGLING DIAGNOSIS_HOSP_ADM TABLE DATA #########################################
V3_data_folder = Path("C:/Users/Jesus/Desktop/Master/TFM/Datos/19_04_2021/")
V2_data_folder = Path("C:/Users/Jesus/Desktop/Master/TFM/Datos/20_07_2020/")
V3_diagnosis_hosp_adm = V3_data_folder / "COVID_DSL_05.csv"
V2_diagnosis_hosp_adm = V2_data_folder / "CDSL_06.csv"
### V2 CDSL_06 - Data wrangling: change delimiter from ";" to "|" and remove lines with "IDINGRESO" field empty
data = []
try: # open file in read mode
fp = open(V2_diagnosis_hosp_adm, "r", newline="\r\n", encoding = "utf-8")
for line in fp:
line = line.replace(";", "|")
data.append(line)
finally:
fp.close()
try: # open file in writing mode
f = open(V2_diagnosis_hosp_adm, "w", newline="\n", encoding = "utf-8")
for line in data :
if line[1] != "|": #Removing lines with "IDINGRESO" field empty
print(line)
f.write(line)
else:
print(line)
finally:
f.close()
### V2 CDSL_06 - Data wrangling: Reordering fields according to V3
#V3 COVID_DSL_05
# Load csv data into dataframe
df_V3 = pd.read_csv(V3_diagnosis_hosp_adm, delimiter="|", encoding="utf-8")
# Get columns name
df_V3_columns = df_V3.columns.tolist()
# V2 CDSL_06
# Load csv data into dataframe
df_V2 = | pd.read_csv(V2_diagnosis_hosp_adm, delimiter="|", encoding="utf-8") | pandas.read_csv |
import json
from datetime import datetime
import pandas as pd
from autogluon import TabularPrediction as task
data_path = "./data/plasma/plasma"
label_column = "RETPLASMA"
fold1 = pd.read_csv(data_path + "-fold1.csv")
fold2 = | pd.read_csv(data_path + "-fold2.csv") | pandas.read_csv |
#assign peak information to each gene
#from supp. table 5 of 2012 nature TCGA breast cancer paper
#july 9 2014
import pandas as pd
import numpy as np
basalamp = pd.read_csv('Supplementary Table 5 revised basal amp.txt',sep = '\t',header=0)
totalamp = | pd.read_csv('Supplementary Table 5 revised total amp.txt',sep = '\t',header=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 11:57:52 2021
@author: smullally
"""
import Susan
import matplotlib.pyplot as plt
import numpy as np
outdir = "/Users/smullally/Science/tess_monitor_standards/paper/plots/"
#Directory of the data files
ddir = "/Users/smullally/Science/tess_monitor_standards/detrended_standards/good/"
#The list of names and filenames
infile = "/Users/smullally/Science/tess_monitor_standards/paper/plots/inputfilenames.csv"
filenames = np.loadtxt(infile, dtype=str, delimiter=',')
#This needs to be run first and then gets filled in below, one at a time.
stats = np.zeros((len(filenames[:,0]),5))
#%%
#for f in filenames[:,0]:
# Susan.create_lcft_plot(ddir+f, periods = [.004,12], times=None)
#%%
i = 0
pers = [0.5,12]
times = [2174, 2230]
label = "%s\nTIC %u\nSector 32-33" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%%
i = 1
pers = [0.1,12]
times = [2204, 2214.5]
label = "%s\nTIC %u\nSector 33" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 2
pers = [0.08,12]
times = None
label = "%s\nTIC %u\nSector 5" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%%
i = 3
pers = [0.2,12]
times =[2102, 2113.5]
label = "%s\nTIC %u\nSector 29" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 4
pers = [0.015,12]
times = [1815.8, 1828]
label = "%s\nTIC %u\nSector 19" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 5
pers = [0.014,5]
times = [2406,2409]
label = "%s\nTIC %u\nSector 40" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 6
pers = [0.05,10]
times = None
label = "%s\nTIC %u\nSector 40" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 7
pers = [0.01,12]
times = [2389.5,2404.9]
label = "%s\nTIC %u\nSector 40" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 8
pers = [0.4,12]
times = [2390, 2405]
label = "%s\nTIC %u\nSector 40" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 9
pers = [0.4,12]
times = [1751,1763.5]
label = "%s\nTIC %u\nSector 16" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 10
pers = [0.2,12]
times = [1855.8,1869]
label = "%s\nTIC %u\nSector 20" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 11
pers = [0.4,12]
times = None
label = "%s\nTIC %u\nSector 21" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#
i = 12
pers = [0.04,8]
times = None
label = "%s\nTIC %u\nSector 33" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 13
pers = [0.6,14]
times = None
label = "%s\nTIC %u\nSector 1" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%
i = 14
pers = [0.2,12]
times = None
label = "%s\nTIC %u\nSector 32" % (filenames[i,1], int(filenames[i,0][7:17]))
Susan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)
plt.savefig(outdir + filenames[i,0] + ".png")
ret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)
stats[i,:] = ret
#%%
# The cells above need to be run first; this cell then saves the collected stats
ofn = "/Users/smullally/Science/tess_monitor_standards/paper/variable_stats.csv"
form = ("%u", "%5.5f", "%.4f", "%.4f", "%.4f")
np.savetxt(fname= ofn, X = stats, delimiter=",",
header = "TIC,period_at_max,max_amplitude,2sigma_pkpk,3pk2pk",
fmt = form)
#%%
#Get Crowdsap for all stars.
import lightkurve as lk
import pandas as p
target_name_file = "/Users/smullally/Science/tess_monitor_standards/paper/target_names.csv"
targets = | p.read_csv(target_name_file,delimiter=',', header="infer") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 14 16:49:58 2020
@author: jdfab
"""
import os
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
cards = pd.read_csv('C:/Users/jdfab/Dropbox/Bridge/OCR/test_files/cards.names',header=None)
cards.columns = ['card']
file_path = 'C:/Users/jdfab/Dropbox/Bridge/OCR/test_files/test10_out/6/'
file_path = 'C:/Users/jdfab/Dropbox/Bridge/OCR/test_files/'
def get_data_frame_from_files(file_path,cards,confidence_threshold=95):
'''
This function should return a dataframe with the best guess
of the position of each card, given the output of the predictive model
The output is a dataframe with index 'card' which contains card names
in the format 'AC', '6H', etc., and two numerical columns.
Current logic is *very* simple- just take the mean of all predictions
above a certain confidence level to get the x,y coordinates.
    This seems to give passable results on some outputs, but needs more work.
'''
boxes = {}
for file_name in os.listdir(file_path):
if file_name[-4:] == '.txt':
if os.path.getsize(file_path+file_name) > 0:
print(file_name)
file_data = pd.read_csv(file_path + file_name,header=None, sep = ' ')
file_data.columns = ['label','x_ratio','y_ratio','width','height','confidence']
file_data['Name'] = file_name[file_name.find('_')+1:-4]
boxes[file_name] = file_data
boxes = | pd.concat(boxes[file_name] for file_name in boxes) | pandas.concat |
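# Rough calling sketch for get_data_frame_from_files above (the folder and threshold are made up):
# cards = pd.read_csv('cards.names', header=None)
# cards.columns = ['card']
# positions = get_data_frame_from_files('./test10_out/6/', cards, confidence_threshold=90)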
import pandas as pd
from oauth2client.service_account import ServiceAccountCredentials
from auth import client
# List of all names of the sheets you want to work on
titles = []
for spreadsheet in client.openall():
if spreadsheet.title != 'Parent Sheet':
titles.append(spreadsheet.title)
parent_sheet = client.open('Parent Sheet')
# parent_sheet.get_worksheet(0).clear()
# parent_sheet.get_worksheet(0).update([df.columns.values.tolist()])
parent_df = pd.DataFrame(parent_sheet.get_worksheet(0).get_all_records())
for sheet_name in titles:
sheet = client.open(sheet_name)
list_of_dicts = sheet.get_worksheet(0).get_all_records()
df = pd.DataFrame(list_of_dicts)
# use join, merge or concat to combine the dataframes, then update the parent sheet
final_df = | pd.concat([parent_df, df]) | pandas.concat |
from resqdb.Connection import Connection
from resqdb.functions import save_file
from datetime import datetime
import logging
import os
import sys
import json
import pandas as pd
from pptx import Presentation
from pptx.util import Cm, Pt, Inches
from pptx.dml.color import RGBColor
from pptx.chart.data import CategoryChartData, ChartData
from pptx.enum.chart import XL_CHART_TYPE, XL_TICK_MARK, XL_TICK_LABEL_POSITION, XL_LEGEND_POSITION, XL_LABEL_POSITION
from pptx.enum.dml import MSO_LINE
from pptx.oxml.xmlchemy import OxmlElement
import xlsxwriter
from xlsxwriter.utility import xl_rowcol_to_cell, xl_col_to_name
class AfricaReport():
''' Generate reports for South Africa.
:param df: the raw data or preprocessed data
:type df: DataFrame
:param start_date: starting date included in filtered data
:type start_date: datetime
:param end_date: last date included in the filtered data
:type end_date: datetime
:param period_name: the name of the period
:type period_name: str
    :param report_type: the name of the report
    :type report_type: str
    :param site_reports: True if per-site reports should be generated as well
    :type site_reports: boolean
    :param region_reports: True if per-region reports should be generated as well
    :type region_reports: boolean
'''
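    # Illustrative instantiation only; the dates, period name and report type are made-up values:
    # report = AfricaReport(df=raw_df, start_date=datetime(2021, 1, 1), end_date=datetime(2021, 3, 31),
    #                       period_name='2021_Q1', report_type='quarterly', site_reports=True)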
def __init__(self, df=None, start_date=None, end_date=None, period_name=None, report_type=None, site_reports=False, region_reports=False):
# Set logging
debug = f'debug_{datetime.now().strftime("%d-%m-%Y")}.log'
log_file = os.path.join(os.getcwd(), debug)
logging.basicConfig(
filename=log_file,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG
)
logging.info('Start to generate reports for South Africa.')
# Get country code
self.country_code = 'ZA'
self.country_name = 'South Africa'
self.period_name = period_name
self.report_type = report_type
self.site_reports = site_reports
self.region_reports = region_reports
self.region_name = None
# Connect to database and get Africa data
if df is None:
con = Connection(data='africa')
self.raw_data = con.preprocessed_data.copy()
self.raw_data = self._preprocess_data(df=self.raw_data)
logging.info('The preprocessed data were generated.')
else:
# Convert hospital date into datetime if data were read from csv and derive the format of the date
date = df['HOSPITAL_DATE'].iloc[0]
if '/' in date:
dateForm = '%d/%m/%Y'
else:
dateForm = '%Y-%m-%d'
self.raw_data = df.copy()
# Get all columns with DATE in the name
columns = [x for x in self.raw_data.columns.tolist() if 'DATE' in x]
for column in columns:
self.raw_data[column] = pd.to_datetime(self.raw_data[column], format=dateForm, errors='ignore')
# Read regions mapping from the json file
path = os.path.join(os.path.dirname(__file__), 'tmp', 'south_africa_mapping.json')
with open(path, 'r', encoding='utf-8') as json_file:
self.regions = json.load(json_file)
# Create REGION column in the dataframe based on the region in the SITE ID
self.raw_data['REGION'] = self.raw_data.apply(
lambda x: self._get_region(x['SITE_ID']), axis=1
)
if 'SITE_OID' in self.raw_data.columns:
del self.raw_data['SITE_OID']
# If start date and end date are defined, filter data by hospital date otherwise keep all data
if start_date is None and end_date is None:
self.preprocessed_data = self.raw_data
else:
self.preprocessed_data = self._filter_by_date(self.raw_data, start_date, end_date)
logging.info('The data has been filter by date.')
self._columns_to_be_deleted = []
        # Append the full data again as country-level results; SITE_ID, FACILITY_NAME and REGION must be modified before appending
country_df = self.preprocessed_data.copy()
country_df['SITE_ID'] = self.country_name
country_df['FACILITY_NAME'] = self.country_name
country_df['REGION'] = self.country_name
self.preprocessed_data = self.preprocessed_data.append(country_df, ignore_index=True)
###########################
# Generate country report #
        # Calculate statistics
self.calculate_statistics(self.preprocessed_data)
# generate formatted statistic
if self.report_type == 'all' and self.period_name == 'all':
filename = self.country_code
else:
filename = f'{self.report_type}_{self.country_code}_{self.period_name}'
self._generate_formatted_preprocessed_data(self.preprocessed_data, filename)
self._generate_formatted_stats(self.stats, filename)
        # Generate presentation
self._generate_presentation(self.stats, filename)
logging.info('The country report has been generated.')
if region_reports:
region_preprocessed_data = self.preprocessed_data.copy()
region_preprocessed_data['SITE_ID'] = region_preprocessed_data['REGION']
region_preprocessed_data['FACILITY_NAME'] = region_preprocessed_data['REGION']
self.calculate_statistics(region_preprocessed_data)
if self.report_type == 'all' and self.period_name == 'all':
filename = f'{self.country_code}_regions'
else:
filename = f'{self.report_type}_{self.country_code}_{self.period_name}_regions'
self._generate_formatted_preprocessed_data(region_preprocessed_data, filename)
self._generate_formatted_stats(self.stats, filename)
            # Generate presentation
self._generate_presentation(self.stats, filename)
logging.info('The country vs regions report has been generated.')
if site_reports:
# Get list of site ids in the filtered preprocessed data
site_ids = [x for x in set(self.preprocessed_data['SITE_ID'].tolist()) if x != self.country_name]
# Iterate over site ID and for each site ID generate report
for site_id in site_ids:
self.region_name = self._get_region(site_id)
# Filter data for site and country
site_preprocessed_data = self.preprocessed_data.loc[
(self.preprocessed_data['SITE_ID'] == site_id) |
(self.preprocessed_data['SITE_ID'] == self.country_name)
].copy()
print(site_preprocessed_data)
site_name = site_preprocessed_data.loc[site_preprocessed_data['SITE_ID'] == site_id]['FACILITY_NAME'].iloc[0]
# Append data for region to the site preprocessed data
region_preprocessed_data = self.preprocessed_data.loc[
self.preprocessed_data['REGION'] == self.region_name
].copy()
region_preprocessed_data['SITE_ID'] = self.region_name
region_preprocessed_data['FACILITY_NAME'] = self.region_name
site_preprocessed_data = site_preprocessed_data.append(
region_preprocessed_data, ignore_index=True
)
self.calculate_statistics(site_preprocessed_data)
if self.report_type == 'all' and self.period_name == 'all':
filename = site_id
else:
filename = f'{self.report_type}_{site_id}_{self.period_name}'
self._generate_formatted_preprocessed_data(site_preprocessed_data, filename, exclude_country=True)
self._generate_formatted_stats(self.stats, filename)
                # Generate presentation
self._generate_presentation(self.stats, filename, site_name)
logging.info(f'The site report for {site_id} has been generated.')
@property
def stats(self):
return self._stats
@stats.setter
def stats(self, df):
self._stats = df
@property
def columns_to_be_deleted(self):
return self._columns_to_be_deleted
@columns_to_be_deleted.setter
def columns_to_be_deleted(self, value):
self._column_to_be_deleted = value
@property
def country_code(self):
return self._country_code
@country_code.setter
def country_code(self, value):
self._country_code = value
@property
def country_name(self):
return self._country_name
@country_name.setter
def country_name(self, value):
self._country_name = value
@property
def region_name(self):
return self._region_name
@region_name.setter
def region_name(self, value):
self._region_name = value
def _get_region(self, site_id):
''' Get region name based on code in Site ID.
:param site_id: the site ID
:type site_id: str
:returns: the name of the region
:rtype: str
'''
region_code = site_id.split('_')[1]
if region_code in self.regions:
return self.regions[region_code]
else:
return 'Demo'
def _filter_by_date(self, df, start_date, end_date):
''' Filter data by DISCHARGE DATE where discharge date is between start and end date.
:param df: the dataframe to be filtered
:type df: DataFrame
        :param start_date: first date to be included
        :type start_date: datetime
        :param end_date: last date to be included
        :type end_date: datetime
:returns: the filtered dataframe
:rtype: DataFrame
'''
if isinstance(start_date, datetime):
start_date = start_date.date()
if isinstance(end_date, datetime):
end_date = end_date.date()
start_date = | pd.Timestamp(start_date) | pandas.Timestamp |
import sys
import numpy as np
import pandas as pd
import json
import os
from joblib import Parallel, delayed
from gtad_lib import opts
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
def get_infer_dict(opt):
df = pd.read_csv(opt["video_info"])
json_data = load_json(opt["video_anno"])
database = json_data
video_dict = {}
for i in range(len(df)):
video_name = df.video.values[i]
video_info = database[video_name]
video_new_info = {}
video_new_info['duration_frame'] = video_info['duration_frame']
video_new_info['duration_second'] = video_info['duration_second']
video_new_info["feature_frame"] = video_info['feature_frame']
video_subset = df.subset.values[i]
video_new_info['annotations'] = video_info['annotations']
if video_subset == 'validation':
video_dict[video_name] = video_new_info
return video_dict
def Soft_NMS(df, nms_threshold=1e-5, num_prop=100):
'''
    Soft-NMS, adapted from the BSN code.
    :param df: proposal DataFrame with 'xmin', 'xmax' and 'score' columns
    :param nms_threshold: Gaussian decay parameter; an overlapping proposal keeps score * exp(-iou^2 / nms_threshold)
    :return: the suppressed proposals (at most num_prop of them)
'''
df = df.sort_values(by="score", ascending=False)
tstart = list(df.xmin.values[:])
tend = list(df.xmax.values[:])
tscore = list(df.score.values[:])
rstart = []
rend = []
rscore = []
# # frost: I use a trick here, remove the detection
# # which is longer than 300
# for idx in range(0, len(tscore)):
# if tend[idx] - tstart[idx] >= 300:
# tscore[idx] = 0
while len(tscore) > 1 and len(rscore) < num_prop and max(tscore) > 0:
max_index = tscore.index(max(tscore))
for idx in range(0, len(tscore)):
if idx != max_index:
tmp_iou = IOU(tstart[max_index], tend[max_index], tstart[idx], tend[idx])
if tmp_iou > 0:
tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) / nms_threshold)
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
newDf = | pd.DataFrame() | pandas.DataFrame |
"""
Written by <NAME>, 22-10-2018
This script contains functions for data formatting and accuracy assessment of keras models
"""
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import keras.backend as K
from math import sqrt
import numpy as np
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
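# Small illustrative sketch with made-up data (one variable, one lag, one forecast step):
# >>> series_to_supervised([[0], [1], [2], [3]], n_in=1, n_out=1)
#    var1(t-1)  var1(t)
# 1        0.0        1
# 2        1.0        2
# 3        2.0        3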
# model cost function
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
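# Sketch of how this custom cost function would typically be wired in (the model object is assumed):
# model.compile(optimizer='adam', loss=rmse, metrics=[rmse])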
# scale and format observed data as train/test inputs/labels
def format_obs_data(full_data, n_lags, n_ahead, n_train):
# split datetime column into train and test for plots
train_dates = full_data[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[:n_train]
test_dates = full_data[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[n_train:]
test_dates = test_dates.reset_index(drop=True)
test_dates['Datetime'] = pd.to_datetime(test_dates['Datetime'])
values = full_data[['GWL', 'Tide', 'Precip.']].values
values = values.astype('float32')
gwl = values[:, 0]
gwl = gwl.reshape(gwl.shape[0], 1)
tide = values[:, 1]
tide = tide.reshape(tide.shape[0], 1)
rain = values[:, 2]
rain = rain.reshape(rain.shape[0], 1)
# normalize features with individual scalers
gwl_scaler, tide_scaler, rain_scaler = MinMaxScaler(), MinMaxScaler(), MinMaxScaler()
gwl_fit = gwl_scaler.fit(gwl)
gwl_scaled = gwl_fit.transform(gwl)
tide_fit = tide_scaler.fit(tide)
tide_scaled = tide_fit.transform(tide)
rain_fit = rain_scaler.fit(rain)
rain_scaled = rain_fit.transform(rain)
# frame as supervised learning
gwl_super = series_to_supervised(gwl_scaled, n_lags, n_ahead)
gwl_super_values = gwl_super.values
tide_super = series_to_supervised(tide_scaled, n_lags, n_ahead)
tide_super_values = tide_super.values
rain_super = series_to_supervised(rain_scaled, n_lags, n_ahead)
rain_super_values = rain_super.values
# split groundwater into inputs and labels
gwl_input, gwl_labels = gwl_super_values[:, 0:n_lags+1], gwl_super_values[:, n_lags+1:]
# split into train and test sets
train_X = np.concatenate((gwl_input[:n_train, :], tide_super_values[:n_train, :], rain_super_values[:n_train, :]),
axis=1)
test_X = np.concatenate((gwl_input[n_train:, :], tide_super_values[n_train:, :], rain_super_values[n_train:, :]),
axis=1)
train_y, test_y = gwl_labels[:n_train, :], gwl_labels[n_train:, :]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("observed training input data shape:", train_X.shape, "observed training label data shape:", train_y.shape)
print("observed testing input data shape:", test_X.shape, "observed testing label data shape:", test_y.shape)
return train_dates, test_dates, tide_fit, rain_fit, gwl_fit, train_X, test_X, train_y, test_y
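# Rough calling sketch; the file name and the lag/lead/split sizes are placeholder assumptions:
# full_data = pd.read_csv('observed_data.csv')  # must hold Datetime, GWL, Tide and Precip. columns
# (train_dates, test_dates, tide_fit, rain_fit, gwl_fit,
#  train_X, test_X, train_y, test_y) = format_obs_data(full_data, n_lags=26, n_ahead=19, n_train=40000)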
# scale and format storm data as train/test inputs/labels
def format_storm_data(storm_data, n_train, tide_fit, rain_fit, gwl_fit):
# separate storm data into gwl, tide, and rain
storm_scaled = pd.DataFrame(storm_data["Datetime"])
for col in storm_data.columns:
if col.split("(")[0] == "tide":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = tide_fit.transform(col_data)
storm_scaled[col] = col_scaled
if col.split("(")[0] == "rain":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = rain_fit.transform(col_data)
storm_scaled[col] = col_scaled
if col.split("(")[0] == "gwl":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = gwl_fit.transform(col_data)
storm_scaled[col] = col_scaled
# split storm data into inputs and labels
storm_values = storm_scaled[storm_scaled.columns[1:]].values
storm_input, storm_labels = storm_values[:, :-18], storm_values[:, -18:]
# split into train and test sets
train_X, test_X = storm_input[:n_train, :], storm_input[n_train:, :]
train_y, test_y = storm_labels[:n_train, :], storm_labels[n_train:, :]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("observed training input data shape:", train_X.shape, "observed training label data shape:", train_y.shape)
print("observed testing input data shape:", test_X.shape, "observed testing label data shape:", test_y.shape)
return train_X, test_X, train_y, test_y
# scale and format forecast data as train/test inputs/labels
def format_fcst_data(fcst_data, tide_fit, rain_fit, gwl_fit):
# separate forecast data into gwl, tide, and rain
fcst_scaled = pd.DataFrame(fcst_data["Datetime"])
for col in fcst_data.columns:
if col.split("(")[0] == "tide":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = tide_fit.transform(col_data)
fcst_scaled[col] = col_scaled
if col.split("(")[0] == "rain":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = rain_fit.transform(col_data)
fcst_scaled[col] = col_scaled
if col.split("(")[0] == "gwl":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = gwl_fit.transform(col_data)
fcst_scaled[col] = col_scaled
# split fcst data into inputs and labels
fcst_values = fcst_scaled[fcst_scaled.columns[1:]].values
fcst_input, fcst_labels = fcst_values[:, :-18], fcst_values[:, -18:]
# reshape fcst input to be 3D [samples, timesteps, features]
fcst_test_X = fcst_input.reshape((fcst_input.shape[0], 1, fcst_input.shape[1]))
print("forecast input data shape:", fcst_test_X.shape, "forecast label data shape:", fcst_labels.shape)
return fcst_test_X, fcst_labels
# create df of full observed data and predictions and extract storm data
def full_pred_df(test_dates, storm_data, n_lags, n_ahead, inv_y, inv_yhat):
dates_t1 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 1:-n_ahead + 2])
dates_t1 = dates_t1.reset_index(inplace=False, drop=True)
dates_9 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 9:-n_ahead + 10])
dates_9 = dates_9.reset_index(inplace=False, drop=True)
dates_18 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 18:])
dates_18 = dates_18.reset_index(inplace=False, drop=True)
obs_t1 = np.reshape(inv_y[:, 0], (inv_y.shape[0], 1))
pred_t1 = np.reshape(inv_yhat[:, 0], (inv_y.shape[0], 1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = pd.DataFrame(df_t1, index=None, columns=["Obs. GWL t+1", "Pred. GWL t+1"])
df_t1 = pd.concat([df_t1, dates_t1], axis=1)
df_t1 = df_t1.set_index("Datetime")
obs_t9 = np.reshape(inv_y[:, 8], (inv_y.shape[0], 1))
pred_t9 = np.reshape(inv_yhat[:, 8], (inv_y.shape[0], 1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = pd.DataFrame(df_t9, index=None, columns=["Obs. GWL t+9", "Pred. GWL t+9"])
df_t9 = pd.concat([df_t9, dates_9], axis=1)
df_t9 = df_t9.set_index("Datetime")
obs_t18 = np.reshape(inv_y[:, 17], (inv_y.shape[0], 1))
pred_t18 = np.reshape(inv_yhat[:, 17], (inv_y.shape[0], 1))
df_t18 = np.concatenate([obs_t18, pred_t18], axis=1)
df_t18 = pd.DataFrame(df_t18, index=None, columns=["Obs. GWL t+18", "Pred. GWL t+18"])
df_t18 = pd.concat([df_t18, dates_18], axis=1)
df_t18 = df_t18.set_index("Datetime")
storm_dates_t1 = storm_data[['gwl(t+1)']]
storm_dates_t1.index = storm_dates_t1.index + pd.DateOffset(hours=1)
storm_dates_t9 = storm_data[['gwl(t+9)']]
storm_dates_t9.index = storm_dates_t9.index + pd.DateOffset(hours=9)
storm_dates_t18 = storm_data[['gwl(t+18)']]
storm_dates_t18.index = storm_dates_t18.index + pd.DateOffset(hours=18)
df_t1_storms = np.asarray(df_t1[df_t1.index.isin(storm_dates_t1.index)])
df_t9_storms = np.asarray(df_t9[df_t9.index.isin(storm_dates_t9.index)])
df_t18_storms = np.asarray(df_t18[df_t18.index.isin(storm_dates_t18.index)])
storms_list = [df_t1_storms, df_t9_storms, df_t18_storms]
return df_t1, df_t9, df_t18, storms_list
# create df of storm observed data and predictions
def storm_pred_df(storm_data, n_train, inv_y, inv_yhat):
test_dates_t1 = storm_data[['Datetime', 'tide(t+1)', 'rain(t+1)']].iloc[n_train:]
test_dates_t1 = test_dates_t1.reset_index(drop=True)
test_dates_t1['Datetime'] = pd.to_datetime(test_dates_t1['Datetime'])
test_dates_t1['Datetime'] = test_dates_t1['Datetime'] + pd.DateOffset(hours=1)
test_dates_t9 = storm_data[['Datetime', 'tide(t+9)', 'rain(t+9)']].iloc[n_train:]
test_dates_t9 = test_dates_t9.reset_index(drop=True)
test_dates_t9['Datetime'] = | pd.to_datetime(test_dates_t9['Datetime']) | pandas.to_datetime |
# coding: utf-8
# Author: <NAME>
import os
import sys
import traceback
from datetime import datetime
import pandas as pd
import numpy as np
import woe_tools as woe
usage = '''
################################### Summarize #######################################
This toolkit is used for data preprocessing and provides the following components:
1.Cap
2.Floor
3.MissingImpute
4.Woe
5.Normalize
6.Scale
7.Tactic
-------------------------------------------------------------------------------------
Usage:
import pandas as pd
import numpy as np
import preprocess as pp
df_train = pd.read_csv('train_data.csv')
df_test = pd.read_csv('test_data.csv')
df_config = pd.read_csv('edd_config.csv')
# Call a single component:
operation = pp.MissingImpute(df_config)
df_reference = operation.fit(df_train)
df_train = operation.apply(df_train)
df_test = operation.apply(df_test)
# Design the whole preprocessing pipeline:
process = pp.Tactic(df_config, process_list=[pp.Cap, pp.Floor, pp.MissingImpute, pp.Woe], target='target')
process.summary()
df_reference = process.fit(df_train)
df_train = process.apply(df_train)
df_test = process.apply(df_test)
process.save_reference('./edd_reference.csv')
# A previously generated reference table can also be loaded and applied to the data directly
df_reference = pd.read_csv('edd_reference.csv')
process = pp.Tactic(df_reference, process_list=[pp.Cap, pp.Floor, pp.MissingImpute, pp.Woe], target='target')
df_train = process.apply(df_train)
df_test = process.apply(df_test)
---------------------------------------------------------------------------------------
Notes:
1. Do not apply WOE while the data still contains missing values;
2. When the pipeline includes WOE, target must be specified, otherwise an error is raised;
3. For a new dataset, it is best to run the preprocessing step by step the first time, so that the output of each step can be checked.
#######################################################################################
'''
def __map_feature_type(t, time_as_num=False):
"""
convert the dataFrame type to feature type (Numerical or Categorical)
"""
if t in (int, np.int64, np.int32, np.int16, bool, float, np.float32, np.float64, np.float128):
return 'numerical'
elif t in (str,):
return 'categorical'
elif t in (pd.tslib.Timestamp, ):
return 'numerical' if time_as_num else 'timestamp'
def __extract_feature_type(df, known_columns={}):
"""
extract columns type of a dataframe and map it
"""
col_list = []
for var in df.columns:
if var in known_columns:
col_list.append((var, known_columns[var]))
continue
var_type = __map_feature_type(df[var].dtype.type)
if var_type is not None:
col_list.append((var, var_type))
continue
type_set = set(df[var][~df[var].isnull()].apply(lambda x: type(x)))
if len(type_set) == 1:
var_type = __map_feature_type(type_set.pop())
if var_type is not None:
col_list.append((var, var_type))
continue
raise ValueError('Unknown type of column "{0}" as {1}'.format(var, type_set))
return col_list
def create_edd_config(df_master, known_columns={}, save_path=None):
"""
    Generate the config file used for data preprocessing
    Parameters
    ----------
    df_master:
        DataFrame
    known_columns: dict, default {}
        known column types, e.g. {'age': 'numerical', 'sex': 'categorical'}
    save_path: str, default None
    Returns
    -------
    df_config: DataFrame
        the preprocessing configuration table
"""
column_type = __extract_feature_type(df_master, known_columns=known_columns)
df_config = | pd.DataFrame(column_type, columns=['Var_Name', 'Var_Type']) | pandas.DataFrame |
import os
import sys
import random
import itertools
import pandas as pd
import tomotopy as tp
from pathlib import Path
from collections import namedtuple
from datetime import datetime
from tqdm import tqdm
import numpy as np
from ekorpkit import eKonf
from ekorpkit.utils.func import elapsed_timer
from ekorpkit.io.load.list import load_wordlist, save_wordlist
from ekorpkit.io.file import save_dataframe, load_dataframe
from ekorpkit.visualize.wordcloud import generate_wordclouds, savefig
from ekorpkit.pipelines.pipe import apply
ModelSummary = namedtuple(
"ModelSummary",
[
"train_dt",
"filename",
eKonf.Keys.CORPUS,
"model_id",
"model_type",
"sample_ratio",
"num_docs",
"num_words",
"total_vocabs",
"used_vocabs",
"iterations",
"interval",
"burn_in",
"ll_per_word",
"tw",
"min_cf",
"min_df",
"rm_top",
"k",
"k1",
"k2",
"alpha",
"eta",
"seed",
"perplexity",
"u_mass",
"c_uci",
"c_npmi",
"c_v",
],
defaults=[None] * 29,
)
IDF = tp.TermWeight.IDF
ONE = tp.TermWeight.ONE
PMI = tp.TermWeight.PMI
class TopicModel:
def __init__(
self,
model_name,
model_dir,
output_dir,
num_workers=0,
ngram=None,
files=None,
verbose=False,
**kwargs,
):
self.model_name = model_name
self.model_dir = Path(str(model_dir))
self.output_dir = Path(str(output_dir))
self.num_workers = num_workers
self.ngram = ngram
self.files = files
self.verbose = verbose
self._raw_corpus = tp.utils.Corpus()
self._raw_corpus_keys = None
self.ngrams = None
self.ngram_model = None
self._ngram_docs = None
self.stopwords = []
self.docs = None
self.corpus = None
self.corpora = None
self.sample_ratio = 1.0
self.active_model_id = None
self.model = None
self.models = {}
self.labels = []
self.summary_file = Path(self.files.summary)
self.summaries = []
if self.summary_file.is_file():
df = eKonf.load_data(self.summary_file, index_col=0)
for row in df.itertuples():
self.summaries.append(ModelSummary(*row[1:]))
self.corpus_key_path = Path(self.files.corpus_key)
self._raw_corpus_key_path = Path(self.files.raw_corpus_key)
self.ngram_candidates_path = Path(self.files.ngram_candidates)
self.ngram_model_path = Path(self.files.ngram_model)
self.ngram_docs_path = Path(self.files.ngram_docs)
self.stoplist_paths = self.files.stoplist
if self.stoplist_paths is None:
self.stoplist_paths = []
else:
if isinstance(self.stoplist_paths, str):
self.stoplist_paths = [self.stoplist_paths]
else:
self.stoplist_paths = list(self.stoplist_paths)
self.stopwords_path = Path(self.files.stopwords)
self.default_stopwords_path = Path(self.files.default_stopwords)
self.default_word_prior_path = Path(self.files.default_word_prior)
self.word_prior_path = Path(self.files.word_prior)
(self.model_dir).mkdir(exist_ok=True, parents=True)
(self.output_dir / "figures/wc").mkdir(exist_ok=True, parents=True)
(self.output_dir / "figures/train").mkdir(exist_ok=True, parents=True)
(self.output_dir / "output/train").mkdir(exist_ok=True, parents=True)
(self.output_dir / "figures/tune").mkdir(exist_ok=True, parents=True)
(self.output_dir / "output/tune").mkdir(exist_ok=True, parents=True)
(self.output_dir / "logs").mkdir(exist_ok=True, parents=True)
def _load_raw_corpus(self, reload_corpus=False):
def data_feeder(docs):
for doc in docs:
fd = doc.strip().split(maxsplit=1)
timepoint = int(fd[0])
yield fd[1], None, {"timepoint": timepoint}
if not self._raw_corpus or reload_corpus:
self._raw_corpus = tp.utils.Corpus(tokenizer=tp.utils.SimpleTokenizer())
if self.corpora is None:
raise ValueError("corpora is not set")
with elapsed_timer() as elapsed:
self.corpora.load()
self.corpora.concat_corpora()
df = self.corpora._data
self._raw_corpus_keys = df[self.corpora._id_keys].values.tolist()
self._raw_corpus.process(df[self.corpora._text_key].to_list())
eKonf.save_data(
df[self.corpora.IDs],
self._raw_corpus_key_path,
verbose=self.verbose,
)
print("Elapsed time is %.2f seconds" % elapsed())
def extract_ngrams(self):
if self.ngrams is None:
self._load_raw_corpus()
assert self._raw_corpus, "Load a corpus first"
with elapsed_timer() as elapsed:
print("Extracting ngram candidates")
self.ngrams = self._raw_corpus.extract_ngrams(
min_cf=self.ngram.min_cf,
min_df=self.ngram.min_df,
max_len=self.ngram.max_len,
max_cand=self.ngram.max_cand,
min_score=self.ngram.min_score,
normalized=self.ngram.normalized,
workers=self.num_workers,
)
# print(self.ngrams)
ngram_list = [
{"words": ",".join(cand.words), "score": cand.score}
for cand in self.ngrams
]
df = pd.DataFrame(ngram_list)
eKonf.save_data(df, self.ngram_candidates_path, verbose=self.verbose)
print("Elapsed time is %.2f seconds" % elapsed())
def _load_ngram_docs(self, rebuild=False):
if self.ngram_docs_path.is_file() and not rebuild:
with elapsed_timer() as elapsed:
print(f"Starting to load ngram documents from {self.ngram_docs_path}")
self._raw_corpus = tp.utils.Corpus().load(self.ngram_docs_path)
df = eKonf.load_data(self._raw_corpus_key_path)
self._raw_corpus_keys = df[self.corpora._id_keys].values.tolist()
# self._raw_corpus.load(self.ngram_doc_path)
print(f"{len(self._raw_corpus)} documents are loaded.")
print("Elapsed time is %.2f seconds" % elapsed())
else:
self.extract_ngrams()
assert self.ngrams, "Load a ngrams first"
            print("Building ngram docs by concatenating words in the ngram list")
self._raw_corpus.concat_ngrams(self.ngrams, self.ngram.delimiter)
self._raw_corpus.save(self.ngram_docs_path)
def _load_stopwords(self):
self.stopwords = []
if self.stopwords_path.is_file():
self.stopwords = load_wordlist(self.stopwords_path, lowercase=True)
else:
if self.default_stopwords_path.is_file():
self.stopwords = load_wordlist(
self.default_stopwords_path, lowercase=True
)
else:
self.stopwords = ["."]
save_wordlist(self.stopwords, self.stopwords_path)
if self.verbose:
print(f"{len(self.stopwords)} stopwords are loaded.")
for path in self.stoplist_paths:
if os.path.exists(path):
stopwords = load_wordlist(path, lowercase=True)
self.stopwords += stopwords
save_wordlist(stopwords, path)
if self.verbose:
print(f"{len(stopwords)} stopwords are loaded from {path}")
def _load_word_prior(self):
if self.word_prior_path.is_file():
self.word_prior = eKonf.load(self.word_prior_path)
print(self.word_prior)
else:
if self.default_word_prior_path.is_file():
self.word_prior = eKonf.load(self.default_word_prior_path)
print(self.word_prior)
else:
self.word_prior = {}
eKonf.save(self.word_prior, self.word_prior_path)
def load_corpus(
self,
sample_ratio=1.0,
reload_corpus=False,
min_num_words=5,
min_word_len=2,
rebuild=False,
**kwargs,
):
sample_ratio = sample_ratio if sample_ratio else self.sample_ratio
if self.corpus and self.sample_ratio == sample_ratio and not reload_corpus:
print("Corpus is already loaded w/ sample_ratio: {}".format(sample_ratio))
return True
else:
print("Start loading corpus w/ sample_ratio: {}".format(sample_ratio))
if not self._raw_corpus:
self._load_ngram_docs(rebuild=rebuild)
self._load_stopwords()
assert self._raw_corpus, "Load ngram documents first"
assert self.stopwords, "Load stopwords first"
if sample_ratio and sample_ratio < 1.0 and sample_ratio > 0.0:
docs = random.sample(
self._raw_corpus, int(len(self._raw_corpus) * sample_ratio)
)
self.sample_ratio = sample_ratio
else:
docs = self._raw_corpus
self.sample_ratio = 1.0
self.corpus = tp.utils.Corpus()
self.corpus_keys = []
n_skipped = 0
for i_doc, doc in tqdm(enumerate(docs)):
words = [
w for w in doc if w not in self.stopwords and len(w) >= min_word_len
]
if len(words) > min_num_words:
self.corpus.add_doc(words=words)
self.corpus_keys.append(self._raw_corpus_keys[i_doc])
else:
if self.verbose > 5:
print(
f"Skipped - index={i_doc}, key={self._raw_corpus_keys[i_doc]}, words={list(words)}"
)
n_skipped += 1
print(f"Total {i_doc-n_skipped+1} documents are loaded.")
print(f"Total {n_skipped} documents are removed from the corpus.")
df = pd.DataFrame(self.corpus_keys, columns=self.corpora._id_keys)
eKonf.save_data(
df[self.corpora._id_keys], self.corpus_key_path, verbose=self.verbose
)
def infer_topics(
self,
output_dir=None,
output_file=None,
iterations=100,
min_num_words=5,
min_word_len=2,
num_workers=0,
use_batcher=True,
minibatch_size=None,
**kwargs,
):
self._load_stopwords()
assert self.stopwords, "Load stopwords first"
assert self.model, "Model not found"
        print("Inferring documents with the trained model")
os.makedirs(os.path.abspath(output_dir), exist_ok=True)
num_workers = num_workers if num_workers else 1
text_key = self.corpora._text_key
id_keys = self.corpora._id_keys
df_ngram = eKonf.load_data(self.ngram_candidates_path)
ngrams = []
for ngram in df_ngram['words'].to_list():
ngrams.append(ngram.split(','))
simtok = SimpleTokenizer(
stopwords=self.stopwords,
min_word_len=min_word_len,
min_num_words=min_num_words,
ngrams=ngrams,
ngram_delimiter=self.ngram.delimiter,
verbose=self.verbose,
)
if self.corpora is None:
raise ValueError("corpora is not set")
with elapsed_timer() as elapsed:
self.corpora.load()
self.corpora.concat_corpora()
df = self.corpora._data
df.dropna(subset=[text_key], inplace=True)
df[text_key] = apply(
simtok.tokenize,
df[text_key],
description=f"tokenize",
verbose=self.verbose,
use_batcher=use_batcher,
minibatch_size=minibatch_size,
)
df = df.dropna(subset=[text_key]).reset_index(drop=True)
if self.verbose:
print(df.tail())
docs = []
indexes_to_drop = []
for ix in df.index:
doc = df.loc[ix, text_key]
mdoc = self.model.make_doc(doc)
if mdoc:
docs.append(mdoc)
else:
print(f"Skipped - {doc}")
indexes_to_drop.append(ix)
df = df.drop(df.index[indexes_to_drop]).reset_index(drop=True)
if self.verbose:
print(f"{len(docs)} documents are loaded from: {len(df.index)}.")
topic_dists, ll = self.model.infer(
docs, workers=num_workers, iter=iterations
)
if self.verbose:
print(topic_dists[-1:], ll)
print(f"Total inferred: {len(topic_dists)}, from: {len(df.index)}")
if len(topic_dists) == len(df.index):
idx = range(len(topic_dists[0]))
df_infer = | pd.DataFrame(topic_dists, columns=[f"topic{i}" for i in idx]) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
tm.assert_series_equal(
datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
)
# shift int frame
int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_frame_equal(shifted2, shifted3)
tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH#24128
df = DataFrame(
[1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = DataFrame(
[0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(1, fill_value=0)
tm.assert_frame_equal(result, exp)
exp = DataFrame(
[0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(2, fill_value=0)
tm.assert_frame_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
| tm.assert_frame_equal(shifted[0], shifted[2]) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 11:22:36 2020
@author: felip_001
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from time import time
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
from matplotlib.widgets import Button
import util
import datetime
import pandapower as pp
import networkx as nx
import pandapower.control as ppc
def plot_lines(lines, col='ShapeGPS', ax=None, **plot_params):
""" Plots a DataFrame containing line coordinates in column 'col'
"""
segs = []
    if col not in lines:
        if 'Shape' in lines:
            col = 'Shape'
    for i in lines[col]:
        segs.append(i)
    if ax is None:
        f, ax = plt.subplots()
    if 'color' not in plot_params:
        plot_params['color'] = 'r'
    if 'alpha' not in plot_params:
        plot_params['alpha'] = 0.5
lc = LineCollection(segs, gid=list(lines.index), **plot_params)
ax.add_collection(lc)
ax.autoscale()
return ax
def plot_quick(lines, lv, ss, nodes=None, GPS=False):
if GPS:
sg = 'GPS'
d = 0.005
else:
sg = ''
d = 20
col='Shape' + sg
x = 'x' + sg
y = 'y' + sg
x1 = 'x1' + sg
y1 = 'y1' + sg
x2 = 'x2' + sg
y2 = 'y2' + sg
ax=plot_lines(lines, col=col, label='Lines')
ax.plot(lv[x], lv[y], 'o', color='yellow', alpha=0.5, label='LV trafo')
ax.plot(ss[x], ss[y], '*', color='purple', markersize=10, label='Substation')
for s, t in ss.iterrows():
ax.text(t[x]+d, t[y]+d, s)
if nodes is None:
ax.plot(lines[x1], lines[y1], '.', color='k', markersize=1, label='Nodes')
ax.plot(lines[x2], lines[y2], '.', color='k', markersize=1, label='_')
else:
ax.plot(nodes[x], nodes[y], '.', color='k', markersize=1, label='Nodes')
q = get_node_to_line_segments(nodes, lines, lv, GPS=GPS)
plot_lines(q, linestyle='--', color='purple', col=col, ax=ax)
plt.legend()
return ax
def to_node(lines, node):
""" Returns all lines connected to a given node"""
return list(lines[((lines.node_i == node) | (lines.node_e == node))].index)
def unique_nodes(lines):
""" Returns a list of unique nodes in a list of lines """
return list(set(lines.node_i).union(set(lines.node_e)))
def new_nodes(lines, node):
l = unique_nodes(lines)
l.remove(node)
return l
def connected(lines, node):
""" Returns all lines directly or indirectly connected to a given departure node """
# create nx graph
g = nx.Graph()
for l,t in lines.iterrows():
g.add_edge(t.node_i, t.node_e)
# compute connected elements
cc = list(nx.connected_components(g))
# check for connected elements to node
for c in cc:
if node in c:
return list(lines[lines.node_i.isin(c) | lines.node_e.isin(c)].index)
def dist_to_node_nx(lines, n0, name='name'):
""" Returns a pd.Series of with the min distance of nodes to a given Node
Uses networkx, djistra min dist, super fast!
"""
# Creates nx graph
g = nx.Graph()
for l,t in lines.iterrows():
g.add_edge(t.node_i, t.node_e, weight=t.Length)
d = pd.Series(nx.single_source_dijkstra_path_length(g, n0), name=name)
return d.sort_index()
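# A minimal usage sketch for dist_to_node_nx with hypothetical data (not part of the original
# pipeline): `lines` only needs the node_i, node_e and Length columns used above.
def _example_dist_to_node():
    toy_lines = pd.DataFrame({'node_i': ['N0', 'N0', 'N1'],
                              'node_e': ['N1', 'N2', 'N3'],
                              'Length': [100.0, 250.0, 80.0]})
    # distance of every node to 'N0' along the toy network
    return dist_to_node_nx(toy_lines, 'N0', name='d_to_N0')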
def get_ind_feeders_nx(lines, n0, verbose=False):
""" Returns a list of independent feeder connected to the Main SS (n0)"""
# Create nx graph
g = nx.Graph()
for l,t in lines.iterrows():
g.add_edge(t.node_i, t.node_e, weight=t.Length)
# Remove initial node
g.remove_node(n0)
cc = list(nx.connected_components(g))
# Putting it in pd.Series
feeder = pd.Series(index=lines.index)
feeder_length = {}
nfs = 0
for fs in cc:
lls = lines[(lines.node_i.isin(fs)) | (lines.node_e.isin(fs))].index
# check if lines are connected to main node, otherwise skip
if (n0 in lines.node_i[lls].values) or (n0 in lines.node_e[lls].values):
feeder[lls] = nfs
feeder_length[nfs] = lines.Length[lls].sum()
nfs += 1
# renaming feeders from shortest to longest
feeder_length = pd.Series(feeder_length)
feeder_length.sort_values(inplace=True)
nfs = 0
for fs in feeder_length.index:
feeder[feeder == fs] = 'F{:02d}'.format(nfs)
nfs += 1
if verbose:
print('Initial feeders = {}'.format(len(to_node(lines,n0))))
        print('Finished computing independent feeders, Total={}'.format(len(cc)))
return feeder
def number_init_feeders(lines, n0):
# Lines connected to main node
ls = to_node(lines, n0)
lsdf = lines.loc[ls]
# List of feeders
fs = lines.Feeder.dropna().unique()
fs.sort()
print('Number of Initial feeders per connected feeder and total length [km]')
print('Feeder:\t#Init\tLength [km]')
for f in fs:
print('{:6}:\t{:4}\t{:8.2f}'.format(f, len(lsdf[lsdf.Feeder == f]), lines[lines.Feeder == f].Length.sum()/1000))
def get_farther(point, points):
idx = ((point.xGPS-points.xGPS)**2+(point.yGPS-points.yGPS)**2).idxmax()
return points.xGPS[idx], points.yGPS[idx]
def get_coord_node(lines, node, GPS=True):
u = lines[lines.node_i == node]
if len(u) == 0:
u = lines[lines.node_e == node]
if len(u) == 0:
return
cols = ['xGPS', 'yGPS'] if GPS else ['x', 'y']
return u[cols].iloc[0]
def get_node_to_line_segments(nodes, lines, lv=None, GPS=True):
""" returns a pd.DataFrame of segments that rely nodes to lines
"""
if GPS:
colpoint='xyGPS'
col = 'ShapeGPS'
colline1 = 'xy1GPS'
colline2 = 'xy2GPS'
else:
col ='Shape'
colpoint ='Shape'
colline1 = 'xy1'
colline2 = 'xy2'
# getting node coordinates for each line extreme
fi = nodes[colpoint][lines.node_i]
fe = nodes[colpoint][lines.node_e]
fi.index = lines.index
fe.index = lines.index
# reformating
shape1 = lines[colline1]
shape2 = lines[colline2]
# defining segments as [(xnode, ynode), (xline(node), yline(node))]
segi = fi.apply(lambda x: [x]) + shape1.apply(lambda x: [x])
sege = fe.apply(lambda x: [x]) + shape2.apply(lambda x: [x])
# removing segments of length null
segi = segi[~(fi==shape1)]
sege = sege[~(fe==shape2)]
# appending to output
segs = pd.concat([segi, sege], ignore_index=True)
# doing the same for LV trafos
if not (lv is None):
fl = nodes[colpoint][lv.node]
fl.index = lv.index
segl = fl.apply(lambda x: [x]) + lv[colpoint].apply(lambda x: [x])
segl = segl[~(fl==lv[colpoint])]
segs = pd.concat([segs,segl], ignore_index=True)
return pd.DataFrame(segs, columns=['Shape'])
def assign_feeder_to_node(nodes, n0, lines):
nodes['Feeder'] = ''
for f in lines.Feeder.unique():
nodes.Feeder[unique_nodes(lines[lines.Feeder == f])] = f
nodes.Feeder[n0] = '0SS'
def rename_nodes(nodes, n0, lines, lv, ss):
""" Rename the nodes according to Feeder appartenance and distance to main node
New index is just ascending numeric
Adds new column 'name' with a meaningful name
Updates relations to lines node_i, node_e; lv and ss
"""
nodes['d'] = ((nodes.xGPS[n0]-nodes.xGPS)**2+(nodes.yGPS[n0]-nodes.yGPS)**2)
assign_feeder_to_node(nodes, n0, lines)
# Sorting nodes
nodes.sort_values(['Feeder', 'd'], inplace=True)
# Creating new index
nodes.reset_index(inplace=True)
#Renames nodes
nodes.index = 'N'+ nodes.index.astype(str)
# Get relationship old index-new index
old_new = pd.Series(index=nodes['index'], data=nodes.index)
#Update relationship to lines and trafos
lines.node_i = old_new[lines.node_i].values
lines.node_e = old_new[lines.node_e].values
lv.node = old_new[lv.node].values
ss.node = old_new[ss.node].values
# drops old index
nodes.drop('index', axis=1, inplace=True)
def rename_lines(lines, n0):
""" Rename lines according to Feeder and distance to main node
Name (index) considers number and feeder meaningful name
"""
# Computing distance to main node
l = lines.loc[to_node(lines,n0)[0]]
x0 = l.x1GPS if l.node_i == n0 else l.x2GPS
y0 = l.y1GPS if l.node_i == n0 else l.y2GPS
d1 = ((x0-lines.x1GPS)**2+(y0-lines.y1GPS)**2)
d2 = ((x0-lines.x2GPS)**2+(y0-lines.y2GPS)**2)
lines['d'] = pd.DataFrame([d1,d2]).min()
# sorting lines
lines.sort_values(['Feeder', 'd'], inplace=True, ascending=True)
# Creating new index
lines.reset_index(inplace=True, drop=True)
# Renaming lines
#Adds new name
lines.index = ['L'+str(i)+'_'+lines.Feeder[i] for i in lines.index]
#%% Reducing number of nodes section
def join_lines(lines, l1, l2):
""" Joins line l2 to l1
"""
nl = dict(lines.loc[l1])
ol = dict(lines.loc[l2])
# id common node
if nl['node_i'] in [ol['node_i'], ol['node_e']]:
common_node = nl['node_i']
elif nl['node_e'] in [ol['node_i'], ol['node_e']]:
common_node = nl['node_e']
else:
print('no common node')
return
# id direction of join
if nl['node_i'] == common_node:
nl['x1'] = nl['x2']
nl['y1'] = nl['y2']
nl['x1GPS'] = nl['x2GPS']
nl['y1GPS'] = nl['y2GPS']
nl['node_i'] = nl['node_e']
nl['Shape'] = nl['Shape'][::-1]
nl['ShapeGPS'] = nl['ShapeGPS'][::-1]
if ol['node_i'] == common_node:
nl['x2'] = ol['x2']
nl['y2'] = ol['y2']
nl['x2GPS'] = ol['x2GPS']
nl['y2GPS'] = ol['y2GPS']
nl['node_e'] = ol['node_e']
nl['Shape'] += ol['Shape']
nl['ShapeGPS'] += ol['ShapeGPS']
else:
nl['x2'] = ol['x1']
nl['y2'] = ol['y1']
nl['x2GPS'] = ol['x1GPS']
nl['y2GPS'] = ol['y1GPS']
nl['node_e'] = ol['node_i']
nl['Shape'] += ol['Shape'][::-1]
nl['ShapeGPS'] += ol['ShapeGPS'][::-1]
nl['x'] = np.mean([xy[0] for xy in nl['Shape']])
nl['y'] = np.mean([xy[1] for xy in nl['Shape']])
nl['xGPS'] = np.mean([xy[0] for xy in nl['ShapeGPS']])
nl['yGPS'] = np.mean([xy[1] for xy in nl['ShapeGPS']])
nl['Length'] += ol['Length']
lines.loc[l1] = list(nl.values())
lines.drop(l2, inplace=True)
def count_elements(lines, nodes, lv=None, ss=None):
""" Returns the number of elements arriving at each node
"""
oh = lines[lines.Type == 'Overhead']
ug = lines[lines.Type == 'Underground']
cols = ['nlines_oh', 'nlines_ug']
if not lv is None:
cols += ['nlv']
if not ss is None:
cols += ['nss']
# Counting elements arriving at each node
nelements = | pd.DataFrame(index=nodes.index, columns=cols) | pandas.DataFrame |
### This script will create a trained scVI model and write the data that is used by wormcells-viz
### https://github.com/WormBase/wormcells-viz
### please check that the default arguments match your annotations
'''
It assumes that data has been wrangled into the WormBase standard anndata format:
https://github.com/WormBase/anndata-wrangling
Three separate anndata files (.h5ad) will be created:
## For the expression heatmap
This data is a 2D matrix of shape:
$ n_{celltypes} \times n_{genes} = x_{obs} \times y_{var} $
```
adata.obs = cell_types
adata.var = gene_id
adata.X = log10 scvi expression frequency values in the X field
```
## For the gene histogram
This data is a 3D tensor of shape:
$ n_{celltypes} \times n_{bins} \times n_{genes} = x_{obs} \times y_{var} \times z_{layers} $
The anndata obs contains the cell types and var contains the histogram bins,
the genes are stored in layers with the keys being the gene ID.
We store the genes in the layers because each view in the wormcells-viz app show the histograms for a single gene,
so this makes accessing the data simpler
The histogram bin counts are computed from the scvi normalized expression values, binned into 100 bins spanning 10^-10 to 10^0 (log10 scale)
```
adata.obs = cell_types
adata.var = bins with counts
        these should be 100 evenly spaced bins over (-10, 0), holding the counts of cells whose
        log10-transformed expression rates fall in each bin (i.e. expression frequencies of 10^-10 to 10^0)
adata.X = NOTHING (filled with vector of zeroes)
adata.layers[cell_type] = the key is the corresponding cell_type
each layer contains counts in each bin for all cell types
adata.uns['about'] = information about the dataset
```
## For the swarm plots
This data is a 3D tensor of shape:
$ n_{celltypes} \times n_{genes} \times n_{celltypes} = x_{obs} \times y_{var} \times z_{layers} $
Notice that the cell types are repeated along two dimensions, because this data contains the results of pairwise DE
comparisons among each cell type in the data.
Plus $n_{celltypes}$ matrices of shape:
$ n_{celltypes} \times n_{genes} = x_{obs} \times y_{var} $
Because each `anndata.uns[celltype]` contains a dataframe with global differential expression results for that celltype.
Finally, `anndata.uns['heatmap']` contains the 2D matrix with log10 scvi expression rates heatmap data, with genes in
the index and cell types in the columns. This can be used to display the expression of each tissue upon mouseover.
```
anndata.obs = cell_types
anndata.var = gene_id
anndata.X = NOTHING (filled with vector of zeroes)
anndata.layers[cell_type] = mean log fold change for a given cell type for all genes
anndata.uns[cell_type] = contains the DE results of the corresponding cell type vs all other cells
this can be used for ordering the genes by p-value, expression,
and by log fold change lfc max/min/median/mean/std
anndata.uns['heatmap']= dataframe with genes in index and cell types in columns containing the log10 of the
scvi expression frequency for each cell type
anndata.uns['about'] = information about the dataset
```
'''
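# A minimal, hypothetical sketch (not called anywhere in this script) of how the heatmap
# .h5ad written below could be inspected once the pipeline has run; the path argument is
# whatever `model_name + '+heatmap_anndata.h5ad'` ends up being.
def _peek_heatmap_h5ad(path):
    import anndata as _ad  # local import so this helper stays self-contained
    ad = _ad.read_h5ad(path)
    # rows (obs) are cell types, columns (var) are gene ids,
    # X holds log10 scvi expression frequencies, .uns['about'] describes the dataset
    print(ad.obs.index[:5], ad.var.index[:5], ad.X.shape)
    return ad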
### USER DEFINED ARGUMENTS
### PLEASE MAKE SURE THESE ARGUMENTS MATCH YOUR DATA
# path to anndata file on which to train the model
anndata_path = 'cengen.h5ad'
# this should be the label on which you'd like to stratify groups
# typically it is cell_type or cell_subtype
stratification_label = 'cell_subtype'
# minimum number of UMIs seen for each gene to kept
min_gene_counts = 100
### the adata.obs key on which scvi should perform batch correction
batch_key = 'sample_batch'
# model_name is the name of the folder where scvi will look for the trained model, or
# save the trained model if it doesn't find anything
model_name = 'cengen_scvi_2021-06-13'
### these multiline strings will be added to the adata.uns['about'] property, it can be anything
about_heatmap = """
Heatmap data for model {string_model_name}.
This h5ad file is made to be used with the WormBase wormcells-viz app
https://github.com/WormBase/wormcells-viz
""".format(string_model_name=model_name)
about_histograms = """
Histogram data for model {string_model_name}.
This h5ad file is made to be used with the WormBase wormcells-viz app
https://github.com/WormBase/wormcells-viz
""".format(string_model_name=model_name)
about_swarmplots = """
Swarm plot data for model {string_model_name}
this h5ad file is made to be used with the WormBase wormcells-viz app
https://github.com/WormBase/wormcells-viz
""".format(string_model_name=model_name)
### END OF USER ARGUMENTS SECTION --- YOU SHOULDN'T NEED TO CHANGE ANYTHING BELOW HERE ####
### IMPORTS ###
print('Starting imports...')
import anndata
import scvi
import numpy as np
from tqdm import tqdm
import pandas as pd
import os
import scanpy
import warnings
from scipy import sparse
warnings.filterwarnings("ignore")
print('Using scvi-tools version:', scvi.__version__)
def load_or_train_scvi_model(model_name=model_name, anndata_path=anndata_path):
# Try loading model, if it doesn't exist train from scratch
print('Trying to load or train model...')
try:
model = scvi.model.SCVI.load(model_name)
print('Loaded model:', model_name)
except:
### DEFINE AND TRAIN MODEL
# these hyperparameters are fine for a small dataset, with a few batches
# if integration is a problem then you can try increasing the layers to 3
# and hidden units to 256
print('Creating and training model:', model_name)
adata = anndata.read_h5ad(anndata_path)
print(adata)
print('Restricting to genes with minimum counts of ', min_gene_counts)
adata.var['gene_counts'] = np.squeeze(np.asarray(adata.X.sum(0)))
adata = adata[:, adata.var.gene_counts > min_gene_counts]
print(adata)
## register adata with SCVI, for more information see
## https://docs.scvi-tools.org/en/stable/api/reference/scvi.data.setup_anndata.html
adata.layers["counts"] = adata.X.copy().tocsr() # converts to CSR format, preserve counts
scvi.data.setup_anndata(adata,
layer="counts",
batch_key=batch_key)
# typically you don't need to go tweak these parameters for training a model
model = scvi.model.SCVI(adata,
n_hidden=256,
n_layers=2,
gene_likelihood='nb',
dispersion='gene-batch'
)
# MODEL TRAINING
        # the model trains quickly even without a GPU; check the ELBO history below to make
        # sure training has actually converged before using the model, since this script is
        # meant to run end-to-end to show the entire data generation pipeline
model.train(check_val_every_n_epoch=1,
use_gpu=True,
max_epochs=125,
plan_kwargs={'lr': 1e-3})
train_test_results = model.history['elbo_train']
train_test_results['elbo_validation'] = model.history['elbo_validation']
### MAKE SURE THE MODEL FINISHED TRAINING FOR BEST RESULTS
print(train_test_results)
model.save(model_name, save_anndata=True)
# save the training results to a csv for inspection if needed
train_test_results.to_csv(model_name + '+train_test_results.csv')
return model
def make_de_global(model, stratification_label=stratification_label, model_name=model_name):
    # Perform DE on each cell type vs the rest of the cells. This computes the expression
    # frequency (scale1) in each cell type, used for the heatmap anndata, plus the p-values
    # and lfc_median for each cell type, which are used for ranking the swarm plot.
    # Results are saved to a csv to avoid recomputing: the csv is checked before running the DE.
de_global_filename = model_name + '+de_global.csv'
try:
de_global = pd.read_csv(de_global_filename, index_col=0)
print('Loaded global DE:', de_global_filename)
except:
print('Performing global DE...')
de_global = model.differential_expression(
groupby=stratification_label,
all_stats=False
)
# scvi currently puts the groups in a column named "comparison", eg
# an entry would be "Neurons vs Instestine" but we need to split that into
# one column for group1 and group2. Submitted a PR to change that:
# https://github.com/YosefLab/scvi-tools/pull/1074
de_global['group1'] = de_global['comparison'].str.split(' vs ', expand=True)[0]
de_global['group2'] = de_global['comparison'].str.split(' vs ', expand=True)[1]
de_global.to_csv(de_global_filename)
return de_global
def make_heatmap_anndata(de_global,
about=about_heatmap,
model_name=model_name,
stratification_label=stratification_label):
heatmap_anndata_filename = model_name + '+heatmap_anndata.h5ad'
if os.path.isfile(heatmap_anndata_filename):
        print('Skipping heatmap creation, anndata already exists at file: ', heatmap_anndata_filename)
return None
else:
print('Creating heatmap anndata... ')
# pivot the DE result dataframe to create a dataframe for the heatmap
# with gene ids in the index and cell type name in the columns and
# scale1 in the entries, then take the log10 of scale1
heatmap_df = de_global[['scale1', 'group1']]
heatmap_df['log10scale1'] = np.log10(heatmap_df['scale1']).values
heatmap_df = heatmap_df[['log10scale1', 'group1']]
heatmap_df = heatmap_df.pivot(columns='group1', values='log10scale1')
heatmap_df.to_csv(model_name + '+heatmap_df.csv')
# put the heatmap data in anndata object
heatmap_adata = anndata.AnnData(X=heatmap_df.values.T,
obs=pd.DataFrame(index=heatmap_df.columns.values),
var=pd.DataFrame(index=heatmap_df.index.values),
)
# rename obs and var to make clear what they hold
heatmap_adata.var.index.rename('gene_id', inplace=True)
heatmap_adata.obs.index.rename(stratification_label, inplace=True)
        # add some metadata explaining what the data is
heatmap_adata.uns['about'] = about_heatmap
heatmap_adata.write_h5ad(heatmap_anndata_filename)
print('Heatmap anndata saved: ', heatmap_anndata_filename)
return None
def make_histogram_anndata(model,
stratification_label=stratification_label,
about_histograms=about_histograms):
histogram_anndata_filename = model_name + '+histogram_anndata.h5ad'
if os.path.isfile(histogram_anndata_filename):
print('Skipping histogram creation, anndata already exists at file: ', histogram_anndata_filename)
return None
else:
adata = model.adata
bins_intervals = np.histogram([0], bins=100, range=(-10, 0), density=False)[1][:-1]
### get the scvi normalized expression then log10 that
adata.layers['normalized'] = model.get_normalized_expression()
adata.layers['log10normalized'] = np.log10(adata.layers['normalized'])
###loops through each cell type and then each gene to compute the histogram of expression
# first get dimensions to initialize adata object
obs_stratification_label_unique_values = adata.obs[stratification_label].unique()
        # the bin edges were already computed above; convert them to strings for the anndata var index
        bin_intervals = np.round(list(bins_intervals), 1).astype(str)
gene_histogram_adata = anndata.AnnData(X=np.zeros((len(obs_stratification_label_unique_values),
len(bins_intervals))),
var=pd.DataFrame(index=bin_intervals),
obs=pd.DataFrame(index=obs_stratification_label_unique_values),
)
# rename obs and var to make clear what they hold
gene_histogram_adata.var.index.rename('histogram_bins', inplace=True)
gene_histogram_adata.obs.index.rename(stratification_label, inplace=True)
# now that adata is ready loop through every gene
# and for each gene computes the counts in each bin for each cell type
for gene_id in tqdm(adata.var.index):
log10_normalized_expression_in_gene = adata[:, adata.var.index == gene_id].layers['log10normalized']
log10_normalized_expression_in_gene = np.squeeze(np.asarray(log10_normalized_expression_in_gene))
# gets the bin intervals from the np histogram function
gene_histogram_df = pd.DataFrame(columns=bins_intervals)
for label in obs_stratification_label_unique_values:
# fetch only the expression of that gene in that cell
log10_normalized_expression_in_celltype = log10_normalized_expression_in_gene[
adata.obs[stratification_label] == label]
gene_histogram_df.loc[label] = \
np.histogram(log10_normalized_expression_in_celltype, bins=100, range=(-10, 0), density=False)[0]
# convert to sparse matrix to reduce final file size
gene_histogram_adata.layers[gene_id]=sparse.csr_matrix(gene_histogram_df.values.astype('int16'))
        # add some metadata explaining what the data is
gene_histogram_adata.uns['about'] = about_histograms
gene_histogram_adata.write_h5ad(histogram_anndata_filename)
def compute_pairwise_de_one_group(group1_label,
model,
stratification_label=stratification_label,
model_name=model_name
):
adata = model.adata
csv_filename = model_name + '+pairwise_de_one_group+' + group1_label + '+.csv'
if os.path.isfile('./pairwise_de/'+csv_filename):
print('Skipping pairwise DE, csv file already exists: ', csv_filename)
return None
else:
print('Doing pairwise DE for ', stratification_label, group1_label)
# for a given group1_label (eg `Intestine`) do pairwise DE vs all other labels in that category (eg all other cell types)
obs_stratification_label_unique_values = adata.obs[stratification_label].unique()
pairwise_de_one_group = pd.DataFrame()
for group2_label in tqdm(obs_stratification_label_unique_values):
de_df = model.differential_expression(
groupby=stratification_label,
group1=group1_label,
group2=group2_label,
silent=True,
n_samples=5000,
all_stats=False
)
de_df['group1'] = group1_label
de_df['group2'] = group2_label
pairwise_de_one_group = pairwise_de_one_group.append(de_df)
# write to disk just in case
if not os.path.exists('pairwise_de'):
os.makedirs('pairwise_de')
pairwise_de_one_group.to_csv('./pairwise_de/'+csv_filename)
return pairwise_de_one_group
def make_swarmplot_anndata(model,
stratification_label=stratification_label,
about_swarmplots=about_swarmplots):
adata = model.adata
obs_stratification_label_unique_values = adata.obs[stratification_label].unique()
# check that all files exist
for group1_label in obs_stratification_label_unique_values:
csv_filename = model_name + '+pairwise_de_one_group+' + group1_label + '+.csv'
if not os.path.isfile('./pairwise_de/'+csv_filename):
print('Aborting -- Missing pairwise DE csv file: ', csv_filename)
return None
# initialize one pairwise_de dataframe
pairwise_de = pd.read_csv('./pairwise_de/'+csv_filename, index_col=0)
# make one swarm df to get the shape and order of the cell types/genes to initialize adata
mock_swarmdf = pairwise_de.pivot(values='lfc_median', columns='group2').round(2)
# initialize adata and fills the X with zeroes, the lfc_median for each cell type go in the layers
swarmplot_anndata_filename = model_name + '+swarmplot_anndata.h5ad'
swarmplot_adata = anndata.AnnData(X=np.zeros((len(mock_swarmdf.columns), len(mock_swarmdf.index))),
obs= | pd.DataFrame(index=mock_swarmdf.columns) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2020, <NAME>.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import numpy as np
import pandas as pd
from Xclusion_criteria.xclusion_dtypes import (
get_dtypes,
get_dtypes_init,
get_dtypes_final,
check_dtype_object,
split_variables_types,
check_num_cat_lists
)
class TestDtypes(unittest.TestCase):
def setUp(self):
self.dtypes = {
'num': 'int',
'float': 'float',
'num_nan': 'int',
'num_str_ok': 'int',
'num_str_notok': 'int',
'cat_1': 'object',
'cat_nan': 'object',
'cat_str_ok': 'object'
}
self.criteria = {
('cat_1', '0'): ['f1', 'f2', 'f3'], ('cat_nan', '1'): ['f1', 'f2']}
self.nulls = ['missing', 'not applicable']
self.dtype_pd1 = pd.DataFrame({
'sample_name': ['sam_1', 'sam_2', 'sam_3'],
'col1': [1, 2, np.nan],
'col2': [1.3, 1.5, 3.0],
'col3': ['x', 'y', 'z'],
'col4': [0, 1, 2],
'col5': [1.2, 1.5, 'missing']
})
self.test_dtype_pd1 = {
'col1': 'float',
'col2': 'float',
'col3': 'object',
'col4': 'int',
'col5': 'float'
}
self.dtype_pd2 = pd.DataFrame({
'sample_name': ['sam_1', 'sam_2', 'sam_3'],
'cat_1': ['a', 'b', 'c'],
'cat_2': ['a', 'b', False],
'cat_3': [1, 1.3, True],
'cat_4': ['a', 'b', 2.4],
'cat_5': ['a', 'b', np.nan],
'float': [1.2, 1.4, 1],
'int': [1, 2, 3],
'check': [1, 1.3, 'not applicable']})
self.test_dtype_pd2 = {
'cat_1': 'object',
'cat_2': 'object',
'cat_3': 'object',
'cat_4': 'object',
'cat_5': 'object',
'check': 'float',
'float': 'float',
'int': 'int'
}
def test_split_variables_types(self):
num, cat = [], []
split_variables_types(self.dtypes, num, cat)
self.assertEqual(sorted(num), sorted(['num','float','num_nan', 'num_str_ok','num_str_notok']))
self.assertEqual(sorted(cat), sorted(['cat_1','cat_nan', 'cat_str_ok']))
def test_get_dtypes_init(self):
test_dtypes_init = {
'col1': ['float'],
'col2': ['float'],
'col3': ['object', 'object'],
'col4': ['int'],
'col5': ['object', 'check']
}
self.assertEqual(get_dtypes_init(self.dtype_pd1), test_dtypes_init)
cur_dict = {
'cat_1': ['object', 'object'],
'cat_2': ['object', 'object'],
'cat_3': ['object', 'object'],
'cat_4': ['object', 'check'],
'cat_5': ['object', 'check'],
'check': ['object', 'check'],
'float': ['float'],
'int': ['int']
}
self.assertEqual(get_dtypes_init(self.dtype_pd2), cur_dict)
def test_get_dtypes_final(self):
self.assertEqual(
get_dtypes_final(self.dtype_pd1, self.nulls,
get_dtypes_init(self.dtype_pd1)),
self.test_dtype_pd1
)
self.assertEqual(
get_dtypes_final(self.dtype_pd2, self.nulls,
get_dtypes_init(self.dtype_pd2)),
self.test_dtype_pd2
)
def test_check_dtype_object(self):
dtype = check_dtype_object( | pd.Series(['a', 'b', 'c']) | pandas.Series |
"""
Time-frequency submodule.
"""
from .eeg_data import eeg_select_sensor_area
from ..miscellaneous import Time
import numpy as np
import pandas as pd
import mne
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_name_frequencies(freqs):
"""
    Name frequencies according to standard classifications.
Parameters
----------
freqs : list or numpy.array
list of floats containing frequencies to classify.
Returns
----------
freqs_names : list
Named frequencies
Example
----------
>>> import neurokit as nk
>>>
>>> nk.eeg_name_frequencies([0.5, 1.5, 3, 5, 7, 15])
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- <NAME> (https://github.com/DominiqueMakowski)
References
------------
- None
"""
freqs = list(freqs)
freqs_names = []
for freq in freqs:
if freq < 1:
freqs_names.append("UltraLow")
elif freq <= 3:
freqs_names.append("Delta")
elif freq <= 7:
freqs_names.append("Theta")
elif freq <= 9:
freqs_names.append("Alpha1/Mu")
elif freq <= 12:
freqs_names.append("Alpha2/Mu")
elif freq <= 13:
freqs_names.append("Beta1/Mu")
elif freq <= 17:
freqs_names.append("Beta1")
elif freq <= 30:
freqs_names.append("Beta2")
elif freq <= 40:
freqs_names.append("Gamma1")
elif freq <= 50:
freqs_names.append("Gamma2")
else:
freqs_names.append("UltraHigh")
return(freqs_names)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_psd(raw, sensors_include="all", sensors_exclude=None, fmin=0.016, fmax=60, method="multitaper", proj=False):
"""
Compute Power-Spectral Density (PSD).
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
sensors_include : str
Sensor area to include. See :func:`neurokit.eeg_select_sensors()`.
sensors_exclude : str
Sensor area to exclude. See :func:`neurokit.eeg_select_sensors()`.
fmin : float
Min frequency of interest.
fmax: float
Max frequency of interest.
method : str
"multitaper" or "welch".
proj : bool
add projectors.
Returns
----------
mean_psd : pandas.DataFrame
Averaged PSDs.
Example
----------
>>> import neurokit as nk
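    >>> # `raw` is assumed to be an already-loaded mne.io.Raw recording (hypothetical)
    >>> mean_psd = nk.eeg_psd(raw, sensors_include="all", fmin=1, fmax=40)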
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- <NAME> (https://github.com/DominiqueMakowski)
References
------------
- None
"""
picks = mne.pick_types(raw.info, include=eeg_select_sensor_area(include=sensors_include, exclude=sensors_exclude), exclude="bads")
if method == "multitaper":
psds, freqs = mne.time_frequency.psd_multitaper(raw,
fmin=fmin,
fmax=fmax,
low_bias=True,
proj=proj,
picks=picks)
else:
psds, freqs = mne.time_frequency.psd_welch(raw,
fmin=fmin,
fmax=fmax,
proj=proj,
picks=picks)
tf = pd.DataFrame(psds)
tf.columns = eeg_name_frequencies(freqs)
tf = tf.mean(axis=0)
mean_psd = {}
for freq in ["UltraLow", "Delta", "Theta", "Alpha", "Alpha1", "Alpha2", "Mu", "Beta", "Beta1", "Beta2", "Gamma", "Gamma1", "Gamma2", "UltraHigh"]:
mean_psd[freq] = tf[[freq in s for s in tf.index]].mean()
mean_psd = pd.DataFrame.from_dict(mean_psd, orient="index").T
return(mean_psd)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_create_frequency_bands(bands="all", step=1):
"""
Delta: 1-3Hz
Theta: 4-7Hz
Alpha1: 8-9Hz
Alpha2: 10-12Hz
Beta1: 13-17Hz
Beta2: 18-30Hz
Gamma1: 31-40Hz
Gamma2: 41-50Hz
Mu: 8-13Hz
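    Example
    ----------
    >>> import neurokit as nk
    >>> # a minimal sketch: composite bands are split into their sub-bands
    >>> bands = nk.eeg_create_frequency_bands(bands=["Alpha", "Beta"], step=0.5)
    >>> sorted(bands.keys())
    ['Alpha1', 'Alpha2', 'Beta1', 'Beta2']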
"""
if bands == "all" or bands == "All":
bands = ["Delta", "Theta", "Alpha", "Beta", "Gamma", "Mu"]
if "Alpha" in bands:
bands.remove("Alpha")
bands += ["Alpha1", "Alpha2"]
if "Beta" in bands:
bands.remove("Beta")
bands += ["Beta1", "Beta2"]
if "Gamma" in bands:
bands.remove("Gamma")
bands += ["Gamma1", "Gamma2"]
frequencies = {}
for band in bands:
if band == "Delta":
frequencies[band] = np.arange(1, 3+0.1, step)
if band == "Theta":
frequencies[band] = np.arange(4, 7+0.1, step)
if band == "Alpha1":
frequencies[band] = np.arange(8, 9+0.1, step)
if band == "Alpha2":
frequencies[band] = np.arange(10, 12+0.1, step)
if band == "Beta1":
frequencies[band] = np.arange(13, 17+0.1, step)
if band == "Beta2":
frequencies[band] = np.arange(18, 30+0.1, step)
if band == "Gamma1":
frequencies[band] = np.arange(31, 40+0.1, step)
if band == "Gamma2":
frequencies[band] = np.arange(41, 50+0.1, step)
if band == "Mu":
frequencies[band] = np.arange(8, 13+0.1, step)
return(frequencies)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_power_per_frequency_band(epoch, bands="all", step=1):
"""
"""
frequencies = eeg_create_frequency_bands(bands=bands, step=step)
power_per_band = {}
for band in frequencies:
power, itc = mne.time_frequency.tfr_morlet(epoch, freqs=frequencies[band], n_cycles=frequencies[band]/2, use_fft=True, return_itc=True, decim=3, n_jobs=1)
data = power.data
times = power.times
freqs = power.freqs
df = pd.DataFrame(np.average(data, axis=0).T, index=times, columns=freqs)
df = df.mean(axis=1)
power_per_band[band] = list(df)
df = | pd.DataFrame.from_dict(power_per_band) | pandas.DataFrame.from_dict |
import argparse
import time
import os
import pandas as pd
from pytrends.request import TrendReq
class GoogleTrends(object):
def __init__(self, DATA_PATH, SAVING_PATH, GEO, TIMEFRAME, PYTREND):
self.data_path = DATA_PATH
self.saving_path = SAVING_PATH
self.geo = GEO
self.timeframe = TIMEFRAME
self.pytrend = PYTREND
def get_ticker_list(self, TICKER_PATH):
ticker = pd.read_excel(TICKER_PATH)
return ticker.ticker.tolist()
def get_trends(self):
ticker_list = self.get_ticker_list(self.data_path)
limit = 50
df = | pd.DataFrame({'date': [], 'variable': [], 'value': []}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
def city_weather_clean(df):
df['Cloud_numerical'] = df['cloud']
d1 = {
'Fair':0
,'Mostly Cloudy':2
,'Cloudy':1
,'Partly Cloudy':1
,'Light Rain':2
, 'Light Drizzle':2
,'Rain':2
,'Light Rain with Thunder':2
,'Heavy T-Storm':2
,'Thunder':2
, 'Heavy Rain':2
,'T-Storm':2
, 'Fog':2
, 'Mostly Cloudy / Windy':2
, 'Cloudy / Windy':2
, 'Haze':1
, 'Fair / Windy':0
, 'Partly Cloudy / Windy':1
, 'Light Rain / Windy':2
, 'Heavy T-Storm / Windy':2
, 'Heavy Rain / Windy':2
, 'Widespread Dust':1
, 'Thunder and Hail':2
,'Thunder / Windy':2
,'Blowing Dust':1
, 'Patches of Fog':1
, 'Blowing Dust / Windy':1
, 'Rain / Windy':2
, 'Fog / Windy':2
, 'Light Drizzle / Windy':2
, 'Haze / Windy':1
,'Light Snow / Windy':1
, 'Light Snow':1
,'T-Storm / Windy':2
,'Light Sleet':1
}
df['Cloud_numerical'].replace(d1, inplace= True)
df['new_hour_date'] = df['hour'] + ' '+ df['Date']
df['New_datetime'] = pd.to_datetime(df['new_hour_date'],infer_datetime_format=True, format ='%m/%d/%Y %H')
df['time_rounded'] = df['New_datetime'].dt.round('H').dt.hour
df['time_rounded'] = df['time_rounded'].apply(str)
df['time_rounded2'] = df['Date'] + ' '+ df['time_rounded']
df['time_rounded4']= df['time_rounded2'].apply(lambda x:f'{x}:00:00')
df['New_datetime2'] = pd.to_datetime(df['time_rounded4'],infer_datetime_format=True,format ='%m/%d/%Y %H')
df['New_datetime'] = pd.to_datetime(df['New_datetime'],infer_datetime_format=True,format ='%m/%d/%Y %H')
pd_series_precip = df['precip']
precip_lst = []
for string in pd_series_precip:
string = string.replace(u'\xa0in','')
precip_lst.append(string)
results_precip = pd.Series(precip_lst)
df['precip1']= results_precip
df['precip1'] = df['precip1'].astype(float)
pd_series_dew = df['dew']
dew_lst = []
for string in pd_series_dew:
string = string.replace(u'\xa0F','')
dew_lst.append(string)
results = pd.Series(dew_lst)
df['dew1']= results
df['dew1'] = df['dew1'].astype(float)
pd_series_wind = df['wind_speed']
wind_lst = []
for string in pd_series_wind:
string = string.replace(u'\xa0mph','')
if string == '0.00\xa0':
string = '0.00'
wind_lst.append(string)
results = | pd.Series(wind_lst) | pandas.Series |
import os
import numpy as np
import pandas as pd
import dask.dataframe as dd
import random
import logging
import hashlib
from functools import reduce
from operator import add
import string
from .match import match_lists
from lsst.pipe.tasks.functors import Labeller, CompositeFunctor, RAColumn, DecColumn
from .utils import result
# This is for temporary backwards compatibility
try:
from lsst.pipe.analysis.utils import Filenamer
except ImportError:
pass
class Catalog(object):
"""Base class for columnwise interface to afwTable
The idea behind this is to allow for access to only specified
columns of one or more `lsst.afw.Table` objects, without necessarily
having to load the entire table(s) into memory.
Subclasses must implement at least `__init__`, `get_columns`,
and `_stringify`, and probably also `_initialize` and `_apply_func`.
"""
index_column = 'id'
def _initialize(self):
"""Set lots of properties that will be lazily calculated
"""
self._coords = None
self._md5 = None
def _stringify(self):
"""Return string representing catalog, for md5 hashing
"""
raise NotImplementedError
def _compute_md5(self):
return hashlib.md5(self._stringify())
@property
def md5(self):
"""Hash of the catalog
Computed from an md5 hash of `._stringify`.
"""
if self._md5 is None:
self._md5 = self._compute_md5().hexdigest()
return self._md5
def __hash__(self):
return hash(self.md5)
def get_columns(self, columns, **kwargs):
"""Returns dataframe of desired columns
`
Parameters
----------
columns : list
List of column names to be returned.
"""
raise NotImplementedError('Must implement get_columns!')
def _get_coords(self):
df = self.get_columns(['coord_ra', 'coord_dec'], add_flags=False)
# Hack to avoid phantom 'dir0' column
df = result(df)
if 'dir0' in df.columns:
df = df.drop('dir0', axis=1)
self._coords = (df*180 / np.pi).rename(columns={'coord_ra':'ra',
'coord_dec':'dec'})
@property
def df_all(self):
return self.get_columns(self.columns, add_flags=False)
@property
def coords(self):
if self._coords is None:
self._get_coords()
return self._coords
@property
def ra(self):
return self.coords['ra']
@property
def dec(self):
return self.coords['dec']
@property
def index(self):
return self.coords.index
def _apply_func(self, func, query=None, client=None):
"""Defines how to compute the output of a functor
This method partially defines what happens when you call
a functor with a catalog; the full behavior is a combination
of this method and the `__call__` method of the functor.
Parameters
----------
func : explorer.functors.Functor
Functor to be calculated.
See also
--------
explorer.functors.Functor
"""
df = self.get_columns(func.columns, query=query, client=client)
if len(df.columns)==0:
vals = pd.Series(np.nan, index=df.index)
else:
vals = func._func(df)
return vals
class MatchedCatalog(Catalog):
"""Interface to two afwTables at a time.
Matches sources from two catalogs with KDTree,
within `match_radius`. If you provide a `match_registry`
filename, then the match data will be persisted (keyed by
the hash of the catalog), for fast loading in the future.
Parameters
----------
cat1, cat2 : `Catalog` objects
Catalogs to match.
match_radius : float
Maximum radius to match sources
match_registry : str
HDF file containing persisted match data.
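    Example
    -------
    A hypothetical usage sketch (``cat1`` and ``cat2`` are concrete `Catalog` subclasses)::
        matched = MatchedCatalog(cat1, cat2, match_radius=0.5)
        i1, i2 = matched.match_inds
        dist = matched.match_distance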
"""
def __init__(self, cat1, cat2, match_radius=0.5,
match_registry=None):
self.cat1 = cat1
self.cat2 = cat2
self.match_radius = match_radius
self.match_registry = match_registry
self._initialize()
def _initialize(self):
self._matched = False
self._coords = None
self._match_distance = None
self._match_inds1 = None
self._match_inds2 = None
self._md5 = None
def _stringify(self):
return self.cat1._stringify() + self.cat2._stringify()
def _get_coords(self):
self._coords = self.cat1.coords
def match(self, **kwargs):
"""Run the catalog matching.
"""
return self._match_cats(**kwargs)
def _read_registry(self):
"""Load persisted match data
Returns
-------
inds1, inds2, dist: pandas.Int64Index objects, pandas.Series
Matched indices and match_distance data.
"""
if self.match_registry is None:
raise ValueError
store = pd.HDFStore(self.match_registry)
df = store['md5_{}'.format(self.md5)]
store.close()
inds1 = df.index
inds2 = pd.Int64Index(df['id2'], name='id')
dist = df['distance'].rename('match_distance')
return inds1, inds2, dist
def _write_registry(self, match_df):
"""Write match data to registry
No-op if `self.match_registry` is not set.
Parameters
----------
match_df : pandas.DataFrame
            Match data. Index of DataFrame corresponds to index values
of `self.cat1`; `id2` column is index values of `self.cat2`;
`distance` column is the match distance.
"""
if self.match_registry is None:
return
else:
match_df.to_hdf(self.match_registry, 'md5_{}'.format(self.md5))
def _test_registry(self):
"""Test to make sure match loaded from registry is same as fresh-calculated
"""
id1, id2, dist = self._read_registry()
self.match(recalc=True)
assert (id1==self._match_inds1).all()
assert (id2==self._match_inds2).all()
assert (dist==self._match_distance).all()
def _match_cats(self, recalc=False):
"""Determine which indices in cat2 correspond to the same objects in cat1
Computes match using `explorer.match.match_lists`, which uses a KDTree-based
algorithm. If `self.match_registry` is defined but the match hasn't been
computed before, then the results are written to that file. If the match has been
computed and persisted, then it is just loaded.
Match information is stored in the form of `pandas.Index` objects: `match_inds1`
and `match_inds2`, which are *label* indices, not positional. Note that
the `get_columns` method for this object does not return row-matched columns;
in order to get properly row-matched columns from the two catalogs, you need to index
the outputs with `match_inds1` and `match_inds2`, e.g.,
catalog = MatchedCatalog(cat1, cat2)
df1, df2 = catalog.get_columns([cols])
df1 = df1.loc[catalog.match_inds1]
df2 = df2.loc[catalog.match_inds2]
Now, the rows of `df1` and `df2` can be compared as belonging to the "same" (matched)
objects. This behavior is implemented in `_apply_func`.
Parameters
----------
recalc : bool
If False, then this will attempt to read from the `match_registry` file.
If True, then even if `match_registry` is defined, the match will be recomputed
"""
try:
if recalc:
raise ValueError
i1, i2, d = self._read_registry()
except (KeyError, ValueError):
ra1, dec1 = self.cat1.ra, self.cat1.dec
ra2, dec2 = self.cat2.ra, self.cat2.dec
id1 = ra1.index
id2 = ra2.index
dist, inds = match_lists(ra1, dec1, ra2, dec2, self.match_radius/3600)
good = np.isfinite(dist)
logging.info('{0} matched within {1} arcsec, {2} did not.'.format(good.sum(), self.match_radius, (~good).sum()))
# Save indices as labels, not positions, as required by dask
i1 = id1[good]
i2 = id2[inds[good]]
d = pd.Series(dist[good] * 3600, index=id1[good], name='match_distance')
match_df = pd.DataFrame({'id2':i2, 'distance':d}, index=i1)
self._write_registry(match_df)
self._match_inds1 = i1
self._match_inds2 = i2
self._match_distance = d
self._matched = True
@property
def match_distance(self):
"""Distance between objects identified as matches
"""
if self._match_distance is None:
self._match_cats()
return self._match_distance
@property
def match_inds1(self):
if self._match_inds1 is None:
self._match_cats()
return self._match_inds1
@property
def match_inds2(self):
if self._match_inds2 is None:
self._match_cats()
return self._match_inds2
@property
def match_inds(self):
return self.match_inds1, self.match_inds2
def get_columns(self, *args, **kwargs):
"""Retrieve columns from both catalogs, without matching
"""
df1 = self.cat1.get_columns(*args, **kwargs)
df2 = self.cat2.get_columns(*args, **kwargs)
# df2.set_index(dd.Series(df1.index))
return df1, df2
def _apply_func(self, func, query=None, how='difference', client=None):
"""Get the results of applying a functor
Returns row-matched computation on a catalog.
Parameters
----------
func : explorer.functors.Functor
Functor to be calculated.
query : str
[Queries not currently completely or consistently implemented.]
how : str
Allowed values:
* 'difference' (default): returns difference of matched computed values
* 'sum': returns sum of matched computed values
* 'first': returns computed values from `self.cat1`
* 'second': returns computed values from `self.cat2`
* 'all': returns computed values from both catalogs.
"""
df1, df2 = self.get_columns(func.columns, query=query, client=client)
# Check if either returned empty dataframe
df1_empty = len(df1.columns)==0
df2_empty = len(df2.columns)==0
if func.allow_difference or how in ['all', 'second']:
id1, id2 = self.match_inds
if df1_empty:
v1 = pd.Series(np.nan, index=id1)
else:
v1 = result(func._func(df1)).loc[id1].values
if df2_empty:
v2 = pd.Series(np.nan, index=id1)
else:
v2 = result(func._func(df2)).loc[id2].values
if how=='difference':
vals = pd.Series(v1 - v2, index=id1)
elif how=='sum':
vals = | pd.Series(v1 + v2, index=id1) | pandas.Series |
import pandas as pd
from openpyxl import Workbook
import cx_Oracle
import sys
from sqlalchemy import create_engine
from PyQt6 import QtCore, QtGui, QtWidgets
import ctypes
import time
import threading
import qdarktheme
import cgitb
cgitb.enable(format = 'text')
dsn_tns = cx_Oracle.makedsn('ip-banco-oracle', 'porta', service_name='nomedoservico')
conn = cx_Oracle.connect(user=r'usuario', password='<PASSWORD>', dsn=dsn_tns)
c = conn.cursor()
engine = create_engine('sqlite://', echo=False)
class Ui_ConferenciadeNotas(object):
def setupUi(self, ConferenciadeNotas):
ConferenciadeNotas.setObjectName("ConferenciadeNotas")
ConferenciadeNotas.resize(868, 650)
ConferenciadeNotas.setWindowIcon(QtGui.QIcon("icone.ico"))
self.localArquivo = QtWidgets.QTextEdit(ConferenciadeNotas)
self.localArquivo.setGeometry(QtCore.QRect(100, 60, 590, 30))
self.localArquivo.setObjectName("localArquivo")
self.label = QtWidgets.QLabel(ConferenciadeNotas)
self.label.setGeometry(QtCore.QRect(0, 0, 870, 40))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(18)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(ConferenciadeNotas)
self.label_2.setGeometry(QtCore.QRect(10, 60, 90, 30))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(16)
font.setBold(False)
font.setWeight(50)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeading|QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.label_2.setObjectName("label_2")
self.localizarArquivoBT = QtWidgets.QPushButton(ConferenciadeNotas)
self.localizarArquivoBT.setGeometry(QtCore.QRect(700, 60, 160, 30))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(12)
self.localizarArquivoBT.setFont(font)
self.localizarArquivoBT.setObjectName("localizarArquivoBT")
self.localizarArquivoBT.clicked.connect(self.locArquivo)
self.conferidoFiliais = QtWidgets.QTableWidget(ConferenciadeNotas)
self.conferidoFiliais.setGeometry(QtCore.QRect(20, 130, 180, 440))
font = QtGui.QFont()
font.setFamily("Century Gothic")
self.conferidoFiliais.setFont(font)
self.conferidoFiliais.setRowCount(16)
self.conferidoFiliais.setObjectName("conferidoFiliais")
self.conferidoFiliais.setColumnCount(3)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.conferidoFiliais.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(9, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(10, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(11, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(12, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(13, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(14, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(15, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.conferidoFiliais.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.conferidoFiliais.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.conferidoFiliais.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setKerning(True)
item.setFont(font)
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(0, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(1, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(2, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(3, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(4, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(4, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(4, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(5, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(5, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(5, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(6, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(6, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(6, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(7, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(7, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(7, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(8, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(8, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(8, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(9, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(9, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(9, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(10, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(10, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(10, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(11, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(11, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(11, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(12, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(12, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(12, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(13, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(13, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(13, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(14, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(14, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(14, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(15, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(15, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(15, 2, item)
self.conferidoFiliais.horizontalHeader().setDefaultSectionSize(50)
self.conferidoFiliais.horizontalHeader().setMinimumSectionSize(50)
self.conferidoFiliais.verticalHeader().setDefaultSectionSize(23)
self.conferidoFiliais.verticalHeader().setMinimumSectionSize(23)
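        # Table for the notes flagged by the comparison; its six columns
        # (UN, SERIE, NOTA, DATA, SITUACAO, TEM) are labelled in retranslateUi().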
self.nfsComErro = QtWidgets.QTableWidget(ConferenciadeNotas)
self.nfsComErro.setGeometry(QtCore.QRect(200, 130, 651, 440))
font = QtGui.QFont()
font.setFamily("Century Gothic")
self.nfsComErro.setFont(font)
#self.nfsComErro.setRowCount(100)
self.nfsComErro.setObjectName("nfsComErro")
self.nfsComErro.setColumnCount(6)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(5, item)
self.nfsComErro.setSelectionMode(QtWidgets.QAbstractItemView.SelectionMode.ExtendedSelection)
self.nfsComErro.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectionBehavior.SelectItems)
self.label_3 = QtWidgets.QLabel(ConferenciadeNotas)
self.label_3.setGeometry(QtCore.QRect(0, 100, 870, 20))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(16)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_3.setObjectName("label_3")
self.exportResult = QtWidgets.QPushButton(ConferenciadeNotas)
self.exportResult.setGeometry(QtCore.QRect(703, 600, 150, 30))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(12)
self.exportResult.setFont(font)
self.exportResult.setObjectName("exportResult")
self.exportResult.setText('Exportar')
self.exportResult.clicked.connect(self.exportExcel)
self.retranslateUi(ConferenciadeNotas)
QtCore.QMetaObject.connectSlotsByName(ConferenciadeNotas)
self.rows = 0
self.conferidoFiliais.horizontalHeader().setStretchLastSection(True)
self.nfsComErro.horizontalHeader().setStretchLastSection(True)
self.conferidoFiliais.horizontalHeader().setStyleSheet(""" QHeaderView::section {padding-left: 2;
padding-right: -10;
}""")
self.nfsComErro.horizontalHeader().setStyleSheet(""" QHeaderView::section {padding-left: 2;
padding-right: -10;
}""")
def retranslateUi(self, ConferenciadeNotas):
_translate = QtCore.QCoreApplication.translate
ConferenciadeNotas.setWindowTitle(_translate("ConferenciadeNotas", "Conferência de Notas CIGAMxSEFAZ"))
self.label.setText(_translate("ConferenciadeNotas", "Conferência de Notas CIGAM x SEFAZ"))
self.label_2.setText(_translate("ConferenciadeNotas", "Arquivo:"))
self.localizarArquivoBT.setText(_translate("ConferenciadeNotas", "Localizar Arquivo"))
item = self.conferidoFiliais.verticalHeaderItem(0)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(1)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(2)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(3)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(4)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(5)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(6)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(7)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(8)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(9)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(10)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(11)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(12)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(13)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(14)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(15)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.horizontalHeaderItem(0)
item.setText(_translate("ConferenciadeNotas", "UN"))
item = self.conferidoFiliais.horizontalHeaderItem(1)
item.setText(_translate("ConferenciadeNotas", "NFE"))
item = self.conferidoFiliais.horizontalHeaderItem(2)
item.setText(_translate("ConferenciadeNotas", "NFCE"))
__sortingEnabled = self.conferidoFiliais.isSortingEnabled()
self.conferidoFiliais.setSortingEnabled(False)
item = self.conferidoFiliais.item(0, 0)
item.setText(_translate("ConferenciadeNotas", "001"))
item = self.conferidoFiliais.item(1, 0)
item.setText(_translate("ConferenciadeNotas", "002"))
item = self.conferidoFiliais.item(2, 0)
item.setText(_translate("ConferenciadeNotas", "003"))
item = self.conferidoFiliais.item(3, 0)
item.setText(_translate("ConferenciadeNotas", "004"))
item = self.conferidoFiliais.item(4, 0)
item.setText(_translate("ConferenciadeNotas", "005"))
item = self.conferidoFiliais.item(5, 0)
item.setText(_translate("ConferenciadeNotas", "006"))
item = self.conferidoFiliais.item(6, 0)
item.setText(_translate("ConferenciadeNotas", "007"))
item = self.conferidoFiliais.item(7, 0)
item.setText(_translate("ConferenciadeNotas", "008"))
item = self.conferidoFiliais.item(8, 0)
item.setText(_translate("ConferenciadeNotas", "009"))
item = self.conferidoFiliais.item(9, 0)
item.setText(_translate("ConferenciadeNotas", "010"))
item = self.conferidoFiliais.item(10, 0)
item.setText(_translate("ConferenciadeNotas", "011"))
item = self.conferidoFiliais.item(11, 0)
item.setText(_translate("ConferenciadeNotas", "013"))
item = self.conferidoFiliais.item(12, 0)
item.setText(_translate("ConferenciadeNotas", "014"))
item = self.conferidoFiliais.item(13, 0)
item.setText(_translate("ConferenciadeNotas", "016"))
item = self.conferidoFiliais.item(14, 0)
item.setText(_translate("ConferenciadeNotas", "100"))
item = self.conferidoFiliais.item(15, 0)
item.setText(_translate("ConferenciadeNotas", "200"))
self.conferidoFiliais.setSortingEnabled(__sortingEnabled)
item = self.nfsComErro.horizontalHeaderItem(0)
item.setText(_translate("ConferenciadeNotas", "UN"))
item = self.nfsComErro.horizontalHeaderItem(1)
item.setText(_translate("ConferenciadeNotas", "SERIE"))
item = self.nfsComErro.horizontalHeaderItem(2)
item.setText(_translate("ConferenciadeNotas", "NOTA"))
item = self.nfsComErro.horizontalHeaderItem(3)
item.setText(_translate("ConferenciadeNotas", "DATA"))
item = self.nfsComErro.horizontalHeaderItem(4)
item.setText(_translate("ConferenciadeNotas", "SITUACAO"))
item = self.nfsComErro.horizontalHeaderItem(5)
item.setText(_translate("ConferenciadeNotas", "TEM"))
self.label_3.setText(_translate("ConferenciadeNotas", "Unidade: Série: Data: até "))
def locArquivo(self):
arquivoLocal = QtWidgets.QFileDialog.getOpenFileNames(filter='*.xls')[0]
if (arquivoLocal == []):
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
Mbox('Erro arquivo', 'Arquivo não localizado ou invalido!', 0)
for files in arquivoLocal:
self.localArquivo.setText(' ')
self.localArquivo.setText(files)
self.file = files
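        # The SEFAZ export has six header rows, so only rows 6 onwards are loaded;
        # the query below normalises the SÉRIE column into note types (NFE, NFCE, 2NFCE).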
df = pd.read_excel(self.file, skiprows=lambda x: x not in list(range(6, 9999)))
sqlSerie = " SELECT DISTINCT(A.SERIE) FROM (select CASE WHEN [SÉRIE] = '3' THEN 'NFE' WHEN [SÉRIE] = '7' THEN 'NFCE' WHEN [SÉRIE] = '8' THEN '2NFCE' ELSE 'NFCE' END AS SERIE \
FROM NFSEFAZ) A "
try:
df.to_sql('NFSEFAZ', engine, if_exists='replace', index=False)
except:
pass
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
Mbox('Erro arquivo', 'Arquivo '+ self.file + ' invalido, favor verificar!', 0)
try:
serieDf = engine.execute(sqlSerie)
except:
pass
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
Mbox('Erro arquivo', 'Arquivo '+ self.file + ' invalido, favor verificar!', 0)
serieFim = pd.DataFrame(serieDf, columns=['SERIE'])
self.serieTxt = serieFim.iloc[0]['SERIE']
try:
self.serieTxt2 = serieFim.iloc[1]['SERIE']
except:
pass
self.serieTxt2 = serieFim.iloc[0]['SERIE']
if(self.serieTxt in ['NFCE','2NFCE']):
file = self.file
dff = pd.read_excel(file, skiprows=lambda x: x not in list(range(0, 6)))
dff.to_sql('NFCESEFAZ', engine, if_exists='replace', index=False)
ie_un = engine.execute('SELECT REPLACE(SUBSTR("SECRETARIA DE ESTADO DE FAZENDA",21,10),"-","") FROM NFCESEFAZ WHERE "SECRETARIA DE ESTADO DE FAZENDA" LIKE "%INSCRIÇÃO ESTADUAL%"')
ie_un = ie_un.first()[0]
df = pd.read_excel(file, skiprows=lambda x: x not in list(range(6, 9999)))
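            # Map the unit's state registration (Inscrição Estadual) to the internal
            # branch code (001-200) and normalise the série/situação values.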
sqlsefaz = (" select CASE WHEN {} = 130241750 THEN '001' \
WHEN {} = 131817086 THEN '002'\
WHEN {} = 131838245 THEN '003'\
WHEN {} = 131875523 THEN '004'\
WHEN {} = 131980203 THEN '005'\
WHEN {} = 132009412 THEN '006'\
WHEN {} = 132894939 THEN '007'\
WHEN {} = 132702371 THEN '008'\
WHEN {} = 133644065 THEN '009'\
WHEN {} = 131537326 THEN '010'\
WHEN {} = 133446565 THEN '011'\
WHEN {} = 132124726 THEN '013'\
WHEN {} = 133779416 THEN '014'\
WHEN {} = 133830900 THEN '016'\
WHEN {} = 133762033 THEN '100'\
WHEN {} = 131847031 THEN '200' ELSE {} END AS UN,\
CASE WHEN [SÉRIE] = '3' THEN 'NFE' WHEN [SÉRIE] = '7' THEN 'NFCE' WHEN [SÉRIE] = '8' THEN '2NFCE' ELSE 'NFCE' END AS SERIE,\
[NUMERO NOTA FISCAL] as NF, SUBSTR([DATA EMISSÃO],0,11) as DT_NF, \
CASE WHEN upper([SITUAÇÃO]) = 'CANCELADA FORA DO PRAZO' THEN 'CANCELADA' \
WHEN upper([SITUAÇÃO]) = 'AUTORIZADA FORA PRAZO' THEN 'AUTORIZADA' ELSE upper([SITUAÇÃO]) END AS SITUACAO\
FROM NFSEFAZ ").format(ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un,
ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un)
df.to_sql('NFSEFAZ', engine, if_exists='replace', index=False)
results = engine.execute(sqlsefaz)
final = pd.DataFrame(results, columns=['UN', 'SERIE', 'NF', 'DT_NF', 'SITUACAO'])
final.to_sql('NOTASSEFAZ', engine, if_exists='replace', index=False)
dt_inicio = engine.execute('SELECT MIN(SUBSTR([DATA EMISSÃO],0,11)) FROM NFSEFAZ')
dt_fim = engine.execute('SELECT MAX(SUBSTR([DATA EMISSÃO],0,11)) FROM NFSEFAZ')
un_neg = engine.execute('SELECT distinct(UN) FROM NOTASSEFAZ')
serie_nf = engine.execute('SELECT distinct(SERIE) FROM NOTASSEFAZ')
dt_inicio = dt_inicio.first()[0]
dt_fim = dt_fim.first()[0]
un_neg = un_neg.first()[0]
#serie_nf = [dict(row) for row in serie_nf]
list_serie = []
for row in serie_nf:
list_serie.append(row[0])
list_serie = str(list_serie)[1:-1]
self.label_3.setText("Unidade: " + un_neg + " Série: " + list_serie.replace("'",'').replace(",",' e') + " Data: " + dt_inicio + " até " + dt_fim)
#self.dtLabel["text"] = " Unidade: "+ un_neg + " Série: " + self.serieTxt + " Data: "+ dt_inicio+ " até " + dt_fim
sql = ("""SELECT F.CD_UNIDADE_DE_N,\
F.SERIE,F.NF,TO_CHAR(F.DT_EMISSAO, 'DD/MM/YYYY') AS DT,\
CASE WHEN F.ESPECIE_NOTA = 'S' THEN 'AUTORIZADA' \
WHEN F.ESPECIE_NOTA = 'N' THEN 'CANCELADA' \
WHEN F.ESPECIE_NOTA = 'E' THEN 'AUTORIZADA' \
END AS STATUS \
FROM FANFISCA F \
WHERE F.SERIE in ({}) \
AND F.CD_UNIDADE_DE_N = '{}' \
AND F.DT_EMISSAO BETWEEN '{}' AND '{}' \
""").format(list_serie, un_neg, dt_inicio, dt_fim)
nfbanco = pd.read_sql(sql, conn)
nfbanco.to_sql('NFCIGAM', engine, if_exists='replace', index=False)
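            # Anti-joins in both directions: notes present in SEFAZ but missing from CIGAM,
            # and notes present in CIGAM but missing from SEFAZ.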
comparaNfSefaz = engine.execute(" SELECT S.*,'SEFAZ' AS TEM FROM NOTASSEFAZ S LEFT JOIN NFCIGAM C ON (S.UN = C.CD_UNIDADE_DE_N AND S.SERIE = C.SERIE AND S.NF = C.NF) WHERE C.NF IS NULL")
resultComparaNfSefaz = pd.DataFrame(comparaNfSefaz, columns=['UN', 'SERIE', 'NOTA', 'DATA', 'SITUACAO', 'TEM'])
comparaNfCigam = engine.execute(" SELECT C.*,'CIGAM' AS TEM FROM NFCIGAM C LEFT JOIN NOTASSEFAZ S ON ( C.CD_UNIDADE_DE_N = S.UN AND C.SERIE = S.SERIE AND C.NF = S.NF) WHERE S.NF IS NULL")
            resultComparaNfCigam = pd.DataFrame(comparaNfCigam, columns=['UN', 'SERIE', 'NOTA', 'DATA', 'SITUACAO', 'TEM'])
#!/usr/bin/env python3
# coding: utf-8
"""
@author: <NAME> <EMAIL>
@last modified by: <NAME>
@file:cell_type_anno.py
@time:2021/03/09
change log:
2021/05/20 rst supplement. by: qindanhua.
    2021/07/08 adjust for restructured base class. by: qindanhua.
"""
import pandas as pd
import numpy as np
import os
from multiprocessing import Pool
import traceback
from ..log_manager import logger
from ..utils.correlation import spearmanr_corr, pearson_corr
from ..preprocess.normalize import normalize_total
from ..config import stereo_conf
from ..utils import remove_file
from ..core.tool_base import ToolBase
class CellTypeAnno(ToolBase):
"""
    predict the cell type of each bin-cell
    :param data: StereoExpData object
    :param ref_dir: reference database directory
    :param cores: number of cores to run on, to speed up the run
    :param keep_zeros: if True, keep the genes that are in the reference but not in the input expression data
    :param use_rf: whether to randomly choose genes when predicting
    :param sample_rate: sampling ratio of the data
    :param n_estimators: number of prediction rounds
    :param strategy:
    :param method: method used to calculate correlations
    :param split_num: number of parts to split the expression matrix into
Example
-------
>>> from stereo.io.reader import read_stereo
    >>> sed = read_stereo('test_gem', 'txt', 'bins')
    >>> cta = CellTypeAnno(sed, ref_dir='/path/to/reference_exp_data_dir/')
    >>> cta.fit()
cell cell type ... type_cnt_sum type_rate
0 0_0 hereditary spherocytosis cell line ... 20 1.0
1 0_1 hereditary spherocytosis cell line ... 20 1.0
2 0_10 hereditary spherocytosis cell line ... 20 1.0
"""
def __init__(
self,
data,
method='spearmanr',
ref_dir: str = None,
cores: int = 1,
keep_zeros: bool = True,
use_rf: bool = True,
sample_rate: float = 0.8,
n_estimators: int = 20,
strategy='1',
split_num: int = 1,
):
super(CellTypeAnno, self).__init__(data=data, method=method)
self.ref_dir = ref_dir
self.n_jobs = cores
self.keep_zeros = keep_zeros
self.use_rf = use_rf
self.sample_rate = sample_rate
self.n_estimators = n_estimators
self.strategy = strategy
self.split_num = split_num
self.output = stereo_conf.out_dir
@property
def ref_dir(self):
return self._ref_dir
@ref_dir.setter
def ref_dir(self, ref_dir):
"""
        set the reference directory, which must contain two files: ref_sample_epx.csv and cell_map.csv
"""
git_ref = 'https://github.com/BGIResearch/stereopy/raw/data/FANTOM5/ref_sample_epx.csv'
if ref_dir is None:
logger.info(f'reference file not found, download from {git_ref}')
ref_dir = os.path.join(stereo_conf.data_dir, 'ref_db', 'FANTOM5')
self.download_ref(ref_dir)
        if not (os.path.exists(os.path.join(ref_dir, 'ref_sample_epx.csv')) and
                os.path.exists(os.path.join(ref_dir, 'cell_map.csv'))):
            raise ValueError(
                'reference file not found, ref_dir must contain the two files ref_sample_epx.csv and cell_map.csv'
            )
self._ref_dir = ref_dir
@ToolBase.method.setter
def method(self, method):
m_range = ['spearmanr', 'pearson']
self._method_check(method, m_range)
def split_dataframe(self, df):
"""
        split the input data into N (split_num) parts
        :param df: input expression data frame
        :return: list of the N data frame parts
"""
datas = []
logger.info(f'input data: {df.shape[0]} genes, {df.shape[1]} cells.')
if self.split_num > 1:
            logger.info(f'split the exp matrix into {self.split_num} matrices')
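            # ceiling-style chunk size so the final chunk picks up any leftover columns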
step_size = int(df.shape[1]/self.split_num) + 1
for i in range(self.split_num):
start = i * step_size
end = start + step_size if start + step_size < df.shape[1] else df.shape[1]
datas.append(df.iloc[:, start: end])
else:
datas.append(df)
return datas
@staticmethod
def concat_top_corr_files(files, output_dir, prefix=None):
"""
        concatenate the correlation files from the n prediction rounds
:param files: all prediction results
:param output_dir: output directory
:param prefix: prefix of output files
:return: correlation dataframe
"""
df = pd.read_csv(files[0])
for f in files[1:]:
df1 = pd.read_csv(f)
df = df.append(df1)
file_name = f'{prefix}_top_annotation.csv' if prefix else 'top_annotation.csv'
df.to_csv(os.path.join(output_dir, file_name), index=False)
return df
def merge_subsample_result(self, input_dir, prefix, output_dir):
"""
        merge the per-subsample results into the final result
:param input_dir: input directory, output of previous step
:param prefix: prefix of output file
:param output_dir: output directory
:return: result data frame
"""
files = [os.path.join(input_dir, f) for f in os.listdir(input_dir) if prefix in f]
df = pd.read_csv(files[0])
for f in files[1:]:
            df1 = pd.read_csv(f)
#import required packages
import uuid
import json
import pandas as pd
from rpy2.robjects import r
from rpy2 import robjects as ro
from rpy2.robjects.packages import importr
from sklearn.model_selection import train_test_split
from pathlib import Path
import os
import subprocess
import yaml
from rpy2.robjects import pandas2ri
pandas2ri.activate()
base = importr('base')
feature_extraction = importr('FeatureExtraction')
patient_level_prediction = importr('PatientLevelPrediction')
database_connector = importr('DatabaseConnector')
class FeatureExtractor():
"""
Class for extracting and transforming data in the OMOP Common Data Model
format to build patient-level predictive models using lightsaber
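
    Example
    -------
    A minimal usage sketch; cohort_connector is assumed to be an already constructed
    connector object (built elsewhere) and feature_extraction.json an illustrative
    settings file name:

    >>> fe = FeatureExtractor(cohort_connector=cohort_connector,
    ...                       file_path='feature_extraction.json')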
"""
def __init__(self,
**kwargs
):
if 'cohort_connector' not in kwargs.keys():
print('please specify cohort_connector')
elif len(kwargs.keys()) < 2:
print("""Declare feature extraction parameters or specify path to json file with feature extraction settings """)
else:
self.__analysis_id__()
self.__cohort_connector(kwargs['cohort_connector'])
settings = self.__load_settings__(**kwargs)
self.__analysis_name__(settings, **kwargs)
self.__working_directory__(settings, **kwargs)
self.__output_directory__(settings, **kwargs)
self.__covariate__settings__(settings, **kwargs)
self.__model_training_settings__(settings, **kwargs)
self.__expt_config_settings__(settings, **kwargs)
print('Successfully created all settings')
def __cohort_connector(self, cohort_connector):
try:
self._db_connection_details = cohort_connector.db_connection_details
self._r_db_connection_details = cohort_connector.r_db_connection_details
self._cohort_details = cohort_connector.cohort_details
except:
print('Error missing or invalid cohort_connector')
def __load_settings__(self, **kwargs):
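        """
        Load feature extraction settings from the JSON file passed as file_path.

        Sketch of the expected top-level keys (inferred from the keys this class reads;
        the exact option names are listed in the individual setters):

        {
            "analysis_name": "...",
            "working_directory": "...",
            "output_directory": "...",
            "covariate_settings": {...},
            "tidy_covariate_settings": {"min_fraction": ..., "normalize": ..., "remove_redundancy": ...},
            "model_training_settings": {"sample_size": ..., "val_size": ..., "random_state": ...},
            "expt_config_settings": {...}
        }
        """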
settings = {}
try:
with open(kwargs['file_path'], 'r') as f:
try:
settings = json.load(f)
except ValueError:
print("Invalid JSON in " + kwargs['file_path'])
except KeyError:
print("file_path parameter not found")
except OSError:
print("Could not open " + kwargs['file_path'])
else:
if settings:
settings['model_training_settings']['sample_size'] = settings['model_training_settings']['sample_size'] if settings['model_training_settings']['sample_size'] else r('NULL')
settings['model_training_settings']['random_state'] = settings['model_training_settings']['random_state'] if 'random_state' in settings['model_training_settings'].keys() else None
return settings
def __analysis_id__(self):
print('Setting analysis id')
self._analysis_id = str(uuid.uuid4())
@property
def analysis_id(self):
return self._analysis_id
def __analysis_name__(self, settings, **kwargs):
try:
print('Setting analysis name')
if settings:
self._analysis_name = settings['analysis_name']
else:
self._analysis_name = kwargs['analysis_name']
except:
print("""
Missing/incorrect analysis_name
""")
@property
    def analysis_name(self):
return self._analysis_name
def __working_directory__(self, settings, **kwargs):
try:
print('Setting working directory')
if settings:
self._working_directory = settings['working_directory']
else:
self._working_directory = kwargs['working_directory']
if not os.path.exists(self._working_directory):
os.makedirs(self._working_directory)
except:
print("""
Missing/incorrect working_directory
""")
@property
    def working_directory(self):
return self._working_directory
def __output_directory__(self, settings, **kwargs):
try:
print('Setting output directory')
if settings:
self._output_directory = settings['output_directory']
else:
self._output_directory = kwargs['output_directory']
if not os.path.exists(self._output_directory):
os.makedirs(self._output_directory)
except:
print("""
Missing/incorrect output_directory
""")
@property
    def output_directory(self):
return self._output_directory
def __covariate__settings__(self, settings, **kwargs):
self.__baseline_covariate_setting_options__(settings, **kwargs)
self.__baseline_covariate_settings__()
self.__tidy_plp_settings__(settings, **kwargs)
self.__temporal_covariate_setting_options__(settings, **kwargs)
self.__temporal_covariate_settings__()
def __baseline_covariate_setting_options__(self, settings, **kwargs):
options = { 'use_demographics_gender' : False,
'use_demographics_age' : False,
'use_demographics_age_group' : False,
'use_demographics_race' : False,
'use_demographics_ethnicity' : False,
'use_demographics_index_year' : False,
'use_demographics_index_month' : False,
'use_demographics_prior_observation_time' : False,
'use_demographics_post_observation_time' : False,
'use_demographics_time_in_cohort' : False,
'use_demographics_index_year_month' : False,
'use_condition_occurrence_any_time_prior' : True,
'use_condition_occurrence_long_term' : False,
'use_condition_occurrence_medium_term' : False,
'use_condition_occurrence_short_term' : False,
'use_condition_occurrence_primary_inpatient_any_time_prior' : False,
'use_condition_occurrence_primary_inpatient_long_term' : False,
'use_condition_occurrence_primary_inpatient_medium_term' : False,
'use_condition_occurrence_primary_inpatient_short_term' : False,
'use_condition_era_any_time_prior' : False,
'use_condition_era_long_term' : False,
'use_condition_era_medium_term' : False,
'use_condition_era_short_term' : False,
'use_condition_era_overlapping' : False,
'use_condition_era_start_long_term' : False,
'use_condition_era_start_medium_term' : False,
'use_condition_era_start_short_term' : False,
'use_condition_group_era_any_time_prior' : False,
'use_condition_group_era_long_term' : False,
'use_condition_group_era_medium_term' : False,
'use_condition_group_era_short_term' : False,
'use_condition_group_era_overlapping' : False,
'use_condition_group_era_start_long_term' : False,
'use_condition_group_era_start_medium_term' : False,
'use_condition_group_era_start_short_term' : False,
'use_drug_exposure_any_time_prior' : False,
'use_drug_exposure_long_term' : False,
'use_drug_exposure_medium_term' : False,
'use_drug_exposure_short_term' : False,
'use_drug_era_any_time_prior' : False,
'use_drug_era_long_term' : False,
'use_drug_era_medium_term' : False,
'use_drug_era_short_term' : False,
'use_drug_era_overlapping' : False,
'use_drug_era_start_long_term' : False,
'use_drug_era_start_medium_term' : False,
'use_drug_era_start_short_term' : False,
'use_drug_group_era_any_time_prior' : False,
'use_drug_group_era_long_term' : False,
'use_drug_group_era_medium_term' : False,
'use_drug_group_era_short_term' : False,
'use_drug_group_era_overlapping' : False,
'use_drug_group_era_start_long_term' : False,
'use_drug_group_era_start_medium_term' : False,
'use_drug_group_era_start_short_term' : False,
'use_procedure_occurrence_any_time_prior' : False,
'use_procedure_occurrence_long_term' : False,
'use_procedure_occurrence_medium_term' : False,
'use_procedure_occurrence_short_term' : False,
'use_device_exposure_any_time_prior' : False,
'use_device_exposure_long_term' : False,
'use_device_exposure_medium_term' : False,
'use_device_exposure_short_term' : False,
'use_measurement_any_time_prior' : False,
'use_measurement_long_term' : False,
'use_measurement_medium_term' : False,
'use_measurement_short_term' : False,
'use_measurement_value_any_time_prior' : False,
'use_measurement_value_long_term' : False,
'use_measurement_value_medium_term' : False,
'use_measurement_value_short_term' : False,
'use_measurement_range_group_any_time_prior' : False,
'use_measurement_range_group_long_term' : False,
'use_measurement_range_group_medium_term' : False,
'use_measurement_range_group_short_term' : False,
'use_observation_any_time_prior' : False,
'use_observation_long_term' : False,
'use_observation_medium_term' : False,
'use_observation_short_term' : False,
'use_charlson_index' : False,
'use_dcsi' : False,
'use_chads2' : False,
'use_chads2_vasc' : False,
'use_hfrs' : False,
'use_distinct_condition_count_long_term' : False,
'use_distinct_condition_count_medium_term' : False,
'use_distinct_condition_count_short_term' : False,
'use_distinct_ingredient_count_long_term' : False,
'use_distinct_ingredient_count_medium_term' : False,
'use_distinct_ingredient_count_short_term' : False,
'use_distinct_procedure_count_long_term' : False,
'use_distinct_procedure_count_medium_term' : False,
'use_distinct_procedure_count_short_term' : False,
'use_distinct_measurement_count_long_term' : False,
'use_distinct_measurement_count_medium_term' : False,
'use_distinct_measurement_count_short_term' : False,
'use_distinct_observation_count_long_term' : False,
'use_distinct_observation_count_medium_term' : False,
'use_distinct_observation_count_short_term' : False,
'use_visit_count_long_term' : False,
'use_visit_count_medium_term' : False,
'use_visit_count_short_term' : False,
'use_visit_concept_count_long_term' : False,
'use_visit_concept_count_medium_term' : False,
'use_visit_concept_count_short_term' : False,
'long_term_start_days' : -365,
'medium_term_start_days' : -180,
'short_term_start_days' : -30,
'end_days' : 0.0,
'included_covariate_concept_ids' : [],
'add_descendants_to_include' : False,
'excluded_covariate_concept_ids' : [],
'add_descendants_to_exclude' : False,
'included_covariate_ids' : []
}
try:
print("""Setting baseline covariate options""")
if settings:
options.update(dict((k, settings['covariate_settings'][k]) for k in options.keys() if k in settings['covariate_settings'].keys()))
else:
options.update(dict((k, kwargs[k]) for k in options.keys() if k in kwargs.keys()))
self._baseline_covariate_setting_options = options
except:
print("""Error in setting baseline covariate options""")
@property
def baseline_covariate_setting_options(self):
return self._baseline_covariate_setting_options
def __baseline_covariate_settings__(self):
"""
Creates an object of type covariateSettings for baseline covariates, to be used in other functions.
"""
try:
print('Constructing baseline covariate settings')
self._baseline_covariate_settings = feature_extraction.createCovariateSettings(
useDemographicsGender = self._baseline_covariate_setting_options['use_demographics_gender'],
useDemographicsAge = self._baseline_covariate_setting_options['use_demographics_age'],
useDemographicsAgeGroup = self._baseline_covariate_setting_options['use_demographics_age_group'],
useDemographicsRace = self._baseline_covariate_setting_options['use_demographics_race'],
useDemographicsEthnicity = self._baseline_covariate_setting_options['use_demographics_ethnicity'],
useDemographicsIndexYear = self._baseline_covariate_setting_options['use_demographics_index_year'],
useDemographicsIndexMonth = self._baseline_covariate_setting_options['use_demographics_index_month'],
useDemographicsPriorObservationTime = self._baseline_covariate_setting_options['use_demographics_prior_observation_time'],
useDemographicsPostObservationTime = self._baseline_covariate_setting_options['use_demographics_post_observation_time'],
useDemographicsTimeInCohort = self._baseline_covariate_setting_options['use_demographics_time_in_cohort'],
useDemographicsIndexYearMonth = self._baseline_covariate_setting_options['use_demographics_index_year_month'],
useConditionOccurrenceAnyTimePrior = self._baseline_covariate_setting_options['use_condition_occurrence_any_time_prior'],
useConditionOccurrenceLongTerm = self._baseline_covariate_setting_options['use_condition_occurrence_long_term'],
useConditionOccurrenceMediumTerm = self._baseline_covariate_setting_options['use_condition_occurrence_medium_term'],
useConditionOccurrenceShortTerm = self._baseline_covariate_setting_options['use_condition_occurrence_short_term'],
useConditionOccurrencePrimaryInpatientAnyTimePrior = self._baseline_covariate_setting_options['use_condition_occurrence_primary_inpatient_any_time_prior'],
useConditionOccurrencePrimaryInpatientLongTerm = self._baseline_covariate_setting_options['use_condition_occurrence_primary_inpatient_long_term'],
useConditionOccurrencePrimaryInpatientMediumTerm = self._baseline_covariate_setting_options['use_condition_occurrence_primary_inpatient_medium_term'],
useConditionOccurrencePrimaryInpatientShortTerm = self._baseline_covariate_setting_options['use_condition_occurrence_primary_inpatient_short_term'],
useConditionEraAnyTimePrior = self._baseline_covariate_setting_options['use_condition_era_any_time_prior'],
useConditionEraLongTerm = self._baseline_covariate_setting_options['use_condition_era_long_term'],
useConditionEraMediumTerm = self._baseline_covariate_setting_options['use_condition_era_medium_term'],
useConditionEraShortTerm = self._baseline_covariate_setting_options['use_condition_era_short_term'],
useConditionEraOverlapping = self._baseline_covariate_setting_options['use_condition_era_overlapping'],
useConditionEraStartLongTerm = self._baseline_covariate_setting_options['use_condition_era_start_long_term'],
useConditionEraStartMediumTerm = self._baseline_covariate_setting_options['use_condition_era_start_medium_term'],
useConditionEraStartShortTerm = self._baseline_covariate_setting_options['use_condition_era_start_short_term'],
useConditionGroupEraAnyTimePrior = self._baseline_covariate_setting_options['use_condition_group_era_any_time_prior'],
useConditionGroupEraLongTerm = self._baseline_covariate_setting_options['use_condition_group_era_long_term'],
useConditionGroupEraMediumTerm = self._baseline_covariate_setting_options['use_condition_group_era_medium_term'],
useConditionGroupEraShortTerm = self._baseline_covariate_setting_options['use_condition_group_era_short_term'],
useConditionGroupEraOverlapping = self._baseline_covariate_setting_options['use_condition_group_era_overlapping'],
useConditionGroupEraStartLongTerm = self._baseline_covariate_setting_options['use_condition_group_era_start_long_term'],
useConditionGroupEraStartMediumTerm = self._baseline_covariate_setting_options['use_condition_group_era_start_medium_term'],
useConditionGroupEraStartShortTerm = self._baseline_covariate_setting_options['use_condition_group_era_start_short_term'],
useDrugExposureAnyTimePrior = self._baseline_covariate_setting_options['use_drug_exposure_any_time_prior'],
useDrugExposureLongTerm = self._baseline_covariate_setting_options['use_drug_exposure_long_term'],
useDrugExposureMediumTerm = self._baseline_covariate_setting_options['use_drug_exposure_medium_term'],
useDrugExposureShortTerm = self._baseline_covariate_setting_options['use_drug_exposure_short_term'],
useDrugEraAnyTimePrior = self._baseline_covariate_setting_options['use_drug_era_any_time_prior'],
useDrugEraLongTerm = self._baseline_covariate_setting_options['use_drug_era_long_term'],
useDrugEraMediumTerm = self._baseline_covariate_setting_options['use_drug_era_medium_term'],
useDrugEraShortTerm = self._baseline_covariate_setting_options['use_drug_era_short_term'],
useDrugEraOverlapping = self._baseline_covariate_setting_options['use_drug_era_overlapping'],
useDrugEraStartLongTerm = self._baseline_covariate_setting_options['use_drug_era_start_long_term'],
useDrugEraStartMediumTerm = self._baseline_covariate_setting_options['use_drug_era_start_medium_term'],
useDrugEraStartShortTerm = self._baseline_covariate_setting_options['use_drug_era_start_short_term'],
useDrugGroupEraAnyTimePrior = self._baseline_covariate_setting_options['use_drug_group_era_any_time_prior'],
useDrugGroupEraLongTerm = self._baseline_covariate_setting_options['use_drug_group_era_long_term'],
useDrugGroupEraMediumTerm = self._baseline_covariate_setting_options['use_drug_group_era_medium_term'],
useDrugGroupEraShortTerm = self._baseline_covariate_setting_options['use_drug_group_era_short_term'],
useDrugGroupEraOverlapping = self._baseline_covariate_setting_options['use_drug_group_era_overlapping'],
useDrugGroupEraStartLongTerm = self._baseline_covariate_setting_options['use_drug_group_era_start_long_term'],
useDrugGroupEraStartMediumTerm = self._baseline_covariate_setting_options['use_drug_group_era_start_medium_term'],
useDrugGroupEraStartShortTerm = self._baseline_covariate_setting_options['use_drug_group_era_start_short_term'],
useProcedureOccurrenceAnyTimePrior = self._baseline_covariate_setting_options['use_procedure_occurrence_any_time_prior'],
useProcedureOccurrenceLongTerm = self._baseline_covariate_setting_options['use_procedure_occurrence_long_term'],
useProcedureOccurrenceMediumTerm = self._baseline_covariate_setting_options['use_procedure_occurrence_medium_term'],
useProcedureOccurrenceShortTerm = self._baseline_covariate_setting_options['use_procedure_occurrence_short_term'],
useDeviceExposureAnyTimePrior = self._baseline_covariate_setting_options['use_device_exposure_any_time_prior'],
useDeviceExposureLongTerm = self._baseline_covariate_setting_options['use_device_exposure_long_term'],
useDeviceExposureMediumTerm = self._baseline_covariate_setting_options['use_device_exposure_medium_term'],
useDeviceExposureShortTerm = self._baseline_covariate_setting_options['use_device_exposure_short_term'],
useMeasurementAnyTimePrior = self._baseline_covariate_setting_options['use_measurement_any_time_prior'],
useMeasurementLongTerm = self._baseline_covariate_setting_options['use_measurement_long_term'],
useMeasurementMediumTerm = self._baseline_covariate_setting_options['use_measurement_medium_term'],
useMeasurementShortTerm = self._baseline_covariate_setting_options['use_measurement_short_term'],
useMeasurementValueAnyTimePrior = self._baseline_covariate_setting_options['use_measurement_value_any_time_prior'],
useMeasurementValueLongTerm = self._baseline_covariate_setting_options['use_measurement_value_long_term'],
useMeasurementValueMediumTerm = self._baseline_covariate_setting_options['use_measurement_value_medium_term'],
useMeasurementValueShortTerm = self._baseline_covariate_setting_options['use_measurement_value_short_term'],
useMeasurementRangeGroupAnyTimePrior = self._baseline_covariate_setting_options['use_measurement_range_group_any_time_prior'],
useMeasurementRangeGroupLongTerm = self._baseline_covariate_setting_options['use_measurement_range_group_long_term'],
useMeasurementRangeGroupMediumTerm = self._baseline_covariate_setting_options['use_measurement_range_group_medium_term'],
useMeasurementRangeGroupShortTerm = self._baseline_covariate_setting_options['use_measurement_range_group_short_term'],
useObservationAnyTimePrior = self._baseline_covariate_setting_options['use_observation_any_time_prior'],
useObservationLongTerm = self._baseline_covariate_setting_options['use_observation_long_term'],
useObservationMediumTerm = self._baseline_covariate_setting_options['use_observation_medium_term'],
useObservationShortTerm = self._baseline_covariate_setting_options['use_observation_short_term'],
useCharlsonIndex = self._baseline_covariate_setting_options['use_charlson_index'],
useDcsi = self._baseline_covariate_setting_options['use_dcsi'],
useChads2 = self._baseline_covariate_setting_options['use_chads2'],
useChads2Vasc = self._baseline_covariate_setting_options['use_chads2_vasc'],
useHfrs = self._baseline_covariate_setting_options['use_hfrs'],
useDistinctConditionCountLongTerm = self._baseline_covariate_setting_options['use_distinct_condition_count_long_term'],
useDistinctConditionCountMediumTerm = self._baseline_covariate_setting_options['use_distinct_condition_count_medium_term'],
useDistinctConditionCountShortTerm = self._baseline_covariate_setting_options['use_distinct_condition_count_short_term'],
useDistinctIngredientCountLongTerm = self._baseline_covariate_setting_options['use_distinct_ingredient_count_long_term'],
useDistinctIngredientCountMediumTerm = self._baseline_covariate_setting_options['use_distinct_ingredient_count_medium_term'],
useDistinctIngredientCountShortTerm = self._baseline_covariate_setting_options['use_distinct_ingredient_count_short_term'],
useDistinctProcedureCountLongTerm = self._baseline_covariate_setting_options['use_distinct_procedure_count_long_term'],
useDistinctProcedureCountMediumTerm = self._baseline_covariate_setting_options['use_distinct_procedure_count_medium_term'],
useDistinctProcedureCountShortTerm = self._baseline_covariate_setting_options['use_distinct_procedure_count_short_term'],
useDistinctMeasurementCountLongTerm = self._baseline_covariate_setting_options['use_distinct_measurement_count_long_term'],
useDistinctMeasurementCountMediumTerm = self._baseline_covariate_setting_options['use_distinct_measurement_count_medium_term'],
useDistinctMeasurementCountShortTerm = self._baseline_covariate_setting_options['use_distinct_measurement_count_short_term'],
useDistinctObservationCountLongTerm = self._baseline_covariate_setting_options['use_distinct_observation_count_long_term'],
useDistinctObservationCountMediumTerm = self._baseline_covariate_setting_options['use_distinct_observation_count_medium_term'],
useDistinctObservationCountShortTerm = self._baseline_covariate_setting_options['use_distinct_observation_count_short_term'],
useVisitCountLongTerm = self._baseline_covariate_setting_options['use_visit_count_long_term'],
useVisitCountMediumTerm = self._baseline_covariate_setting_options['use_visit_count_medium_term'],
useVisitCountShortTerm = self._baseline_covariate_setting_options['use_visit_count_short_term'],
useVisitConceptCountLongTerm = self._baseline_covariate_setting_options['use_visit_concept_count_long_term'],
useVisitConceptCountMediumTerm = self._baseline_covariate_setting_options['use_visit_concept_count_medium_term'],
useVisitConceptCountShortTerm = self._baseline_covariate_setting_options['use_visit_concept_count_short_term'],
longTermStartDays = self._baseline_covariate_setting_options['long_term_start_days'],
mediumTermStartDays = self._baseline_covariate_setting_options['medium_term_start_days'],
shortTermStartDays = self._baseline_covariate_setting_options['short_term_start_days'],
endDays = self._baseline_covariate_setting_options['end_days'],
includedCovariateConceptIds = ro.vectors.IntVector(self._baseline_covariate_setting_options['included_covariate_concept_ids']),
addDescendantsToInclude = self._baseline_covariate_setting_options['add_descendants_to_include'],
excludedCovariateConceptIds = ro.vectors.IntVector(self._baseline_covariate_setting_options['excluded_covariate_concept_ids']),
addDescendantsToExclude = self._baseline_covariate_setting_options['add_descendants_to_exclude'],
includedCovariateIds = ro.vectors.IntVector(self._baseline_covariate_setting_options['included_covariate_ids']))
except:
print("""
Error in constructing baseline covariate settings
""")
@property
def baseline_covariate_settings(self):
return self._baseline_covariate_settings
def __tidy_plp_settings__(self, settings, **kwargs):
try:
print("""Setting covariate tyding options""")
if settings:
self._tidy_plp_settings = settings['tidy_covariate_settings']
else:
self._tidy_plp_settings ={'min_fraction' : kwargs['min_fraction'],
'normalize' : kwargs['normalize'],
'remove_redundancy' : kwargs['remove_redundancy']
}
except:
print("""Missing/incorrect covariate tyding settings. Specify covariate tidying parameters as follows:
min_fraction: Minimum fraction of the population that should have a non-zero value for a covariate
normalize: If true, normalize the covariates by dividing by the max
remove_redundancy: If true, remove redundant covariates
""")
@property
def tidy_plp_settings(self):
return self._tidy_plp_settings
def __temporal_covariate_setting_options__(self, settings, **kwargs):
options = {
'use_demographics_gender' : False,
'use_demographics_age' : False,
'use_demographics_age_group' : False,
'use_demographics_race' : False,
'use_demographics_ethnicity' : False,
'use_demographics_index_year' : False,
'use_demographics_index_month' : False,
'use_demographics_prior_observation_time' : False,
'use_demographics_post_observation_time' : False,
'use_demographics_time_in_cohort' : False,
'use_demographics_index_year_month' : False,
'use_condition_occurrence' : False,
'use_condition_occurrence_primary_inpatient' : False,
'use_condition_era_start' : False,
'use_condition_era_overlap' : False,
'use_condition_era_group_start' : False,
'use_condition_era_group_overlap' : False,
'use_drug_exposure' : False,
'use_drug_era_start' : False,
'use_drug_era_overlap' : False,
'use_drug_era_group_start' : False,
'use_drug_era_group_overlap' : False,
'use_procedure_occurrence' : False,
'use_device_exposure' : False,
'use_measurement' : False,
'use_measurement_value' : False,
'use_measurement_range_group' : False,
'use_observation' : False,
'use_charlson_index' : False,
'use_dcsi' : False,
'use_chads2' : False,
'use_chads2_vasc' : False,
'use_hfrs' : False,
'use_distinct_condition_count' : False,
'use_distinct_ingredient_count' : False,
'use_distinct_procedure_count' : False,
'use_distinct_measurement_count' : False,
'use_distinct_observation_count' : False,
'use_visit_count' : False,
'use_visit_concept_count' : False,
'temporal_start_days' : list(range(-365,0)),
'temporal_end_days' : list(range(-365,0)),
'included_covariate_concept_ids' : [],
'add_descendants_to_include' : False,
'excluded_covariate_concept_ids' : [],
'add_descendants_to_exclude' : False,
'included_covariate_ids' : []
}
try:
print("""Setting temporal covariate options""")
if settings:
options.update(dict((k, settings['covariate_settings'][k]) for k in options.keys() if k in settings['covariate_settings'].keys()))
else:
options.update(dict((k, kwargs[k]) for k in options.keys() if k in kwargs.keys()))
self._temporal_covariate_setting_options = options
except:
print("""Error in setting baseline covariate options""")
@property
def temporal_covariate_setting_options(self):
return self._temporal_covariate_setting_options
def __temporal_covariate_settings__(self):
"""
Creates an object of type covariateSettings for temporal covariates, to be used in other functions.
"""
try:
print('Constructing temporal covariate settings')
self._temporal_covariate_settings = feature_extraction.createTemporalCovariateSettings (
useDemographicsGender = self._temporal_covariate_setting_options['use_demographics_gender'],
useDemographicsAge = self._temporal_covariate_setting_options['use_demographics_age'],
useDemographicsAgeGroup = self._temporal_covariate_setting_options['use_demographics_age_group'],
useDemographicsRace = self._temporal_covariate_setting_options['use_demographics_race'],
useDemographicsEthnicity = self._temporal_covariate_setting_options['use_demographics_ethnicity'],
useDemographicsIndexYear = self._temporal_covariate_setting_options['use_demographics_index_year'],
useDemographicsIndexMonth = self._temporal_covariate_setting_options['use_demographics_index_month'],
useDemographicsPriorObservationTime = self._temporal_covariate_setting_options['use_demographics_prior_observation_time'],
useDemographicsPostObservationTime = self._temporal_covariate_setting_options['use_demographics_post_observation_time'],
useDemographicsTimeInCohort = self._temporal_covariate_setting_options['use_demographics_time_in_cohort'],
useDemographicsIndexYearMonth = self._temporal_covariate_setting_options['use_demographics_index_year_month'],
useConditionOccurrence = self._temporal_covariate_setting_options['use_condition_occurrence'],
useConditionOccurrencePrimaryInpatient = self._temporal_covariate_setting_options['use_condition_occurrence_primary_inpatient'],
useConditionEraStart = self._temporal_covariate_setting_options['use_condition_era_start'],
useConditionEraOverlap = self._temporal_covariate_setting_options['use_condition_era_overlap'],
useConditionEraGroupStart = self._temporal_covariate_setting_options['use_condition_era_group_start'],
useConditionEraGroupOverlap = self._temporal_covariate_setting_options['use_condition_era_group_overlap'],
useDrugExposure = self._temporal_covariate_setting_options['use_drug_exposure'],
useDrugEraStart = self._temporal_covariate_setting_options['use_drug_era_start'],
useDrugEraOverlap = self._temporal_covariate_setting_options['use_drug_era_overlap'],
useDrugEraGroupStart = self._temporal_covariate_setting_options['use_drug_era_group_start'],
useDrugEraGroupOverlap = self._temporal_covariate_setting_options['use_drug_era_group_overlap'],
useProcedureOccurrence = self._temporal_covariate_setting_options['use_procedure_occurrence'],
useDeviceExposure = self._temporal_covariate_setting_options['use_device_exposure'],
useMeasurement = self._temporal_covariate_setting_options['use_measurement'],
useMeasurementValue = self._temporal_covariate_setting_options['use_measurement_value'],
useMeasurementRangeGroup = self._temporal_covariate_setting_options['use_measurement_range_group'],
useObservation = self._temporal_covariate_setting_options['use_observation'],
useCharlsonIndex = self._temporal_covariate_setting_options['use_charlson_index'],
useDcsi = self._temporal_covariate_setting_options['use_dcsi'],
useChads2 = self._temporal_covariate_setting_options['use_chads2'],
useChads2Vasc = self._temporal_covariate_setting_options['use_chads2_vasc'],
useHfrs = self._temporal_covariate_setting_options['use_hfrs'],
useDistinctConditionCount = self._temporal_covariate_setting_options['use_distinct_condition_count'],
useDistinctIngredientCount = self._temporal_covariate_setting_options['use_distinct_ingredient_count'],
useDistinctProcedureCount = self._temporal_covariate_setting_options['use_distinct_procedure_count'],
useDistinctMeasurementCount = self._temporal_covariate_setting_options['use_distinct_measurement_count'],
useDistinctObservationCount = self._temporal_covariate_setting_options['use_distinct_observation_count'],
useVisitCount = self._temporal_covariate_setting_options['use_visit_count'],
useVisitConceptCount = self._temporal_covariate_setting_options['use_visit_concept_count'],
temporalStartDays = ro.vectors.IntVector(self._temporal_covariate_setting_options['temporal_start_days']),
temporalEndDays = ro.vectors.IntVector(self._temporal_covariate_setting_options['temporal_end_days']),
includedCovariateConceptIds = ro.vectors.IntVector(self._temporal_covariate_setting_options['included_covariate_concept_ids']),
addDescendantsToInclude = self._temporal_covariate_setting_options['add_descendants_to_include'],
excludedCovariateConceptIds = ro.vectors.IntVector(self._temporal_covariate_setting_options['excluded_covariate_concept_ids']),
addDescendantsToExclude = self._temporal_covariate_setting_options['add_descendants_to_exclude'],
includedCovariateIds = ro.vectors.IntVector(self._temporal_covariate_setting_options['included_covariate_ids']))
except:
print("""
Error in constructing temporal covariate settings
""")
@property
def temporal_covariate_settings(self):
return self._temporal_covariate_settings
def __model_training_settings__(self, settings, **kwargs):
try:
print("""Setting model training options""")
if settings:
self._model_training_settings = settings['model_training_settings']
else:
self._model_training_settings ={
'sample_size': kwargs['sample_size'] if kwargs['sample_size'] else r('NULL'),
'val_size': kwargs['val_size'],
'random_state': kwargs['random_state'] if 'random_state' in kwargs.keys() else None,
}
except:
print("""Missing/incorrect model training settings. Specify model training parameters as follows:
sample_size: The sample size to be extracted from members of the cohort
val_size: The proportion of data to used for the training/validation split
random_state: Number to control shuffling applied to the data before applying the split
path: path for saving extracted data
""")
@property
def model_training_settings(self):
return self._model_training_settings
def __expt_config_settings__(self, settings, **kwargs):
try:
print("""Setting experiment config options""")
if settings:
self._expt_config_settings = settings['expt_config_settings']
else:
self._expt_config_settings = {
'categorical_covariate_concept_ids': kwargs['categorical_covariate_concept_ids'],
'numerical_covariate_concept_ids': kwargs['numerical_covariate_concept_ids'],
'categorical_covariate_concept_value_mappings': kwargs['categorical_covariate_concept_value_mappings'],
'normal_covariate_concept_values': kwargs['normal_covariate_concept_values']
}
except:
print("""Missing/incorrect experiment config options. Specify experiment config parameters as follows:
categorical_covariate_concept_ids: list of categorical covariate concept identifiers from OMOP CDM
numerical_covariate_concept_ids: list of numerical covariate concept identifiers from OMOP CDM
            categorical_covariate_concept_value_mappings: dictionary of concept value mappings. Each key is a concept identifier from OMOP CDM and each value is a dictionary with feature value replacement mappings
normal_covariate_concept_values: user specified normal values for each concept
""")
@property
def expt_config_settings(self):
return self._expt_config_settings
def __get_plp_data__ (self):
"""
Gets patient level prediction data, an R object of type plpData, containing information on the cohorts, their outcomes, and baseline covariates
"""
print('Fetching plpData')
plp_data = patient_level_prediction.getPlpData(
connectionDetails = self._r_db_connection_details,
cdmDatabaseSchema = self._cohort_details['cdm_database_schema'],
oracleTempSchema = self._cohort_details['oracle_temp_schema'],
cohortDatabaseSchema = self._cohort_details['target_cohort_database_schema'],
cohortTable = self._cohort_details['target_cohort_table'],
cohortId = self._cohort_details['target_cohort_id'],
outcomeDatabaseSchema = self._cohort_details['outcome_cohort_database_schema'],
outcomeTable = self._cohort_details['outcome_cohort_table'],
outcomeIds = self._cohort_details['outcome_cohort_id'],
sampleSize = self._model_training_settings['sample_size'],
covariateSettings = self._baseline_covariate_settings
)
return plp_data
def __tidy_plp_covariates__(self, plp_data):
"""
        Removes infrequent covariates, normalizes, and removes redundancy
"""
print('Tidying plp covariates')
r("""
        tidyPlpCovariates <- function(plp_data, minFraction = 0.1, normalize = TRUE, removeRedundancy = TRUE){
plp_data$covariateData = tidyCovariateData(plp_data$covariateData,
minFraction = minFraction,
normalize = normalize,
removeRedundancy = removeRedundancy)
return(plp_data)
}
""")
tidy_plp_covariates = r['tidyPlpCovariates']
tidy_plp_data = tidy_plp_covariates(plp_data,
minFraction = self._tidy_plp_settings['min_fraction'],
normalize = self._tidy_plp_settings['normalize'],
removeRedundancy = self._tidy_plp_settings['remove_redundancy'])
return tidy_plp_data
def __baseline_covariate_descriptions__(self, plp_data):
"""
Gets descriptions of baseline covariates from a plpData object
@param plp_data: An R object of type plpData, containing information on the cohorts, their outcomes, and baseline covariates
@return: a pandas dataframe describing the covariates that have been extracted
"""
print('Constructing baseline covariate descriptions')
r("""
getCovariateRefDataFrame <- function(plp_data){
return(data.frame(plp_data$covariateData$covariateRef))
}
""")
get_covariate_ref_data_frame = r['getCovariateRefDataFrame']
covariate_ref_df = get_covariate_ref_data_frame(plp_data )
condition_prefix = 'condition_occurrence any time prior through 0 days relative to index:'
covariate_ref_df['covariateName'] = [i.replace(condition_prefix,'') for i in covariate_ref_df['covariateName']]
return covariate_ref_df
def __baseline_covariate_data__(self, plp_data):
"""
Gets baseline covariates for each subject from a plpData R object as a pandas dataframe in the wide format
"""
print('Constructing baseline covariates')
r("""
getCovariateDataFrame <- function(plp_data){
target_cohort <- data.frame(plp_data$cohorts[,c('rowId','subjectId')])
covariate_ref_df <- data.frame(plp_data$covariateData$covariateRef)
covariates_df_long <- data.frame(plp_data$covariateData$covariates)
covariates_df_long <- merge(covariates_df_long, covariate_ref_df,by ='covariateId')
covariates_df_long <- merge(target_cohort, covariates_df_long, by='rowId')
return(covariates_df_long[,c('subjectId','covariateName','covariateValue')])
}
""")
get_covariate_data_frame = r['getCovariateDataFrame']
df_long = get_covariate_data_frame(plp_data)
condition_prefix = 'condition_occurrence any time prior through 0 days relative to index:'
df_long['covariateName'] = [i.replace(condition_prefix,'') for i in df_long['covariateName']]
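        # Pivot from long to wide format: one row per subject, one column per covariate,
        # with absent covariates filled with 0.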
baseline_covariate_data = df_long.pivot_table(index= 'subjectId', columns = 'covariateName', values = 'covariateValue').fillna(0).reset_index()
return baseline_covariate_data
def __target_cohort_subject_ids__ (self, plp_data):
"""
Gets target cohort subject ids
@param plp_data: An R object of type plpData, containing information on the cohorts, their outcomes, and baseline covariates
@return: A list of subject ids in the target cohort sample
"""
r("""
getTargetCohortSubjectIds <- function(plp_data){
return(plp_data$cohorts$subjectId)
}
""")
print('Fetching list of subject ids in the target cohort sample')
get_target_cohort_subject_ids = r['getTargetCohortSubjectIds']
subject_ids = get_target_cohort_subject_ids(plp_data)
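        # R hands the ids back as a numeric (float) vector; cast to int so they can be
        # spliced into the SQL IN (...) clauses used by the temporal covariate builder.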
subject_ids = [int(i) for i in subject_ids]
return subject_ids
def __r_temporal_covariate_data__(self, subject_ids):
"""
Extracts temporal covariate data for the subjects in a target cohort using a custom covariate builder
@param subject_ids: Ids of subjects in the sample
        @return: An R object of type CovariateData, containing information on temporal covariates
"""
r("""
getTemporalCovariateData <- function(hostname,
port,
dbname,
user,
password,
cdmDatabaseSchema,
cohortDatabaseSchema,
cohortTable,
cohortId,
subjectIds,
covariateSettings,
path) {
writeLines("Constructing temporal covariates")
if (length(covariateSettings$includedCovariateConceptIds) == 0) {
return(NULL)
}
# SQL to fetch the covariate data:
sql <- sprintf(paste("SELECT c.subject_id as subject_id,",
"v.visit_detail_id as stay_id,",
"v.visit_start_datetime as stay_start,",
"v.visit_end_datetime as stay_end,",
"EXTRACT(EPOCH FROM (m.measurement_datetime - v.visit_start_datetime))/3600 as hours,",
"m.measurement_concept_id as covariate_id,",
"m.value_as_number as covariate_value",
"FROM (SELECT *",
"FROM %s.%s c",
"WHERE c.cohort_definition_id = %s",
"AND c.subject_id IN (%s)) c,",
"%s.visit_detail v,",
"(SELECT *",
"FROM %s.measurement m",
"WHERE m.person_id IN (%s)",
"AND m.measurement_concept_id IN (%s)) m",
"WHERE c.subject_id = v.person_id",
"AND c.subject_id = m.person_id",
"AND v.visit_detail_id = m.visit_detail_id",
"AND m.measurement_datetime >= v.visit_start_datetime",
"AND m.measurement_datetime <= v.visit_start_datetime + INTERVAL \'2 day\'"
), cohortDatabaseSchema,
cohortTable,
cohortId,
paste(subjectIds, collapse = ", "),
cdmDatabaseSchema,
cdmDatabaseSchema,
paste(subjectIds, collapse = ", "),
paste(covariateSettings$includedCovariateConceptIds, collapse = ", "))
path_to_covariate_data <-paste(path,'covariates.csv',sep='')
command <- sprintf("%scopy (%s) to %s with csv header","\\\\",sql, path_to_covariate_data)
#execute
system(sprintf('PGPASSWORD=%s psql -h %s -p %s -d %s -U %s -c "%s"',
password,
hostname,
port,
dbname,
user,
command))
# Retrieve the covariates:
covariates <- read.csv(path_to_covariate_data)
colnames(covariates) <- gsub('_','',gsub("(_[a-z])","\\\\U\\\\1",colnames(covariates),perl=TRUE))
covariates$stayStart <- as.character(covariates$stayStart)
covariates$stayEnd <- as.character(covariates$stayEnd)
# SQL to fetch covariate reference:
sql2 <- sprintf(paste(
"SELECT c.concept_id as covariate_id,",
"c.concept_name as covariate_name",
"FROM %s.concept c",
"WHERE c.concept_id IN (%s)"
),cdmDatabaseSchema,
paste(covariateSettings$includedCovariateConceptIds, collapse = ", "))
path_to_covariate_refs <-paste(path,'covariate_references.csv',sep='')
command2 <- sprintf("%scopy (%s) to %s with csv header","\\\\",sql2, path_to_covariate_refs)
#execute
system(sprintf('PGPASSWORD=%s psql -h %s -p %s -d %s -U %s -c "%s"',
password,
hostname,
port,
dbname,
user,
command2))
covariateRef <- read.csv(path_to_covariate_refs)
colnames(covariateRef) <- gsub('_','',gsub("(_[a-z])","\\\\U\\\\1",colnames(covariateRef),perl=TRUE))
# Construct analysis reference:
analysisRef <- data.frame(analysisId = 1,
analysisName = "Selected Temporal Covariates",
domainId = "Measurement",
startDay = 0,
endDay = 0,
isBinary = "N",
missingMeansZero = "N")
# Construct analysis reference:
metaData <- list(sql = sql, call = match.call())
result <- Andromeda::andromeda(covariates = covariates,
covariateRef = covariateRef,
analysisRef = analysisRef)
attr(result, "metaData") <- metaData
class(result) <- "CovariateData"
return(result)
}
""")
path = self._working_directory + '/data/'
if not os.path.exists(path):
os.makedirs(path)
get_temporal_covariate_data = r['getTemporalCovariateData']
r_temporal_covariate_data = get_temporal_covariate_data(hostname = self._db_connection_details['hostname'],
port = self._db_connection_details['port'],
dbname = self._db_connection_details['dbname'],
user = self._db_connection_details['user'],
password = self._db_connection_details['password'],
cdmDatabaseSchema = self._cohort_details['cdm_database_schema'],
cohortDatabaseSchema = self._cohort_details['target_cohort_database_schema'],
cohortTable = self._cohort_details['target_cohort_table'],
cohortId = self._cohort_details['target_cohort_id'],
subjectIds = subject_ids,
covariateSettings = self._temporal_covariate_settings,
path = path)
return r_temporal_covariate_data
def __temporal_covariate_data_long__(self, r_temporal_covariate_data):
"""
Gets temporal covariates for each subject as a pandas dataframe in the long format
"""
r("""
getTemporalCovariateDataLong <- function(temporal_covariate_data){
temporal_covariate_ref_df <- temporal_covariate_data$covariateRef
temporal_covariates_df_long <- temporal_covariate_data$covariates
temporal_covariates_df_long <-merge(temporal_covariates_df_long,temporal_covariate_ref_df, by ='covariateId')
return(temporal_covariates_df_long[,c('subjectId','stayId','stayStart', 'stayEnd','hours', 'covariateId', 'covariateName','covariateValue')])
}
""")
print('Fetching temporal covariates as a pandas dataframe in the long format')
get_temporal_covariate_data_long = r['getTemporalCovariateDataLong']
df_long = get_temporal_covariate_data_long(r_temporal_covariate_data)
df_long['stayId'] = df_long.apply(lambda x: str(int(x.subjectId))+'_'+ str(int(x.stayId)), axis =1)
return df_long
def __covariate_value_mapping__(self, df):
"""
Remaps covariate values using provided mappings
"""
mappings = dict()
for key in self._expt_config_settings['categorical_covariate_concept_value_mappings'].keys():
if key in self._temporal_covariate_names.keys():
mappings[self._temporal_covariate_names[key]] = self._expt_config_settings['categorical_covariate_concept_value_mappings'][key]
return df.replace(mappings)
def __temporal_covariate_data_wide__(self, df_long):
"""
Gets temporal covariates for each subject as a pandas dataframe in the wide format
"""
print('Constructing temporal covariates for each subject as a pandas dataframe in the wide format')
df_wide = df_long.pivot_table(index = ['subjectId','stayId','hours'], columns = 'covariateName', values = 'covariateValue').reset_index()
df_wide.columns.name = None
df_wide = df_wide.sort_values(by=['stayId', 'hours'],ascending=True)
df_wide['seqnum'] = df_wide.groupby(['stayId']).cumcount()
id_cols = ['subjectId','stayId','seqnum','hours']
covar_cols = sorted(list(set(df_wide.columns) - set(id_cols)))
df_wide = df_wide[id_cols + covar_cols]
df_wide = self.__covariate_value_mapping__(df_wide)
return df_wide
def __outcomes_dict__(self, plp_data):
"""
Gets outcomes from a plpData R object
@param plp_data: An R object of type plpData, containing information on the cohorts, their outcomes, and baseline covariates
@return: A python dictionary where the key is the subject id, and the value is the date of the outcome
"""
print('Constructing outcomes dictionary')
r("""
getOutcomeData <- function(plp_data){
target_cohort <- data.frame(plp_data$cohorts[,c('rowId','subjectId','cohortStartDate')])
outcome_cohort <- data.frame(plp_data$outcomes[,c('rowId','daysToEvent')])
outcome_cohort <- merge(x= target_cohort,y =outcome_cohort,by ='rowId')
outcome_cohort$y_true_date <- outcome_cohort$cohortStartDate + outcome_cohort$daysToEvent
outcome_cohort$y_true_date<- as.character(outcome_cohort$y_true_date)
return(outcome_cohort[,c('subjectId','y_true_date')])
}
""")
get_outcome_data = r['getOutcomeData']
outcome_df = get_outcome_data(plp_data)
outcome_dict = dict(zip(outcome_df['subjectId'].astype(int),pd.to_datetime(outcome_df['y_true_date'], format='%Y-%m-%d')))
return outcome_dict
def __outcomes_per_stay__(self, df_long, df_wide, outcome_dict):
"""
Gets outcomes per stay for each subject
@param df_long: A pandas dataframe with the covariate observations for each person in the long format
@param df_wide: A pandas dataframe with the temporal covariate observations for each person in the wide format
@param outcome_dict: A python dictionary where the key is the subject id, and the value is the date of the outcome
@return: A pandas dataframe with outcomes per stay per subject
"""
print('Constructing outcomes per stay per subject')
outcome_per_stay = df_long[['subjectId','stayId','stayStart', 'stayEnd']].drop_duplicates()
outcome_per_stay = outcome_per_stay[outcome_per_stay['stayId'].isin(set(df_wide['stayId']))]
outcome_per_stay['stayStart'] = pd.to_datetime(outcome_per_stay['stayStart'])
outcome_per_stay['stayEnd'] = pd.to_datetime(outcome_per_stay['stayEnd'])
outcome_per_stay['y_true'] = 0
for subject in outcome_dict.keys():
outcome_date = pd.to_datetime(outcome_dict[subject])
outcome_per_stay.loc[((outcome_per_stay['subjectId']==subject) &
(outcome_per_stay['stayStart'] <= outcome_date) &
(outcome_per_stay['stayEnd'] >= outcome_date)), 'y_true'] = 1
return outcome_per_stay
def __train_val_split__(self, temporal_covariate_data, outcome_per_stay, baseline_covariate_data):
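"""
Splits subjects into training and validation sets, stratified on the person-level outcome,
and selects the corresponding temporal covariates, per-stay outcomes, and baseline covariates
@param temporal_covariate_data: A pandas dataframe with the temporal covariate observations for each person in the wide format
@param outcome_per_stay: A pandas dataframe with outcomes per stay per subject
@param baseline_covariate_data: A pandas dataframe with baseline covariates for each subject in the wide format
@return: Training and validation splits of the temporal covariates, per-stay outcomes, and baseline covariates
"""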
print('Splitting data')
person_level_data = outcome_per_stay[['subjectId','y_true']].drop_duplicates()
X = person_level_data['subjectId']
y = person_level_data['y_true']
X_train, X_val, _, _ = train_test_split( X,
y,
test_size=self._model_training_settings['val_size'],
random_state= self._model_training_settings['random_state'],
stratify=y)
train_subjects = sorted(X_train)
val_subjects = sorted(X_val)
X_train_data = temporal_covariate_data[temporal_covariate_data['subjectId'].isin(train_subjects)]
X_train_data = X_train_data.drop('subjectId', axis=1)
y_train_data = outcome_per_stay[outcome_per_stay['subjectId'].isin(train_subjects)]
y_train_data = y_train_data[['stayId', 'y_true']]
X_val_data = temporal_covariate_data[temporal_covariate_data['subjectId'].isin(val_subjects)]
X_val_data = X_val_data.drop('subjectId', axis=1)
y_val_data = outcome_per_stay[outcome_per_stay['subjectId'].isin(val_subjects)]
y_val_data = y_val_data[['stayId', 'y_true']]
X_train_baseline = baseline_covariate_data[baseline_covariate_data['subjectId'].isin(train_subjects)]
X_val_baseline = baseline_covariate_data[baseline_covariate_data['subjectId'].isin(val_subjects)]
return X_train_data, X_val_data, y_train_data, y_val_data, X_train_baseline, X_val_baseline
def __file_name__(self,
typ,
dset
):
filename = '{}_T{}_O{}_{}_{}.csv'.format(self._analysis_name.upper(),
self._cohort_details['target_cohort_id'],
self._cohort_details['outcome_cohort_id'],
typ.upper(),
dset.upper())
return filename
def __domains__(self, df, covariate_names, categorical_covariates):
"""
Get domains of categorical covariates
"""
df = df[['covariateId','covariateValue']].drop_duplicates().dropna()
dictionary = dict()
for covariate in categorical_covariates:
covariate_name = covariate_names[covariate]
covariate_values = list(df['covariateValue'][df['covariateId'] == covariate])
covariate_value_mapping = self._expt_config_settings['categorical_covariate_concept_value_mappings'][covariate]
covariate_value_names = [covariate_value_mapping[i] for i in covariate_values]
dictionary[covariate_name] = covariate_value_names
return dictionary
def __yaml_doc__(self,
df,
train_tgt_file = 'data/y_train',
train_feat_file = 'data/X_train',
train_baseline_feat_file = 'data/X_train_baseline',
val_tgt_file = 'data/y_val',
val_feat_file = 'data/X_val',
val_baseline_feat_file = 'data/X_val_baseline',
tgt_col = 'y_true',
idx_cols = 'stayId',
time_order_col = ['hours', 'seqnum'],
feat_cols = None,
numerical = list(),
normal_values = dict()
):
expt_config = dict()
expt_config['tgt_col'] = tgt_col
expt_config['idx_cols'] = idx_cols
expt_config['time_order_col'] = time_order_col
expt_config['feat_cols'] = feat_cols
expt_config['train'] = dict()
expt_config['train']['tgt_file'] = train_tgt_file
expt_config['train']['feat_file'] = train_feat_file
expt_config['train']['baseline_feat_file'] = train_baseline_feat_file
expt_config['val'] = dict()
expt_config['val']['tgt_file'] = val_tgt_file
expt_config['val']['feat_file'] = val_feat_file
expt_config['val']['baseline_feat_file'] = val_baseline_feat_file
categorical_covariates = sorted(list(set(self._expt_config_settings['categorical_covariate_concept_ids'] ).intersection(set(self._temporal_covariate_names.keys()))))
expt_config['category_map'] = self.__domains__(df, self._temporal_covariate_names, categorical_covariates)
numerical_covariates = sorted(list(set(self._expt_config_settings['numerical_covariate_concept_ids'] ).intersection(set(self._temporal_covariate_names.keys()))))
expt_config['numerical'] = sorted([self._temporal_covariate_names[i] for i in numerical_covariates])
normal_value_covariate_ids = sorted(list(set(self._expt_config_settings['normal_covariate_concept_values'].keys()).intersection(set(self._temporal_covariate_names.keys()))))
normal_values = dict()
for covariate in normal_value_covariate_ids:
normal_values[self._temporal_covariate_names[covariate]] = self._expt_config_settings['normal_covariate_concept_values'][covariate]
expt_config['normal_values'] = normal_values
expt_config_filename = '{}_T{}_O{}_expt_config.yaml'.format(self._analysis_name.upper(),
self._cohort_details['target_cohort_id'],
self._cohort_details['outcome_cohort_id']
)
with open(expt_config_filename, 'w') as outfile:
yaml.dump(expt_config, outfile, default_flow_style=False, sort_keys=False)
print('Experiment configurations saved to {}'.format(expt_config_filename))
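# For reference, the written YAML has roughly this shape (values shown are the illustrative
# defaults taken from the arguments above; category_map, numerical and normal_values are
# derived from the configured covariate concept ids):
# tgt_col: y_true
# idx_cols: stayId
# time_order_col: [hours, seqnum]
# feat_cols: null
# train: {tgt_file: data/y_train, feat_file: data/X_train, baseline_feat_file: data/X_train_baseline}
# val: {tgt_file: data/y_val, feat_file: data/X_val, baseline_feat_file: data/X_val_baseline}
# category_map: {covariate name: [observed category values]}
# numerical: [numerical covariate names]
# normal_values: {covariate name: normal value}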
def __output_dir__(self):
path = os.path.join(self._working_directory, self._output_directory)
if not os.path.exists(path):
os.makedirs(path)
return Path(path)
def __training_setup__(self):
print('Extracting training features')
plp_data = self.__get_plp_data__()
tidy_plp_data = self.__tidy_plp_covariates__(plp_data)
baseline_covariate_descriptions = self.__baseline_covariate_descriptions__(tidy_plp_data)
baseline_covariate_data = self.__baseline_covariate_data__(tidy_plp_data)
target_cohort_subject_ids = self.__target_cohort_subject_ids__(tidy_plp_data)
r_temporal_covariate_data = self.__r_temporal_covariate_data__(target_cohort_subject_ids)
temporal_covariate_data_long = self.__temporal_covariate_data_long__(r_temporal_covariate_data)
self._temporal_covariate_names = | pd.Series(temporal_covariate_data_long.covariateName.values, index=temporal_covariate_data_long.covariateId) | pandas.Series |
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.min.ceil("s")
expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.max.floor("s")
expected = Timedelta.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
def test_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
# space
assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D"))
assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D"))
# invalid
msg = "invalid unit abbreviation"
with pytest.raises(ValueError, match=msg):
Timedelta("1foo")
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
Timedelta("foo")
def test_full_format_converters(self):
def conv(v):
return v.astype("m8[ns]")
d1 = np.timedelta64(1, "D")
assert Timedelta("1days") == conv(d1)
assert Timedelta("1days,") == conv(d1)
assert Timedelta("- 1days,") == -conv(d1)
assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s"))
assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.01") == conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s"))
assert Timedelta("1days, 06:00:01") == conv(
d1 + np.timedelta64(6 * 3600 + 1, "s")
)
assert Timedelta("1days, 06:00:01.01") == conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
# invalid
msg = "have leftover units"
with pytest.raises(ValueError, match=msg):
Timedelta("- 1days, 00")
def test_pickle(self):
v = Timedelta("1 days 10:11:12.0123456")
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, "D")
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = [Timedelta(seconds=1) + Timedelta(days=n) for n in range(20)]
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, "ns")
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == iNaT + 1
assert max_td.value == lib.i8max
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, "ns")) is NaT
msg = "int too (large|big) to convert"
with pytest.raises(OverflowError, match=msg):
min_td - | Timedelta(2, "ns") | pandas.Timedelta |
from typing import Any, Callable, Dict, Iterator, List, Optional, Union
from IPython.display import HTML, Image
import pydot
from collections import Counter
import json
import os
import re
from copy import deepcopy
from pathlib import Path
import lunchbox.tools as lbt
from pandas import DataFrame
import networkx
import rolling_pin.tools as tools
# ------------------------------------------------------------------------------
'''
Contains the BlobETL class, which is used for coverting JSON blobs, and their
python equivalents, into flat dictionaries that can easily be modified and
converted to directed graphs.
'''
class BlobETL():
'''
Converts blob data internally into a flat dictionary that is universally
searchable, editable and convertible back to the data's original structure,
new blob structures or directed graphs.
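Example (illustrative, using the default '/' separator):
    >>> etl = BlobETL({'a': {'b': 1, 'c': 2}})
    >>> etl.query('a/b').to_dict()
    {'a': {'b': 1}}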
'''
def __init__(self, blob, separator='/'):
# type: (Any, str) -> None
'''
Constructs BlobETL instance.
Args:
blob (object): Iterable object.
separator (str, optional): String to be used as a field separator in
each key. Default: '/'.
'''
self._data = tools \
.flatten(blob, separator=separator, embed_types=True) # type: Dict[str, Any]
self._separator = separator # type: str
# EDIT_METHODS--------------------------------------------------------------
def query(self, regex, ignore_case=True):
# type: (str, bool) -> BlobETL
'''
Filter data items by key according to given regular expression.
Args:
regex (str): Regular expression.
ignore_case (bool, optional): Whether to ignore case in the
regular expression search. Default: True.
Returns:
BlobETL: New BlobETL instance.
'''
if ignore_case:
return self.filter(lambda x: bool(re.search(regex, x, re.I)), by='key')
return self.filter(lambda x: bool(re.search(regex, x)), by='key')
def filter(self, predicate, by='key'):
# type: (Callable[[Any], bool], str) -> BlobETL
'''
Filter data items by key, value or key + value, according to a given
predicate.
Args:
predicate: Function that returns a boolean value.
by (str, optional): Value handed to predicate.
Options include: key, value, key+value. Default: key.
Raises:
ValueError: If by keyword is not key, value, or key+value.
Returns:
BlobETL: New BlobETL instance.
'''
data = {}
if by not in ['key', 'value', 'key+value']:
msg = f'Invalid by argument: {by}. Needs to be one of: '
msg += 'key, value, key+value.'
raise ValueError(msg)
for key, val in self._data.items():
item = None
if by == 'key':
item = [key]
elif by == 'value':
item = [val]
else:
item = [key, val]
if predicate(*item):
data[key] = val
return BlobETL(data, separator=self._separator)
def delete(self, predicate, by='key'):
# type: (Callable[[Any], bool], str) -> BlobETL
'''
Delete data items by key, value or key + value, according to a given
predicate.
Args:
predicate: Function that returns a boolean value.
by (str, optional): Value handed to predicate.
Options include: key, value, key+value. Default: key.
Raises:
ValueError: If by keyword is not key, value, or key+value.
Returns:
BlobETL: New BlobETL instance.
'''
data = deepcopy(self._data)
if by not in ['key', 'value', 'key+value']:
msg = f'Invalid by argument: {by}. Needs to be one of: '
msg += 'key, value, key+value.'
raise ValueError(msg)
for key, val in self._data.items():
item = None
if by == 'key':
item = [key]
elif by == 'value':
item = [val]
else:
item = [key, val]
if predicate(*item):
del data[key]
return BlobETL(data, separator=self._separator)
def set(
self,
predicate=None, # type: Optional[Callable[[Any, Any], bool]]
key_setter=None, # type: Optional[Callable[[Any, Any], str]]
value_setter=None, # type: Optional[Callable[[Any, Any], Any]]
):
# type: (...) -> BlobETL
'''
Filter data items by key, value or key + value, according to a given
predicate. Then set that items key by a given function and value by a
given function.
Args:
predicate (function, optional): Function of the form:
lambda k, v: bool. Default: None --> lambda k, v: True.
key_setter (function, optional): Function of the form:
lambda k, v: str. Default: None --> lambda k, v: k.
value_setter (function, optional): Function of the form:
lambda k, v: object. Default: None --> lambda k, v: v.
Returns:
BlobETL: New BlobETL instance.
'''
# assign default predicate
if predicate is None:
predicate = lambda k, v: True
# assign default key_setter
if key_setter is None:
key_setter = lambda k, v: k
# assign default value_setter
if value_setter is None:
value_setter = lambda k, v: v
data = deepcopy(self._data)
for item in self._data.items():
if predicate(*item):
k = key_setter(*item)
v = value_setter(*item)
del data[item[0]]
data[k] = v
return BlobETL(data, separator=self._separator)
def update(self, item):
# type: (Union[Dict, BlobETL]) -> BlobETL
'''
Updates internal dictionary with given dictionary or BlobETL instance.
Given dictionary is first flattened with embedded types.
Args:
item (dict or BlobETL): Dictionary to be used for update.
Returns:
BlobETL: New BlobETL instance.
'''
if isinstance(item, BlobETL):
item = item._data
temp = tools.flatten(item, separator=self._separator, embed_types=True)
data = deepcopy(self._data)
data.update(temp)
return BlobETL(data, separator=self._separator)
def set_field(self, index, field_setter):
# type: (int, Callable[[str], str]) -> BlobETL
'''
Sets a field at a given index according to a given function.
Args:
index (int): Field index.
field_setter (function): Function of form lambda str: str.
Returns:
BlobETL: New BlobETL instance.
'''
output = {}
for key, val in self._data.items():
fields = key.split(self._separator)
fields[index] = field_setter(fields[index])
key = self._separator.join(fields)
output[key] = val
return BlobETL(output, separator=self._separator)
# EXPORT-METHODS------------------------------------------------------------
def to_dict(self):
# type: () -> Dict[str, Any]
'''
Returns:
dict: Nested representation of internal data.
'''
return tools.unembed(
tools.nest(deepcopy(self._data), separator=self._separator)
)
def to_flat_dict(self):
# type: () -> Dict[str, Any]
'''
Returns:
dict: Flat dictionary with embedded types.
'''
return deepcopy(self._data)
def to_records(self):
# type: () -> List[Dict]
'''
Returns:
list[dict]: Data in records format.
'''
data = []
for key, val in self._data.items():
fields = key.split(self._separator)
row = {i: v for i, v in enumerate(fields)} # type: Dict[Any, Any]
row['value'] = val
data.append(row)
return data
def to_dataframe(self, group_by=None):
# type: (Optional[int]) -> DataFrame
'''
Convert data to pandas DataFrame.
Args:
group_by (int, optional): Field index to group rows of data by.
Default: None.
Returns:
DataFrame: DataFrame.
'''
data = self.to_records() # type: Any
data = DataFrame(data)
if group_by is not None:
group = list(range(0, group_by))
data = DataFrame(data)\
.groupby(group, as_index=False)\
.agg(lambda x: x.tolist())\
.apply(lambda x: x.to_dict(), axis=1)\
.tolist()
data = DataFrame(data)
# clean up column order
cols = data.columns.tolist() # type: List[str]
cols = list(sorted(filter(lambda x: x != 'value', cols)))
cols += ['value']
data = data[cols]
return data
def to_prototype(self):
# type: () -> BlobETL
'''
Convert data to prototypical representation.
Example:
>>> data = {
'users': [
{
'name': {
'first': 'tom',
'last': 'smith',
}
},{
'name': {
'first': 'dick',
'last': 'smith',
}
},{
'name': {
'first': 'jane',
'last': 'doe',
}
},
]
}
>>> BlobETL(data).to_prototype().to_dict()
{
'^users': {
'<list_[0-9]+>': {
'name': {
'first$': Counter({'dick': 1, 'jane': 1, 'tom': 1}),
'last$': Counter({'doe': 1, 'smith': 2})
}
}
}
}
Returns:
BlobETL: New BlobETL instance.
'''
def regex_in_list(regex, items):
# type: (str, List[str]) -> bool
for item in items:
if re.search(regex, item):
return True
return False # pragma: no cover
def field_combinations(a, b):
# type: (List[str], List[str]) -> List[str]
output = []
for fa in a:
for fb in b:
output.append(fa + self._separator + fb)
return output
keys = list(self._data.keys())
fields = list(map(lambda x: x.split(self._separator), keys))
fields = | DataFrame(fields) | pandas.DataFrame |
import os, sys
import timeit
import pandas as pd
#from dict_csv import *
import csv
import matplotlib.pyplot as plt
'''
data loading and preview
'''
start_time = timeit.default_timer()
# data loading using pandas
# show data sketch
# with open("../../data/fresh_comp_offline/tianchi_fresh_comp_train_user.csv", 'r') as data_file_user:
# chunks_user = pd.read_csv(data_file_user, iterator = True)
# with open("../../data/fresh_comp_offline/tianchi_fresh_comp_train_item.csv", mode = 'r') as data_file_item:
# chunks_item = pd.read_csv(data_file_item, iterator = True)
# chunk_user = chunks_user.get_chunk(5)
# chunk_item = chunks_item.get_chunk(5)
# print(chunk_user)
# print(chunk_item)
'''
data pre_analysis
'''
################################
# calculation of CTR
################################
# count_all = 0
# count_4 = 0 # the count of behavior_type = 4
# for df in pd.read_csv(open(r"tianchi_fresh_comp_train_user.csv", "r"),
# chunksize = 100000):
# try:
# count_user = df['behavior_type'].value_counts()
# count_all += count_user[1]+count_user[2]+count_user[3]+count_user[4]
# count_4 += count_user[4]
# except StopIteration:
# print("Iteration is stopped.")
# break
# # CTR
# ctr = count_4 / count_all
# print(ctr)
'''csv to dict'''
def csv2dict(csv_file, key, value):
new_dict = {}
with open(csv_file, 'r')as f:
reader = csv.reader(f, delimiter=',')
# fieldnames = next(reader)
# reader = csv.DictReader(f, fieldnames=fieldnames, delimiter=',')
for row in reader:
new_dict[row[key]] = row[value]
return new_dict
# convert csv file to dict(key-value pairs each row)
def row_csv2dict(csv_file=""):
new_dict = {}
with open(csv_file)as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
new_dict[row[0]] = row[1]
return new_dict
'''
convert dict to csv file
'''
# convert dict to csv file(key-value pairs each column)
def dict2csv(raw_dict={}, csv_file=""):
with open(csv_file, 'w') as f:
w = csv.writer(f)
# write all keys on one row and all values on the next
w.writerow(raw_dict.keys())
w.writerow(raw_dict.values())
# convert dict to csv file(key-value 1-1 pairs each row)
def row_dict2csv(raw_dict={}, csv_file=""):
with open(csv_file, 'w') as f:
w = csv.writer(f)
w.writerows(raw_dict.items())
# convert dict to csv file(key-[value] 1-M pairs each row)
def row2_dict2csv(raw_dict={}, csv_file=""):
with open(csv_file, 'w') as f:
w = csv.writer(f)
for k, v in raw_dict.items():
w.writerows([k, v])
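# Minimal round-trip sketch for the helpers above (the file name is illustrative): row_dict2csv
# writes one key,value pair per row and row_csv2dict reads them back. Values come back as strings
# because csv stores plain text; note the helpers do not pass newline='' to open(), so on Windows
# the writer may emit blank lines between rows.
def _example_dict_csv_roundtrip():
    counts = {'2014-11-18': 100, '2014-11-19': 250}
    row_dict2csv(counts, 'example_counts.csv')
    return row_csv2dict('example_counts.csv')  # {'2014-11-18': '100', '2014-11-19': '250'}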
'''
visualization month record based on date(11-18->12-18)
'''
# count_day = {} # using dictionary for date-count pairs
# for i in range(31): # for speed up the program, initial dictionary here
# if i <= 12:
# date = '2014-11-%d' % (i + 18)
# else:
# date = '2014-12-%d' % (i - 12)
# count_day[date] = 0
#
# batch = 0
# dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d %H')
# for df in pd.read_csv(("tianchi_fresh_comp_train_user.csv"),
# parse_dates=['time'], index_col=['time'], date_parser=dateparse,
# chunksize=100000):
# try:
# for i in range(31):
# if i <= 12:
# date = '2014-11-%d' % (i + 18)
# else:
# date = '2014-12-%d' % (i - 12)
# count_day[date] += df[date].shape[0]
# batch += 1
# print('chunk %d done.' % batch)
#
# except StopIteration:
# print("finish data process")
# break
#
# row_dict2csv(count_day, "count_day.csv")
#
# df_count_day = pd.read_csv("count_day.csv",
# header=None,
# names=['time', 'count'])
# # x_day = df_count_day.index.get_values()
# df_count_day = df_count_day.set_index('time')
# # x_date = df_count_day.index.get_values()
# # y = df_count_day['count'].get_values()
#
# df_count_day['count'].plot(kind='bar')
# plt.legend(loc='best')
# plt.grid(True)
# plt.show()
'''visualization month record based on date(11-18->12-18)
for item_id in P'''
# count_day = {} # using dictionary for date-count pairs
# for i in range(31): # for speed up the program, initial dictionary here
# if i <= 12:
# date = '2014-11-%d' % (i + 18)
# else:
# date = '2014-12-%d' % (i - 12)
# count_day[date] = 0
#
# batch = 0
# dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d %H')
#
# df_P = pd.read_csv('tianchi_fresh_comp_train_item.csv', index_col=False)
#
# for df in pd.read_csv('tianchi_fresh_comp_train_user.csv',
# parse_dates=['time'], index_col=['time'], date_parser=dateparse,
# chunksize=100000):
# try:
# df = pd.merge(df.reset_index(), df_P, on=['item_id']).set_index('time')
#
# for i in range(31):
# if i <= 12:
# date = '2014-11-%d' % (i + 18)
# else:
# date = '2014-12-%d' % (i - 12)
# count_day[date] += df[date].shape[0]
# batch += 1
# print('chunk %d done.' % batch)
#
# except StopIteration:
# print("finish data process")
# break
#
#
# row_dict2csv(count_day, 'count_day_of_P.csv')
#
# df_count_day = pd.read_csv('count_day_of_P.csv',
# header=None,
# names=['time', 'count'])
#
#
# # x_day = df_count_day.index.get_values()
# df_count_day = df_count_day.set_index('time')
# # x_date = df_count_day.index.get_values()
# # y = df_count_day['count'].get_values()
#
# df_count_day['count'].plot(kind='bar')
# plt.legend(loc='best')
# plt.title('behavior count of P by date')
# plt.grid(True)
# plt.show()
'''visualization based on hour(e.g. 12-17-18)'''
# count_hour_1217 = {}
# count_hour_1218 = {}
# for i in range(24):
# time_str17 = '2014-12-17 %02.d' % i
# time_str18 = '2014-12-18 %02.d' % i
# count_hour_1217[time_str17] = [0, 0, 0, 0]
# count_hour_1218[time_str18] = [0, 0, 0, 0]
# batch = 0
# dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d %H')
# for df in pd.read_csv('tianchi_fresh_comp_train_user.csv',
# parse_dates=['time'],
# index_col=['time'],
# date_parser=dateparse,
# chunksize=50000):
# try:
# for i in range(24):
# time_str17 = '2014-12-17 %02.d' % i
# time_str18 = '2014-12-18 %02.d' % i
# tmp17 = df[time_str17]['behavior_type'].value_counts()
# tmp18 = df[time_str18]['behavior_type'].value_counts()
# for j in range(len(tmp17)):
# count_hour_1217[time_str17][tmp17.index[j] - 1] += tmp17[tmp17.index[j]]
# for j in range(len(tmp18)):
# count_hour_1218[time_str18][tmp18.index[j] - 1] += tmp18[tmp18.index[j]]
# batch += 1
# print('chunk %d done' % batch)
#
# except StopIteration:
# print('finish data process')
# break
# df_1217 = pd.DataFrame.from_dict(count_hour_1217, orient='index')
# df_1218 = pd.DataFrame.from_dict(count_hour_1218, orient='index')
# # df_1217 = pd.read_csv('count_hour17.csv', index_col=0)
# # df_1218 = pd.read_csv('count_hour18.csv', index_col=0)
#
# df_1718 = pd.concat([df_1217, df_1218])
# f1 = plt.figure(1)
# df_1718.plot(kind='bar')
# plt.legend(loc='best')
# plt.grid(True)
# plt.show()
#
# f2 = plt.figure(2)
# df_1718[3].plot(kind='bar', color = 'r')
# plt.legend(loc='best')
# plt.grid(True)
# plt.show()
'''
user behavior analysis
'''
user_list = [10001082,
10496835,
107369933,
108266048,
10827687,
108461135,
110507614,
110939584,
111345634,
111699844]
user_count = {}
for i in range(10):
user_count[user_list[i]] = [0, 0, 0, 0] # key-value value = count of 4 types of behaviors
batch = 0 # for process printing
for df in pd.read_csv('tianchi_fresh_comp_train_user.csv',
chunksize=100000,
index_col=['user_id']):
try:
for i in range(10):
tmp = df[df.index == user_list[i]]['behavior_type'].value_counts()
for j in range(len(tmp)):
user_count[user_list[i]][tmp.index[j] - 1] += tmp[tmp.index[j]]
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print("Iteration is stopped.")
break
# storing the count result
df_user_count = | pd.DataFrame.from_dict(user_count, orient='index', columns=['浏览', '收藏', '加购物车', '购买']) | pandas.DataFrame.from_dict |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
from covid_epidemiology import model
from covid_epidemiology.src.models.shared import model_utils
class ModelUtilsTest(unittest.TestCase):
def test_update_metric_to_predictions(self):
metric_to_predictions = {
"Infected": [
model.Prediction(
time_horizon=1, predicted_value=20, ground_truth=12)
]
}
gt_data = [7, 2, 5, 7]
got = model_utils.update_metric_to_predictions(
metric="recovered_cases",
values=np.array([[12], [23]], np.float32),
metric_to_predictions=metric_to_predictions,
train_end_of_window=2,
gt_data=gt_data)
wanted = {
"Infected": [
model.Prediction(
time_horizon=1, predicted_value=20, ground_truth=12)
],
"recovered_cases": [
model.Prediction(
time_horizon=1, predicted_value=12, ground_truth=5),
model.Prediction(
time_horizon=2, predicted_value=23, ground_truth=7)
],
}
self.assertEqual(wanted, got)
def test_update_metric_to_predictions_with_quantiles(self):
metric_to_predictions = {}
gt_data = [7, 2, 5, 7]
got = model_utils.update_metric_to_predictions(
metric="recovered_cases",
values=np.array([[12, 14], [23, 25]], np.float32),
metric_to_predictions=metric_to_predictions,
train_end_of_window=2,
gt_data=gt_data,
quantiles=[0.1, 0.9],
metric_string_format="{metric}_{quantile}_quantile")
wanted = {
"recovered_cases_0.1_quantile": [
model.Prediction(
time_horizon=1, predicted_value=12, ground_truth=5),
model.Prediction(
time_horizon=2, predicted_value=23, ground_truth=7)
],
"recovered_cases_0.9_quantile": [
model.Prediction(
time_horizon=1, predicted_value=14, ground_truth=5),
model.Prediction(
time_horizon=2, predicted_value=25, ground_truth=7)
],
}
self.assertEqual(wanted, got)
def test_update_metric_to_predictions_offset(self):
metric_to_predictions = {
"Infected": [
model.Prediction(
time_horizon=1, predicted_value=20, ground_truth=12)
]
}
gt_data = [7, 2, 5, 7]
got = model_utils.update_metric_to_predictions(
metric="recovered_cases",
values=np.array([[12], [23]], np.float32),
metric_to_predictions=metric_to_predictions,
train_end_of_window=2,
gt_data=gt_data,
time_horizon_offset=2)
wanted = {
"Infected": [
model.Prediction(
time_horizon=1, predicted_value=20, ground_truth=12)
],
"recovered_cases": [
model.Prediction(
time_horizon=-1, predicted_value=12, ground_truth=5),
model.Prediction(
time_horizon=0, predicted_value=23, ground_truth=7)
],
}
self.assertEqual(wanted, got)
def test_populate_gt_list(self):
gt_list = np.zeros((2, 4))
gt_indicator = np.ones((2, 4))
location_to_gt = {
"IR": pd.Series([12, 4, 5]),
"US": | pd.Series([3, 4, 5, 6, 7]) | pandas.Series |
from metagraph import translator
from metagraph.plugins import has_pandas, has_networkx, has_scipy
if has_pandas:
from .types import PandasEdgeMap, PandasEdgeSet
@translator
def edgemap_to_edgeset(x: PandasEdgeMap, **props) -> PandasEdgeSet:
return PandasEdgeSet(
x.value, x.src_label, x.dst_label, is_directed=x.is_directed
)
if has_pandas and has_scipy:
import pandas as pd
from ..scipy.types import ScipyEdgeMap, ScipyEdgeSet
@translator
def edgemap_from_scipy(x: ScipyEdgeMap, **props) -> PandasEdgeMap:
is_directed = ScipyEdgeMap.Type.compute_abstract_properties(x, {"is_directed"})[
"is_directed"
]
coo_matrix = x.value.tocoo()
row_ids = x.node_list[coo_matrix.row]
column_ids = x.node_list[coo_matrix.col]
weights = coo_matrix.data
if not is_directed:
mask = row_ids <= column_ids
row_ids = row_ids[mask]
column_ids = column_ids[mask]
weights = weights[mask]
df = | pd.DataFrame({"source": row_ids, "target": column_ids, "weight": weights}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import sys
import os.path
import argparse
import time
import json
import multiprocessing as mp
import ray
from ms_deisotope import deconvolute_peaks, averagine, scoring
from ms_deisotope.deconvolution import peak_retention_strategy
import pickle
import configparser
from configparser import ExtendedInterpolation
from os.path import expanduser
import peakutils
from scipy import signal
import math
from sklearn.metrics.pairwise import cosine_similarity
import alphatims.bruker
import glob
# peak and valley detection parameters
PEAKS_THRESHOLD_RT = 0.5 # only consider peaks that are higher than this proportion of the normalised maximum
PEAKS_THRESHOLD_SCAN = 0.5
PEAKS_MIN_DIST_RT = 2.0 # seconds
PEAKS_MIN_DIST_SCAN = 10.0 # scans
VALLEYS_THRESHOLD_RT = 0.5 # only consider valleys that drop more than this proportion of the normalised maximum
VALLEYS_THRESHOLD_SCAN = 0.5
VALLEYS_MIN_DIST_RT = 2.0 # seconds
VALLEYS_MIN_DIST_SCAN = 10.0 # scans
# filter parameters
SCAN_FILTER_POLY_ORDER = 5
RT_FILTER_POLY_ORDER = 3
# determine the maximum filter length for the number of points
def find_filter_length(number_of_points):
filter_lengths = [51,11,5] # must be a positive odd number, greater than the polynomial order, and less than the number of points to be filtered
return filter_lengths[next(x[0] for x in enumerate(filter_lengths) if x[1] < number_of_points)]
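# Illustrative behaviour: find_filter_length(60) returns 51, find_filter_length(20) returns 11,
# and find_filter_length(8) returns 5. With 5 or fewer points no filter length qualifies and the
# next() call raises StopIteration, so callers need to ensure there are enough points to filter.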
# calculate the intensity-weighted centroid
# takes a numpy array of intensity, and another of mz
def intensity_weighted_centroid(_int_f, _x_f):
return ((_int_f/_int_f.sum()) * _x_f).sum()
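# A minimal sketch (made-up values) of the centroid calculation: three points at m/z
# 500.00, 500.01, 500.02 with intensities 100, 200, 100 have weights 0.25, 0.5, 0.25,
# giving an intensity-weighted centroid of 500.01.
def _example_intensity_weighted_centroid():
    intensities = np.array([100.0, 200.0, 100.0])
    mzs = np.array([500.00, 500.01, 500.02])
    return intensity_weighted_centroid(intensities, mzs)  # ~500.01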
# peaks_a is a numpy array of [mz,intensity]
# returns a numpy array of [intensity_weighted_centroid,summed_intensity]
def intensity_descent(peaks_a, peak_delta=None):
# intensity descent
peaks_l = []
while len(peaks_a) > 0:
# find the most intense point
max_intensity_index = np.argmax(peaks_a[:,1])
peak_mz = peaks_a[max_intensity_index,0]
if peak_delta == None:
peak_delta = calculate_peak_delta(mz=peak_mz)
peak_mz_lower = peak_mz - peak_delta
peak_mz_upper = peak_mz + peak_delta
# get all the raw points within this m/z region
peak_indexes = np.where((peaks_a[:,0] >= peak_mz_lower) & (peaks_a[:,0] <= peak_mz_upper))[0]
if len(peak_indexes) > 0:
mz_cent = intensity_weighted_centroid(peaks_a[peak_indexes,1], peaks_a[peak_indexes,0])
summed_intensity = peaks_a[peak_indexes,1].sum()
peaks_l.append((mz_cent, summed_intensity))
# remove the raw points assigned to this peak
peaks_a = np.delete(peaks_a, peak_indexes, axis=0)
return np.array(peaks_l)
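# A minimal usage sketch with made-up raw points: the first three points fall within one
# peak_delta window around the most intense point and are collapsed to a single centroided
# peak; the last point forms a second peak. peak_delta is passed explicitly here; when it is
# None the function derives it from the instrument resolution via calculate_peak_delta().
def _example_intensity_descent():
    raw_points = np.array([[500.000, 100.0],
                           [500.005, 300.0],
                           [500.010, 100.0],
                           [502.500, 250.0]])
    return intensity_descent(raw_points, peak_delta=0.05)  # rows of [centroid m/z, summed intensity]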
# find 3sigma for a specified m/z
def calculate_peak_delta(mz):
delta_m = mz / INSTRUMENT_RESOLUTION # FWHM of the peak
sigma = delta_m / 2.35482 # std dev is FWHM / 2.35482. See https://mathworld.wolfram.com/GaussianFunction.html
peak_delta = 3 * sigma # 99.7% of values fall within +/- 3 sigma
return peak_delta
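# Worked example of the calculation above, assuming INSTRUMENT_RESOLUTION = 40000 (the actual
# value is set elsewhere in this script): at m/z 500 the FWHM is 500 / 40000 = 0.0125, sigma is
# 0.0125 / 2.35482 ~= 0.00531, and the returned peak_delta is 3 * sigma ~= 0.0159 m/z.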
# calculate the monoisotopic mass
def calculate_monoisotopic_mass_from_mz(monoisotopic_mz, charge):
monoisotopic_mass = (monoisotopic_mz * charge) - (PROTON_MASS * charge)
return monoisotopic_mass
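# For example, a monoisotopic m/z of 500.5 at charge 2+ corresponds to a neutral monoisotopic
# mass of (500.5 * 2) - (2 * PROTON_MASS) ~= 998.99 Da (PROTON_MASS is defined elsewhere in
# this script as approximately 1.00728).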
# Find the ratio of H(peak_number)/H(peak_number-1) for peak_number=1..6
# peak_number = 0 refers to the monoisotopic peak
# number_of_sulphur = number of sulphur atoms in the molecule
#
# source: Valkenborg et al, "A Model-Based Method for the Prediction of the Isotopic Distribution of Peptides", https://core.ac.uk/download/pdf/82021511.pdf
def peak_ratio(monoisotopic_mass, peak_number, number_of_sulphur):
MAX_NUMBER_OF_SULPHUR_ATOMS = 3
MAX_NUMBER_OF_PREDICTED_RATIOS = 6
S0_r = np.empty(MAX_NUMBER_OF_PREDICTED_RATIOS+1, dtype=np.ndarray)
S0_r[1] = np.array([-0.00142320578040, 0.53158267080224, 0.00572776591574, -0.00040226083326, -0.00007968737684])
S0_r[2] = np.array([0.06258138406507, 0.24252967352808, 0.01729736525102, -0.00427641490976, 0.00038011211412])
S0_r[3] = np.array([0.03092092306220, 0.22353930450345, -0.02630395501009, 0.00728183023772, -0.00073155573939])
S0_r[4] = np.array([-0.02490747037406, 0.26363266501679, -0.07330346656184, 0.01876886839392, -0.00176688757979])
S0_r[5] = np.array([-0.19423148776489, 0.45952477474223, -0.18163820209523, 0.04173579115885, -0.00355426505742])
S0_r[6] = np.array([0.04574408690798, -0.05092121193598, 0.13874539944789, -0.04344815868749, 0.00449747222180])
S1_r = np.empty(MAX_NUMBER_OF_PREDICTED_RATIOS+1, dtype=np.ndarray)
S1_r[1] = np.array([-0.01040584267474, 0.53121149663696, 0.00576913817747, -0.00039325152252, -0.00007954180489])
S1_r[2] = np.array([0.37339166598255, -0.15814640001919, 0.24085046064819, -0.06068695741919, 0.00563606634601])
S1_r[3] = np.array([0.06969331604484, 0.28154425636993, -0.08121643989151, 0.02372741957255, -0.00238998426027])
S1_r[4] = np.array([0.04462649178239, 0.23204790123388, -0.06083969521863, 0.01564282892512, -0.00145145206815])
S1_r[5] = np.array([-0.20727547407753, 0.53536509500863, -0.22521649838170, 0.05180965157326, -0.00439750995163])
S1_r[6] = np.array([0.27169670700251, -0.37192045082925, 0.31939855191976, -0.08668833166842, 0.00822975581940])
S2_r = np.empty(MAX_NUMBER_OF_PREDICTED_RATIOS+1, dtype=np.ndarray)
S2_r[1] = np.array([-0.01937823810470, 0.53084210514216, 0.00580573751882, -0.00038281138203, -0.00007958217070])
S2_r[2] = np.array([0.68496829280011, -0.54558176102022, 0.44926662609767, -0.11154849560657, 0.01023294598884])
S2_r[3] = np.array([0.04215807391059, 0.40434195078925, -0.15884974959493, 0.04319968814535, -0.00413693825139])
S2_r[4] = np.array([0.14015578207913, 0.14407679007180, -0.01310480312503, 0.00362292256563, -0.00034189078786])
S2_r[5] = np.array([-0.02549241716294, 0.32153542852101, -0.11409513283836, 0.02617210469576, -0.00221816103608])
S2_r[6] = np.array([-0.14490868030324, 0.33629928307361, -0.08223564735018, 0.01023410734015, -0.00027717589598])
model_params = np.empty(MAX_NUMBER_OF_SULPHUR_ATOMS, dtype=np.ndarray)
model_params[0] = S0_r
model_params[1] = S1_r
model_params[2] = S2_r
ratio = None
if (((1 <= peak_number <= 3) & (((number_of_sulphur == 0) & (498 <= monoisotopic_mass <= 3915)) |
((number_of_sulphur == 1) & (530 <= monoisotopic_mass <= 3947)) |
((number_of_sulphur == 2) & (562 <= monoisotopic_mass <= 3978)))) |
((peak_number == 4) & (((number_of_sulphur == 0) & (907 <= monoisotopic_mass <= 3915)) |
((number_of_sulphur == 1) & (939 <= monoisotopic_mass <= 3947)) |
((number_of_sulphur == 2) & (971 <= monoisotopic_mass <= 3978)))) |
((peak_number == 5) & (((number_of_sulphur == 0) & (1219 <= monoisotopic_mass <= 3915)) |
((number_of_sulphur == 1) & (1251 <= monoisotopic_mass <= 3947)) |
((number_of_sulphur == 2) & (1283 <= monoisotopic_mass <= 3978)))) |
((peak_number == 6) & (((number_of_sulphur == 0) & (1559 <= monoisotopic_mass <= 3915)) |
((number_of_sulphur == 1) & (1591 <= monoisotopic_mass <= 3947)) |
((number_of_sulphur == 2) & (1623 <= monoisotopic_mass <= 3978))))):
beta0 = model_params[number_of_sulphur][peak_number][0]
beta1 = model_params[number_of_sulphur][peak_number][1]
beta2 = model_params[number_of_sulphur][peak_number][2]
beta3 = model_params[number_of_sulphur][peak_number][3]
beta4 = model_params[number_of_sulphur][peak_number][4]
scaled_m = monoisotopic_mass / 1000.0
ratio = beta0 + (beta1*scaled_m) + beta2*(scaled_m**2) + beta3*(scaled_m**3) + beta4*(scaled_m**4)
return ratio
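# A minimal sketch of chaining these ratios into an expected isotope envelope for a given
# monoisotopic peak height (illustrative only; peak_ratio returns None when the mass is outside
# the fitted range for a given peak number, so the chain stops there).
def _example_expected_envelope(monoisotopic_mass=1500.0, mono_intensity=1000.0, number_of_sulphur=0):
    intensities = [mono_intensity]
    for peak_number in range(1, 7):
        ratio = peak_ratio(monoisotopic_mass, peak_number, number_of_sulphur)
        if ratio is None:
            break
        intensities.append(intensities[-1] * ratio)  # H(n) = ratio * H(n-1)
    return intensities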
# calculate the cosine similarity of two peaks; each DF is assumed to have an x-axis column (named by x_label) and an 'intensity' column
def measure_peak_similarity(isotopeA_df, isotopeB_df, x_label, scale):
# scale the x axis so we can join them
isotopeA_df['x_scaled'] = (isotopeA_df[x_label] * scale).astype(int)
isotopeB_df['x_scaled'] = (isotopeB_df[x_label] * scale).astype(int)
# combine the isotopes by aligning the x-dimension points they have in common
combined_df = pd.merge(isotopeA_df, isotopeB_df, on='x_scaled', how='inner', suffixes=('_A', '_B')).sort_values(by='x_scaled')
combined_df = combined_df[['x_scaled','intensity_A','intensity_B']]
# calculate the similarity
return float(cosine_similarity([combined_df.intensity_A.values], [combined_df.intensity_B.values])) if len(combined_df) > 0 else None
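# A minimal usage sketch with made-up values: two isotopes sampled on the same retention-time
# grid with proportional intensities give a cosine similarity close to 1.0. The scale factor
# controls how finely the x-axis is quantised before the two peaks are joined.
def _example_measure_peak_similarity():
    rt = [100.0, 100.1, 100.2, 100.3]
    iso_a = pd.DataFrame({'retention_time_secs': rt, 'intensity': [10.0, 50.0, 40.0, 5.0]})
    iso_b = pd.DataFrame({'retention_time_secs': rt, 'intensity': [2.0, 10.0, 8.0, 1.0]})
    return measure_peak_similarity(iso_a, iso_b, x_label='retention_time_secs', scale=100)  # ~1.0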
# determine the mono peak apex and extent in CCS and RT and calculate isotopic peak intensities
def determine_mono_characteristics(envelope, mono_mz_lower, mono_mz_upper, monoisotopic_mass, cuboid_points_df):
# determine the raw points that belong to the mono peak
# we use the wider cuboid points because we want to discover the apex and extent in CCS and RT
mono_points_df = cuboid_points_df[(cuboid_points_df.mz >= mono_mz_lower) & (cuboid_points_df.mz <= mono_mz_upper)]
# determine the peak's extent in CCS and RT
if len(mono_points_df) > 0:
# collapsing the monoisotopic's summed points onto the mobility dimension
scan_df = mono_points_df.groupby(['scan'], as_index=False).intensity.sum()
scan_df.sort_values(by=['scan'], ascending=True, inplace=True)
# apply a smoothing filter to the points
scan_df['filtered_intensity'] = scan_df.intensity # set the default
try:
scan_df['filtered_intensity'] = signal.savgol_filter(scan_df.intensity, window_length=find_filter_length(number_of_points=len(scan_df)), polyorder=SCAN_FILTER_POLY_ORDER)
except:
pass
# find the peak(s)
peak_x_l = []
try:
peak_idxs = peakutils.indexes(scan_df.filtered_intensity.values.astype(int), thres=PEAKS_THRESHOLD_SCAN, min_dist=PEAKS_MIN_DIST_SCAN, thres_abs=False)
peak_x_l = scan_df.iloc[peak_idxs].scan.to_list()
except:
pass
if len(peak_x_l) == 0:
# if we couldn't find any peaks, take the maximum intensity point
peak_x_l = [scan_df.loc[scan_df.filtered_intensity.idxmax()].scan]
# peaks_df should now contain the rows from flattened_points_df that represent the peaks
peaks_df = scan_df[scan_df.scan.isin(peak_x_l)].copy()
# find the closest peak to the cuboid midpoint
cuboid_midpoint_scan = scan_df.scan.min() + ((scan_df.scan.max() - scan_df.scan.min()) / 2)
peaks_df['delta'] = abs(peaks_df.scan - cuboid_midpoint_scan)
peaks_df.sort_values(by=['delta'], ascending=True, inplace=True)
scan_apex = peaks_df.iloc[0].scan
# find the valleys nearest the scan apex
valley_idxs = peakutils.indexes(-scan_df.filtered_intensity.values.astype(int), thres=VALLEYS_THRESHOLD_SCAN, min_dist=VALLEYS_MIN_DIST_SCAN, thres_abs=False)
valley_x_l = scan_df.iloc[valley_idxs].scan.to_list()
valleys_df = scan_df[scan_df.scan.isin(valley_x_l)]
upper_x = valleys_df[valleys_df.scan > scan_apex].scan.min()
if math.isnan(upper_x):
upper_x = scan_df.scan.max()
lower_x = valleys_df[valleys_df.scan < scan_apex].scan.max()
if math.isnan(lower_x):
lower_x = scan_df.scan.min()
scan_lower = lower_x
scan_upper = upper_x
# constrain the mono points to the CCS extent
mono_points_df = mono_points_df[(mono_points_df.scan >= scan_lower) & (mono_points_df.scan <= scan_upper)]
# in the RT dimension, look wider to find the apex
rt_df = mono_points_df.groupby(['frame_id','retention_time_secs'], as_index=False).intensity.sum()
rt_df.sort_values(by=['retention_time_secs'], ascending=True, inplace=True)
# filter the points
rt_df['filtered_intensity'] = rt_df.intensity # set the default
try:
rt_df['filtered_intensity'] = signal.savgol_filter(rt_df.intensity, window_length=find_filter_length(number_of_points=len(rt_df)), polyorder=RT_FILTER_POLY_ORDER)
except:
pass
# find the peak(s)
peak_x_l = []
try:
peak_idxs = peakutils.indexes(rt_df.filtered_intensity.values.astype(int), thres=PEAKS_THRESHOLD_RT, min_dist=PEAKS_MIN_DIST_RT, thres_abs=False)
peak_x_l = rt_df.iloc[peak_idxs].retention_time_secs.to_list()
except:
pass
if len(peak_x_l) == 0:
# if we couldn't find any peaks, take the maximum intensity point
peak_x_l = [rt_df.loc[rt_df.filtered_intensity.idxmax()].retention_time_secs]
        # peaks_df now contains the rows from rt_df that represent the peaks
peaks_df = rt_df[rt_df.retention_time_secs.isin(peak_x_l)].copy()
# find the closest peak to the cuboid midpoint
cuboid_midpoint_rt = rt_df.retention_time_secs.min() + ((rt_df.retention_time_secs.max() - rt_df.retention_time_secs.min()) / 2)
peaks_df['delta'] = abs(peaks_df.retention_time_secs - cuboid_midpoint_rt)
peaks_df.sort_values(by=['delta'], ascending=True, inplace=True)
rt_apex = peaks_df.iloc[0].retention_time_secs
# find the valleys nearest the RT apex
valley_idxs = peakutils.indexes(-rt_df.filtered_intensity.values.astype(int), thres=VALLEYS_THRESHOLD_RT, min_dist=VALLEYS_MIN_DIST_RT, thres_abs=False)
valley_x_l = rt_df.iloc[valley_idxs].retention_time_secs.to_list()
valleys_df = rt_df[rt_df.retention_time_secs.isin(valley_x_l)]
upper_x = valleys_df[valleys_df.retention_time_secs > rt_apex].retention_time_secs.min()
if math.isnan(upper_x):
upper_x = rt_df.retention_time_secs.max()
lower_x = valleys_df[valleys_df.retention_time_secs < rt_apex].retention_time_secs.max()
if math.isnan(lower_x):
lower_x = rt_df.retention_time_secs.min()
rt_lower = lower_x
rt_upper = upper_x
# constrain the mono points to the RT extent
mono_points_df = mono_points_df[(mono_points_df.retention_time_secs >= rt_lower) & (mono_points_df.retention_time_secs <= rt_upper)]
# for the whole feature, constrain the raw points to the CCS and RT extent of the monoisotopic peak
mono_ccs_rt_extent_df = cuboid_points_df[(cuboid_points_df.scan >= scan_lower) & (cuboid_points_df.scan <= scan_upper) & (cuboid_points_df.retention_time_secs >= rt_lower) & (cuboid_points_df.retention_time_secs <= rt_upper)]
# calculate the isotope intensities from the constrained raw points
isotopes_l = []
for idx,isotope in enumerate(envelope):
# gather the points that belong to this isotope
iso_mz = isotope[0]
iso_intensity = isotope[1]
iso_mz_delta = calculate_peak_delta(iso_mz)
iso_mz_lower = iso_mz - iso_mz_delta
iso_mz_upper = iso_mz + iso_mz_delta
isotope_df = mono_ccs_rt_extent_df[(mono_ccs_rt_extent_df.mz >= iso_mz_lower) & (mono_ccs_rt_extent_df.mz <= iso_mz_upper)]
if len(isotope_df) > 0:
# find the intensity by summing the maximum point in the frame closest to the RT apex, and the frame maximums either side
frame_maximums_df = isotope_df.groupby(['retention_time_secs'], as_index=False, sort=False).intensity.agg(['max']).reset_index()
frame_maximums_df['rt_delta'] = np.abs(frame_maximums_df.retention_time_secs - rt_apex)
frame_maximums_df.sort_values(by=['rt_delta'], ascending=True, inplace=True)
# sum the maximum intensity and the max intensity of the frame either side in RT
summed_intensity = frame_maximums_df[:3]['max'].sum()
# are any of the three points in saturation?
isotope_in_saturation = (frame_maximums_df[:3]['max'].max() > SATURATION_INTENSITY)
# determine the isotope's profile in retention time
iso_rt_df = isotope_df.groupby(['retention_time_secs'], as_index=False).intensity.sum()
iso_rt_df.sort_values(by=['retention_time_secs'], ascending=True, inplace=True)
                # measure its elution similarity with the previous isotope
similarity_rt = measure_peak_similarity(pd.read_json(isotopes_l[idx-1]['rt_df']), iso_rt_df, x_label='retention_time_secs', scale=100) if idx > 0 else None
# determine the isotope's profile in mobility
iso_scan_df = isotope_df.groupby(['scan'], as_index=False).intensity.sum()
iso_scan_df.sort_values(by=['scan'], ascending=True, inplace=True)
                # measure its mobility profile similarity with the previous isotope
similarity_scan = measure_peak_similarity(pd.read_json(isotopes_l[idx-1]['scan_df']), iso_scan_df, x_label='scan', scale=1) if idx > 0 else None
# add the isotope to the list
isotopes_l.append({'mz':iso_mz, 'mz_lower':iso_mz_lower, 'mz_upper':iso_mz_upper, 'intensity':summed_intensity, 'saturated':isotope_in_saturation, 'rt_df':iso_rt_df.to_json(orient='records'), 'scan_df':iso_scan_df.to_json(orient='records'), 'similarity_rt':similarity_rt, 'similarity_scan':similarity_scan})
else:
break
        isotopes_df = pd.DataFrame(isotopes_l)
import unittest
from random import random
from craft_ai.pandas import CRAFTAI_PANDAS_ENABLED
if CRAFTAI_PANDAS_ENABLED:
import copy
import pandas as pd
from numpy.random import randn
import craft_ai.pandas
from .data import pandas_valid_data, valid_data
from .utils import generate_entity_id
from . import settings
AGENT_ID_1_BASE = "test_pandas_1"
AGENT_ID_2_BASE = "test_pandas_2"
GENERATOR_ID_BASE = "test_pandas_generator"
SIMPLE_AGENT_CONFIGURATION = pandas_valid_data.SIMPLE_AGENT_CONFIGURATION
SIMPLE_AGENT_BOOSTING_CONFIGURATION = (
pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION
)
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE = (
pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE
)
AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE = (
pandas_valid_data.AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE
)
SIMPLE_AGENT_DATA = pandas_valid_data.SIMPLE_AGENT_DATA
SIMPLE_AGENT_BOOSTING_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_DATA
SIMPLE_AGENT_BOOSTING_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_MANY_DATA
AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA = (
pandas_valid_data.AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA
)
SIMPLE_AGENT_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_MANY_DATA
COMPLEX_AGENT_CONFIGURATION = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION
COMPLEX_AGENT_CONFIGURATION_2 = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION_2
COMPLEX_AGENT_DATA = pandas_valid_data.COMPLEX_AGENT_DATA
COMPLEX_AGENT_DATA_2 = pandas_valid_data.COMPLEX_AGENT_DATA_2
DATETIME_AGENT_CONFIGURATION = pandas_valid_data.DATETIME_AGENT_CONFIGURATION
DATETIME_AGENT_DATA = pandas_valid_data.DATETIME_AGENT_DATA
MISSING_AGENT_CONFIGURATION = pandas_valid_data.MISSING_AGENT_CONFIGURATION
MISSING_AGENT_DATA = pandas_valid_data.MISSING_AGENT_DATA
MISSING_AGENT_DATA_DECISION = pandas_valid_data.MISSING_AGENT_DATA_DECISION
INVALID_PYTHON_IDENTIFIER_CONFIGURATION = (
pandas_valid_data.INVALID_PYTHON_IDENTIFIER_CONFIGURATION
)
INVALID_PYTHON_IDENTIFIER_DATA = pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DATA
INVALID_PYTHON_IDENTIFIER_DECISION = (
pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DECISION
)
EMPTY_TREE = pandas_valid_data.EMPTY_TREE
CLIENT = craft_ai.pandas.Client(settings.CRAFT_CFG)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_bad_index(self):
df = pd.DataFrame(randn(10, 5), columns=["a", "b", "c", "d", "e"])
self.assertRaises(
craft_ai.pandas.errors.CraftAiBadRequestError,
CLIENT.add_agent_operations,
self.agent_id,
df,
)
def test_add_agent_operations_df(self):
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_websocket(self):
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_unexpected_property(self):
df = pd.DataFrame(
randn(300, 6),
columns=["a", "b", "c", "d", "e", "f"],
index=pd.date_range("20200101", periods=300, freq="T").tz_localize(
"Europe/Paris"
),
)
self.assertRaises(
craft_ai.pandas.errors.CraftAiBadRequestError,
CLIENT.add_agent_operations,
self.agent_id,
df,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_complex_agent(self):
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_complex_agent_websocket(self):
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_without_tz(self):
test_df = COMPLEX_AGENT_DATA.drop(columns="tz")
CLIENT.add_agent_operations(self.agent_id, test_df)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_without_tz_websocket(self):
test_df = COMPLEX_AGENT_DATA.drop(columns="tz")
CLIENT.add_agent_operations(self.agent_id, test_df, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasMissingAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "MissingAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(MISSING_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_missing_agent(self):
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_missing_agent_websocket(self):
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_agent_operations_df(self):
df = CLIENT.get_agent_operations(self.agent_id)
self.assertEqual(len(df), 300)
self.assertEqual(len(df.dtypes), 5)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-01 04:59:00", tz="Europe/Paris"),
)
def test_get_agent_states_df(self):
df = CLIENT.get_agent_states(self.agent_id)
self.assertEqual(len(df), 180)
self.assertEqual(len(df.dtypes), 5)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-01 04:58:20", tz="Europe/Paris"),
)
def test_tree_visualization(self):
tree1 = CLIENT.get_agent_decision_tree(
self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9
)
craft_ai.pandas.utils.create_tree_html(tree1, "", "constant", None, 500)
def test_display_tree_raised_error(self):
tree1 = CLIENT.get_agent_decision_tree(
self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9
)
self.assertRaises(
craft_ai.pandas.errors.CraftAiError,
craft_ai.pandas.utils.display_tree,
tree1,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgentWithOperations(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWOp")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, valid_data.VALID_OPERATIONS_SET)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_decision_tree_with_pdtimestamp(self):
# test if we get the same decision tree
decision_tree = CLIENT.get_agent_decision_tree(
self.agent_id, pd.Timestamp(valid_data.VALID_TIMESTAMP, unit="s", tz="UTC")
)
ground_truth_decision_tree = CLIENT.get_agent_decision_tree(
self.agent_id, valid_data.VALID_TIMESTAMP
)
self.assertIsInstance(decision_tree, dict)
self.assertNotEqual(decision_tree.get("_version"), None)
self.assertNotEqual(decision_tree.get("configuration"), None)
self.assertNotEqual(decision_tree.get("trees"), None)
self.assertEqual(decision_tree, ground_truth_decision_tree)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_agent_operations_df_complex_agent(self):
df = CLIENT.get_agent_operations(self.agent_id)
self.assertEqual(len(df), 10)
self.assertEqual(len(df.dtypes), 3)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-10 00:00:00", tz="Europe/Paris"),
)
def test_decide_from_contexts_df(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
test_df = COMPLEX_AGENT_DATA
test_df_copy = test_df.copy(deep=True)
df = CLIENT.decide_from_contexts_df(tree, test_df)
self.assertEqual(len(df), 10)
self.assertEqual(len(df.dtypes), 6)
self.assertTrue(test_df.equals(test_df_copy))
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-10 00:00:00", tz="Europe/Paris"),
)
# Also works as before, with a plain context
output = CLIENT.decide(tree, {"a": 1, "tz": "+02:00"})
self.assertEqual(output["output"]["b"]["predicted_value"], "Pierre")
def test_decide_from_contexts_df_zero_rows(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
test_df = COMPLEX_AGENT_DATA.iloc[:0, :]
self.assertRaises(
craft_ai.errors.CraftAiBadRequestError,
CLIENT.decide_from_contexts_df,
tree,
test_df,
)
def test_decide_from_contexts_df_empty_df(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
self.assertRaises(
craft_ai.errors.CraftAiBadRequestError,
CLIENT.decide_from_contexts_df,
tree,
pd.DataFrame(),
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgent2WithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent2WData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION_2, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_decide_from_contexts_df_null_decisions(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
test_df = pd.DataFrame(
[["Jean-Pierre", "+02:00"], ["Paul"]],
columns=["b", "tz"],
index=pd.date_range("20200201", periods=2, freq="D").tz_localize(
"Europe/Paris"
),
)
test_df_copy = test_df.copy(deep=True)
df = CLIENT.decide_from_contexts_df(tree, test_df)
self.assertEqual(len(df), 2)
self.assertTrue(test_df.equals(test_df_copy))
self.assertTrue(pd.notnull(df["a_predicted_value"][0]))
self.assertTrue(pd.notnull(df["a_predicted_value"][1]))
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgent3WithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent3WData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION_2, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA_2)
def test_decide_from_contexts_df_empty_tree(self):
test_df = pd.DataFrame(
[[0, "Jean-Pierre", "+02:00"], [1, "Paul", "+02:00"]],
columns=["a", "b", "tz"],
index=pd.date_range("20200201", periods=2, freq="D").tz_localize(
"Europe/Paris"
),
)
df = CLIENT.decide_from_contexts_df(EMPTY_TREE, test_df)
expected_error_message = (
"Unable to take decision: the decision tree is not "
"based on any context operations."
)
self.assertEqual(len(df), 2)
self.assertEqual(df.columns, ["error"])
self.assertEqual(df["error"][0], expected_error_message)
self.assertEqual(df["error"][1], expected_error_message)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_decide_from_contexts_df_with_array(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA_2.last_valid_index().value // 10 ** 9
)
test_df = pd.DataFrame(
[["Jean-Pierre", "+02:00"], ["Paul"]],
columns=["b", "tz"],
index=pd.date_range("20200201", periods=2, freq="D").tz_localize(
"Europe/Paris"
),
)
test_df_copy = test_df.copy(deep=True)
df = CLIENT.decide_from_contexts_df(tree, test_df)
self.assertEqual(len(df), 2)
self.assertTrue(test_df.equals(test_df_copy))
self.assertTrue(pd.notnull(df["a_predicted_value"][0]))
self.assertTrue(pd.notnull(df["a_predicted_value"][1]))
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasMissingAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "MissingAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(MISSING_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_decide_from_missing_contexts_df(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9, "2"
)
df = CLIENT.decide_from_contexts_df(tree, MISSING_AGENT_DATA_DECISION)
self.assertEqual(len(df), 2)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-02 00:00:00", tz="Europe/Paris"),
)
# Also works as before, with a context containing an optional value
output = CLIENT.decide(tree, {"b": {}, "tz": "+02:00"})
self.assertTrue(pd.notnull(output["output"]["a"]["predicted_value"]))
# Also works as before, with a context containing a missing value
output = CLIENT.decide(tree, {"b": None, "tz": "+02:00"})
self.assertTrue(pd.notnull(output["output"]["a"]["predicted_value"]))
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasDatetimeAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "DatetimeAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(DATETIME_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, DATETIME_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_datetime_states_df(self):
df = CLIENT.get_agent_states(self.agent_id)
self.assertEqual(len(df), 10)
self.assertEqual(len(df.dtypes), 4)
self.assertEqual(df["myTimeOfDay"].tolist(), [2, 3, 6, 7, 4, 5, 14, 15, 16, 19])
# This test is commented because of the current non-deterministic behavior of craft ai.
# def test_datetime_decide_from_contexts_df(self):
# tree = CLIENT.get_agent_decision_tree(AGENT_ID,
# DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9)
# test_df = pd.DataFrame(
# [
# [1],
# [3],
# [7]
# ],
# columns=["a"],
# index=pd.date_range("20200101 00:00:00",
# periods=3,
# freq="H").tz_localize("Asia/Shanghai"))
# test_df_copy = test_df.copy(deep=True)
# df = CLIENT.decide_from_contexts_df(tree, test_df)
# self.assertEqual(len(df), 3)
# self.assertEqual(len(df.dtypes), 6)
# self.assertEqual(df["b_predicted_value"].tolist(), ["Pierre", "Paul", "Jacques"])
# self.assertTrue(test_df.equals(test_df_copy))
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasAgentWithInvalidIdentifier(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "InvalidIdentifier")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(INVALID_PYTHON_IDENTIFIER_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, INVALID_PYTHON_IDENTIFIER_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_decide_from_python_invalid_identifier(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id,
INVALID_PYTHON_IDENTIFIER_DATA.last_valid_index().value // 10 ** 9,
"2",
)
test_df = INVALID_PYTHON_IDENTIFIER_DECISION.copy(deep=True)
df = CLIENT.decide_from_contexts_df(tree, test_df)
self.assertEqual(len(df), 3)
self.assertEqual(len(df.dtypes), 8)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasGeneratorWithOperation(unittest.TestCase):
def setUp(self):
self.agent_1_id = generate_entity_id(AGENT_ID_1_BASE + "GeneratorWithOp")
self.agent_2_id = generate_entity_id(AGENT_ID_2_BASE + "GeneratorWithOp")
self.generator_id = generate_entity_id(GENERATOR_ID_BASE + "GeneratorWithOp")
CLIENT.delete_agent(self.agent_1_id)
CLIENT.delete_agent(self.agent_2_id)
CLIENT.delete_generator(self.generator_id)
CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_1_id)
CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_2_id)
CLIENT.add_agent_operations(self.agent_1_id, valid_data.VALID_OPERATIONS_SET)
CLIENT.add_agent_operations(self.agent_2_id, valid_data.VALID_OPERATIONS_SET)
generator_configuration = copy.deepcopy(
valid_data.VALID_GENERATOR_CONFIGURATION
)
generator_configuration["filter"] = [self.agent_1_id, self.agent_2_id]
CLIENT.create_generator(generator_configuration, self.generator_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_1_id)
CLIENT.delete_agent(self.agent_2_id)
CLIENT.delete_generator(self.generator_id)
def test_get_generator_decision_tree_with_pdtimestamp(self):
# test if we get the same decision tree
decision_tree = CLIENT.get_generator_decision_tree(
self.generator_id,
pd.Timestamp(valid_data.VALID_TIMESTAMP, unit="s", tz="UTC"),
)
ground_truth_decision_tree = CLIENT.get_generator_decision_tree(
self.generator_id, valid_data.VALID_TIMESTAMP
)
self.assertIsInstance(decision_tree, dict)
self.assertNotEqual(decision_tree.get("_version"), None)
self.assertNotEqual(decision_tree.get("configuration"), None)
self.assertNotEqual(decision_tree.get("trees"), None)
self.assertEqual(decision_tree, ground_truth_decision_tree)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasBoostingSimpleAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "BoostingAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_BOOSTING_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_BOOSTING_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_decide_boosting_from_contexts_df(self):
context_df = pd.DataFrame(
[[random(), random(), random(), "+01:00"] for i in range(4)],
columns=["b", "c", "d", "e"],
index=pd.date_range("20200101", periods=4, freq="T").tz_localize(
"Europe/Paris",
),
)
decisions = CLIENT.decide_boosting_from_contexts_df(
self.agent_id,
SIMPLE_AGENT_BOOSTING_DATA.first_valid_index().value // 10 ** 9,
SIMPLE_AGENT_BOOSTING_DATA.last_valid_index().value // 10 ** 9,
context_df,
)
self.assertEqual(decisions.shape[0], 4)
self.assertTrue(len(decisions.columns) == 1)
self.assertTrue("a_predicted_value" in decisions.columns)
self.assertTrue(
            isinstance(decisions.iloc[0]["a_predicted_value"], (int, float))
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasBoostingGeneratorWithOperation(unittest.TestCase):
def setUp(self):
self.agent_1_id = generate_entity_id(AGENT_ID_1_BASE + "BoostGeneratorWithOp")
self.agent_2_id = generate_entity_id(AGENT_ID_2_BASE + "BoostGeneratorWithOp")
self.generator_id = generate_entity_id(
GENERATOR_ID_BASE + "BoostGeneratorWithOp"
)
CLIENT.delete_agent(self.agent_1_id)
CLIENT.delete_agent(self.agent_2_id)
CLIENT.delete_generator(self.generator_id)
CLIENT.create_agent(SIMPLE_AGENT_BOOSTING_CONFIGURATION, self.agent_1_id)
CLIENT.create_agent(SIMPLE_AGENT_BOOSTING_CONFIGURATION, self.agent_2_id)
CLIENT.add_agent_operations(self.agent_1_id, SIMPLE_AGENT_BOOSTING_DATA)
CLIENT.add_agent_operations(self.agent_2_id, SIMPLE_AGENT_BOOSTING_MANY_DATA)
generator_configuration = copy.deepcopy(SIMPLE_AGENT_BOOSTING_CONFIGURATION)
generator_configuration["filter"] = [self.agent_1_id, self.agent_2_id]
CLIENT.create_generator(generator_configuration, self.generator_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_1_id)
CLIENT.delete_agent(self.agent_2_id)
CLIENT.delete_generator(self.generator_id)
def test_get_generator_boosting_with_pdtimestamp(self):
context_df = pd.DataFrame(
[[random(), random(), random(), "+01:00"] for i in range(4)],
columns=["b", "c", "d", "e"],
index=pd.date_range("20200101", periods=4, freq="T").tz_localize(
"Europe/Paris",
),
)
decisions = CLIENT.decide_generator_boosting_from_contexts_df(
self.generator_id,
SIMPLE_AGENT_BOOSTING_DATA.first_valid_index().value // 10 ** 9,
SIMPLE_AGENT_BOOSTING_MANY_DATA.last_valid_index().value // 10 ** 9,
context_df,
)
self.assertEqual(decisions.shape[0], 4)
self.assertTrue(len(decisions.columns) == 1)
self.assertTrue("a_predicted_value" in decisions.columns)
self.assertTrue(
            isinstance(decisions.iloc[0]["a_predicted_value"], (int, float))
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasBoostingGeneratorWithGeneratedType(unittest.TestCase):
def setUp(self):
self.agent_1_id = generate_entity_id(
AGENT_ID_1_BASE + "BoostGeneratorWithGenType"
)
self.agent_2_id = generate_entity_id(
AGENT_ID_2_BASE + "BoostGeneratorWithGenType"
)
self.generator_id = generate_entity_id(
GENERATOR_ID_BASE + "BoostGeneratorWithGenType"
)
CLIENT.delete_agent(self.agent_1_id)
CLIENT.delete_agent(self.agent_2_id)
CLIENT.delete_generator(self.generator_id)
CLIENT.create_agent(
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE, self.agent_1_id
)
CLIENT.create_agent(
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE, self.agent_2_id
)
CLIENT.add_agent_operations(self.agent_1_id, SIMPLE_AGENT_BOOSTING_DATA)
CLIENT.add_agent_operations(self.agent_2_id, SIMPLE_AGENT_BOOSTING_MANY_DATA)
generator_configuration = copy.deepcopy(
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE
)
generator_configuration["filter"] = [self.agent_1_id, self.agent_2_id]
CLIENT.create_generator(generator_configuration, self.generator_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_1_id)
CLIENT.delete_agent(self.agent_2_id)
CLIENT.delete_generator(self.generator_id)
def test_get_generator_boosting_with_pdtimestamp(self):
context_df = pd.DataFrame(
[[random(), random(), random(), "+01:00"] for i in range(4)],
columns=["b", "c", "d", "e"],
            index=pd.date_range("20200101", periods=4, freq="T")
# install pattern
# install gensim
# install nltk
# install pyspellchecker
import re
import pandas as pd
import numpy as np
import gensim
from collections import Counter
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import wordnet
from spellchecker import SpellChecker
class Cleaning:
def __init__(self):
self.WORDS = {}
return
# remove urls (starts with https, http)
def remove_URL(self, col):
text = col.tolist()
TEXT=[]
for word in text:
if pd.isnull(word):
TEXT.append(word)
else:
TEXT.append(re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', str(word)))
se = pd.Series(TEXT)
return(se)
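    # Illustrative usage sketch (added for clarity; the call below is a hypothetical
    # example, not part of the original class):
    #   cleaner = Cleaning()
    #   cleaned = cleaner.remove_URL(pd.Series(["details at https://example.com today", None]))
    #   # -> pd.Series(["details at  today", None]); null entries are passed through unchanged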
def count_mark(self, col):
df = pd.DataFrame()
rdf = pd.DataFrame()
# remove the special characters (numbers, exclamations and question marks) from the text
# store them in a dataframe for later use
text = col.tolist()
for row in text:
if pd.isnull(row):
ser = pd.Series([np.nan,np.nan,np.nan,np.nan], index=['Number', 'Exclamation_count', 'Question_Mark_count', 'Comments_OE'])
df = df.append(ser, ignore_index=True)
else:
numVals = []
excCount = []
quesCount = []
num = re.findall(r'\b\d+\b', row)
numVals.append(num)
# remove the number from the text
for n in num:
row = row.replace(n, '')
excCount.append(row.count('!'))
row = row.replace('!', '')
quesCount.append(row.count('?'))
row = row.replace('?', '')
                numSeries = pd.Series(numVals)
# Written by i3s
import os
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import time
from sklearn.model_selection import KFold
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
def proj_l1ball(y, eta):
"""
    Project y onto the l1 ball of radius eta. y should preferably be 1D; otherwise the
    result of element-wise operations on it can be unpredictable.
    The input is automatically reshaped to (m,), where m is y.size (i.e. y.shape[0]*y.shape[1] for a 2D array).
"""
if type(y) is not np.ndarray:
y = np.array(y)
if y.ndim > 1:
y = np.reshape(y, (-1,))
return np.maximum(
np.absolute(y)
- np.amax(
[
np.amax(
(np.cumsum(np.sort(np.absolute(y), axis=0)[::-1], axis=0) - eta)
/ (np.arange(y.shape[0]) + 1)
),
0,
]
),
0,
) * np.sign(y)
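# Illustrative sketch (added for clarity; not part of the original i3s code).
# It projects a small vector onto the l1 ball of radius eta = 1.0 and checks the
# defining property of the projection: the result has l1 norm at most eta.
def _demo_proj_l1ball():
    v = np.array([0.5, -2.0, 1.0])
    p = proj_l1ball(v, 1.0)
    assert np.sum(np.abs(p)) <= 1.0 + 1e-9  # here p == [0., -1., 0.]
    return p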
def centroids(XW, Y, k):
Y = np.reshape(Y, -1)
d = XW.shape[1]
mu = np.zeros((k, d))
"""
    Since Python indices start at 0 rather than 1,
    the MATLAB-style test Y==i becomes Y==(i+1) here
    (alternatively, the values in Y would need to be shifted).
"""
for i in range(k):
C = XW[Y == (i + 1), :]
mu[i, :] = np.mean(C, axis=0)
return mu
def class2indicator(y, k):
if len(y.shape) > 1:
# Either throw exception or transform y, here the latter is chosen.
# Note that a list object has no attribute 'flatten()' as np.array do,
# We use x = np.reshape(y,-1) instead of x = y.flatten() in case of
# the type of 'list' of argument y
y = np.reshape(y, -1)
n = len(y)
Y = np.zeros((n, k)) # dtype=float by default
"""
    Since Python indices start at 0 rather than 1,
    the MATLAB-style test y==i becomes y==(i+1) here.
"""
for i in range(k):
Y[:, i] = y == (i + 1)
return Y
def nb_Genes(w):
# Return the number of selected genes from the matrix (numpy.ndarray) w
d = w.shape[0]
ind_genes = np.zeros((d, 1))
for i in range(d):
if np.linalg.norm(w[i, :]) > 0:
ind_genes[i] = 1
indGene_w = np.where(ind_genes == 1)[0]
nbG = int(np.sum(ind_genes))
return nbG, indGene_w
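# Illustrative sketch (added): a toy 4-gene, 2-class weight matrix in which only
# rows 0 and 2 carry non-zero weights, so nb_Genes reports 2 selected genes.
def _demo_nb_Genes():
    w = np.array([[0.5, 0.0], [0.0, 0.0], [0.0, -1.2], [0.0, 0.0]])
    nbG, indGene_w = nb_Genes(w)
    # nbG == 2 and indGene_w == array([0, 2])
    return nbG, indGene_w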
def select_feature_w(w, featurenames):
k = w.shape[1]
d = w.shape[0]
lst_features = []
lst_norm = []
for i in range(k):
s_tmp = w[:, i] # the i-th column
f_tmp = np.abs(s_tmp) # the absolute values of this column
ind = np.argsort(f_tmp)[
::-1
] # the indices of the sorted abs column (descending order)
f_tmp = np.sort(f_tmp)[::-1] # the sorted abs column (descending order)
nonzero_inds = np.nonzero(f_tmp)[0] # the nonzero indices
lst_f = []
lst_n = []
if len(nonzero_inds) > 0:
nozero_ind = nonzero_inds[-1] # choose the last nonzero index
if nozero_ind == 0:
lst_f.append(featurenames[ind[0]])
lst_n.append(s_tmp[ind[0]])
else:
for j in range(nozero_ind + 1):
lst_f.append(featurenames[ind[j]])
lst_n = s_tmp[ind[0 : (nozero_ind + 1)]]
lst_features.append(lst_f)
lst_norm.append(lst_n)
n_cols_f = len(lst_features)
    n_rows_f = max(map(len, lst_features))  # maximum subset length
n_cols_n = len(lst_norm)
n_rows_n = max(map(len, lst_norm))
for i in range(n_cols_f):
ft = np.array(lst_features[i])
ft.resize(n_rows_f, refcheck=False)
nt = np.array(lst_norm[i])
nt.resize(n_rows_n, refcheck=False)
if i == 0:
features = ft
normW = nt
continue
features = np.vstack((features, ft))
normW = np.vstack((normW, nt))
features = features.T
normW = normW.T
return features, normW
def compute_accuracy(idxR, idx, k):
"""
# ===============================
#----- INPUT
# idxR : real labels
# idx : estimated labels
# k : number of class
#----- OUTPUT
# ACC_glob : global accuracy
# tab_acc : accuracy per class
# ===============================
"""
    # Note that Python's native sum works better on lists than on numpy arrays,
    # while numpy.sum works better on numpy arrays than on lists,
    # so numpy.ndarray is chosen as the default type for idxR and idx.
    if type(idxR) is not np.ndarray:
        idxR = np.array(idxR)
    if type(idx) is not np.ndarray:
        idx = np.array(idx)
if idxR.ndim == 2 and 1 not in idxR.shape:
idxR = np.reshape(idxR, (-1, 1))
if idx.ndim == 1:
idx = np.reshape(idx, idxR.shape)
# Global accuracy
y = np.sum(idxR == idx)
ACC_glob = y / len(idxR)
# Accuracy per class
tab_acc = np.zeros((1, k))
"""
    Since Python indices start at 0 rather than 1,
    the MATLAB-style test idx(ind)==j becomes idx[ind]==(j+1) here.
"""
for j in range(k):
ind = np.where(idxR == (j + 1))[0]
if len(ind) == 0:
tab_acc[0, j] = 0.0
else:
tab_acc[0, j] = int(np.sum(idx[ind] == (j + 1))) / len(ind)
return ACC_glob, tab_acc
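# Illustrative sketch (added): six samples with labels in {1, 2}; one sample of
# class 2 is mispredicted, giving a global accuracy of 5/6 and per-class
# accuracies of [1.0, 2/3].
def _demo_compute_accuracy():
    idxR = np.array([1, 1, 1, 2, 2, 2])  # true labels
    idx = np.array([1, 1, 1, 2, 2, 1])   # predicted labels
    return compute_accuracy(idxR, idx, 2)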
def predict_L1(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
# print(distmu)
# sns.kdeplot(np.array(distmu), shade=True, bw=0.1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
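# Illustrative sketch (added): two 3-feature test samples classified against two
# class centres in the projected space X @ W; all values are toy assumptions.
def _demo_predict_L1():
    W = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])  # keep the first two features
    mu = np.array([[0.0, 0.0], [5.0, 5.0]])             # centres of class 1 and class 2
    Xtest = np.array([[0.2, 0.1, 9.0], [4.8, 5.2, 9.0]])
    # expected output: [[1.], [2.]] (labels start at 1, as in the rest of this module)
    return predict_L1(Xtest, W, mu)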
# function to compute the \rho value
def predict_L1_molecule(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
confidence = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
confidence[i] = (distmu[0, 1] - distmu[0, 0]) / (distmu[0, 1] + distmu[0, 0])
return Ytest, confidence
# =============================Plot functions=================================================
# function to plot the distribution of \rho
def rhoHist(rho, n_equal_bins):
"""
# ===============================
#----- INPUT
# rho : df_confidence
# n_equal_bins : the number of histogram bins
#
#----- OUTPUT
# plt.show()
# ===============================
"""
# The leftmost and rightmost bin edges
first_edge, last_edge = rho.min(), rho.max()
bin_edges = np.linspace(
start=first_edge, stop=last_edge, num=n_equal_bins + 1, endpoint=True
)
_ = plt.hist(rho, bins=bin_edges)
plt.title("Histogram of confidence score")
plt.show()
def pd_plot(X, Yr, W, flag=None):
plt.figure()
X_transform = np.dot(X, W)
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_transform[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_transform[index2[0], :]
c2 = np.mean(X_2, axis=0)
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("Primal_Dual")
plt.legend()
plt.show()
def pca_plot(X, Yr, W, flag=None):
plt.figure()
# if flag==True:
# X=np.dot(X,W)
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
X_norm = X_pca
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_norm[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_norm[index2[0], :]
c2 = np.mean(X_2, axis=0)
# plt.scatter(X_2[:,0],X_2[:,8],c='g',label='cluster2')
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("PCA")
plt.legend()
plt.show()
def Predrejection(df_confidence, eps, num_eps):
"""
# =====================================================================
# It calculates the false rate according to the value of epsilon
#
#----- INPUT
# df_confidence : dataframe which contains predicted label,
# original label and rho
# eps : the threshold
# num_eps : the number of epsilon that can be tested
#----- OUTPUT
# FalseRate : An array that contains the falserate according to epsilon
# =====================================================================
"""
Yr = np.array(df_confidence["Yoriginal"])
Yr[np.where(Yr == 2)] = -1
Ypre = np.array(df_confidence["Ypred"])
Ypre[np.where(Ypre == 2)] = -1
rho = df_confidence["rho"]
epsList = np.arange(0, eps, eps / num_eps)
falseRate = []
rejectSample = []
for epsilon in epsList:
index = np.where((-epsilon < rho) & (rho < epsilon))
Yr[index] = 0
Ypre[index] = 0
Ydiff = Yr - Ypre
rejectRate = len(index[0]) / len(Yr)
error = len(np.where(Ydiff != 0)[0]) / len(Yr)
falseRate.append(error)
rejectSample.append(rejectRate)
plt.figure()
plt.plot(epsList, falseRate)
plt.xlabel("Confidence score prediction")
plt.ylabel("FN+FP (ratio)")
# plot the number of rejected samples
plt.figure()
plt.plot(epsList, rejectSample)
plt.xlabel("Confidence score prediction")
plt.ylabel(" Reject samples (ratio) ")
return np.array(falseRate)
# ==============================================================================
def predict_FISTA(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 2)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
def normest(X, tol=1.0e-6, maxiter=100):
# import necessary modules
import scipy.sparse
import numpy as np
import warnings
if scipy.sparse.issparse(X):
x = np.array(np.sum(np.abs(X), axis=0))
x = np.reshape(x, max(x.shape))
elif type(X) == np.matrix:
x = np.sum(np.abs(np.asarray(X)), axis=0)
x = np.reshape(x, max(x.shape))
else:
x = np.sum(np.abs(X), axis=0)
norm_e = np.linalg.norm(x)
if norm_e == 0:
return norm_e
x = x / norm_e
norm_e0 = 0
count = 0
while np.abs(norm_e - norm_e0) > tol * norm_e:
norm_e0 = norm_e
Xx = np.matmul(X, x)
if np.count_nonzero(Xx) == 0:
Xx = np.random.rand(Xx.shape[0])
x = np.matmul(X.T, Xx)
normx = np.linalg.norm(x)
norm_e = normx / np.linalg.norm(Xx)
x = x / normx
count += 1
if count > maxiter:
warnings.warn(
"Normest::NotConverge:the number of iterations exceeds {} times.\nThe error is {}, the tolerance is {}".format(
maxiter, np.abs(norm_e - norm_e0), tol
),
RuntimeWarning,
)
break
return norm_e
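# Illustrative sketch (added): on a small dense matrix the power-iteration
# estimate agrees with numpy's exact spectral norm.
def _demo_normest():
    A = np.array([[3.0, 0.0], [0.0, 1.0]])
    est = normest(A)
    # est is approximately np.linalg.norm(A, 2) == 3.0
    return est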
def merge_topGene_norm(topGenes, normW, clusternames):
"""
# =====================================================================
# It merge the two output from function select_features_w into a new
# pandas.DataFrame whose columns will be the elements in clusternames
# and each of the column will have two subcolumns: topGenes and weight
#
#----- INPUT
# topGenes : ndarray of top Genes chosen by select_features_w
# normW : normWeight of each genes given by select_features_w
# clusternames : A list of the names of each class.
#----- OUTPUT
# df_res : A DataFrame with each colum the first subcolumn the genes
# and second subcolumn their norm of weight
# =====================================================================
"""
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
lst_col.append((clusternames[i], "Weights"))
df_res = pd.DataFrame(res, columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
return df_res
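# Illustrative sketch (added): merging a 3x2 table of top genes with its matrix of
# weights into a single DataFrame whose columns form a (cluster, attribute) MultiIndex.
# The gene names and class names below are made up for the example.
def _demo_merge_topGene_norm():
    topGenes = np.array([["g1", "g7"], ["g4", "g2"], ["g9", "g5"]], dtype=object)
    normW = np.array([[0.9, 0.8], [0.5, 0.4], [0.1, 0.05]])
    return merge_topGene_norm(topGenes, normW, ["ClassA", "ClassB"])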
def merge_topGene_norm_acc(
topGenes,
normW,
clusternames,
acctest,
nbr_features=30,
saveres=False,
file_tag=None,
outputPath="../results/",
):
"""
# =============================================================================================== \n
    # Based on the function merge_topGene_norm, replace the column name for \n
# normW by the accuracy \n
#----- INPUT \n
# topGenes (ndarray or DataFrame) : Top Genes chosen by select_features_w \n
# normW (ndarray or DataFrame) : The normWeight of each genes given by select_features_w \n
# clusternames (list or array) : A list of the names of each class \n
# acctest (list or array) : The list of the test accuracy \n
# saveres (optional, boolean) : True if we want to save the result to local \n
# file_tag (optional, string) : A file tag which will be the prefix of the file name \n
# outputPath (optional, string) : The output Path of the file \n
# ----- OUTPUT \n
# df_res : A DataFrame with each colum the first subcolumn the genes \n
# and second subcolumn their norm of weight \n
# =============================================================================================== \n
"""
if type(topGenes) is pd.DataFrame:
topGenes = topGenes.values
if type(normW) is pd.DataFrame:
normW = normW.values
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
acctest_mean = acctest.values.tolist()[4]
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
astr = str(acctest_mean[i])
lst_col.append((astr, "Weights"))
df_res = pd.DataFrame(res[0:nbr_features, :], columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
if saveres:
df_res.to_csv(
"{}{}_Heatmap of Acc_normW_Topgenes.csv".format(outputPath, file_tag),
sep=";",
)
return df_res
def compare_2topGenes(
topGenes1,
topGenes2,
normW1=None,
normW2=None,
lst_col=None,
nbr_limit=30,
printOut=False,
):
"""
#=======================================================================================
# Compare column by column the elements between to topGenes, it choose for
# each column first "nbr" elements to check.
# The two topGenes should be in same size of columns
# ----- INPUT
# topGenes1, topGenes2 (DataFrame) : Two topGenes to be compared
# normW1, normW2 (DataFrame,optional): Two matrix of weights correspondent. Default: None
# lst_col (list, optional) : If given, only the chosen column will be compared. Default: None
# nbr_limit (scalar, optional) : Number of the lines to be compared. Default: 30
# printOut (boolean, optional) : If True, the comparison result will be shown on screen. Default: False
# ----- OUTPUT
# out (string) : It returns a string of the comparing result as output.
#=======================================================================================
"""
import pandas as pd
import numpy as np
if type(topGenes1) != type(topGenes2):
raise ValueError("The two topGenes to be compared should be of the same type.")
if type(topGenes1) is not pd.DataFrame:
col = ["C" + str(i) for i in topGenes1.shape[1]]
topGenes1 = pd.DataFrame(topGenes1, columns=col)
topGenes2 = pd.DataFrame(topGenes2, columns=col)
out = []
out.append("Comparing the two TopGenes:\n")
# After the benchmark, the appended list and then converted to whole string seems to be the least consuming
list_name = list(topGenes1.columns)
if lst_col is not None:
list_name = [list_name[ind] for ind in lst_col]
for name in list_name:
out.append(
"{0:{fill}{align}40}\n".format(" Class %s " % name, fill="=", align="^")
)
col_1 = np.array(topGenes1[[name]], dtype=str)
col_2 = np.array(topGenes2[[name]], dtype=str)
        # np.nonzero returns a tuple of 2 arrays (row and column indices); since each
        # column has shape (n, 1), the column indices are always 0. The last row
        # index + 1 therefore gives the number of nonzero entries, which is also the
        # position of the first zero element.
length_nonzero_1 = np.nonzero(col_1)[0][-1] + 1
length_nonzero_2 = np.nonzero(col_2)[0][-1] + 1
# np.nonzero will not detect '0.0' as zero type
if all(col_1 == "0.0"):
length_nonzero_1 = 0
if all(col_2 == "0.0"):
length_nonzero_2 = 0
length_min = min(length_nonzero_1, length_nonzero_2)
# Check if at least one of the classes contains only zero and avoid the error
if length_min == 0 and length_nonzero_1 == length_nonzero_2:
out.append(
"* Warning: No feature is selected for both two class\n Skipped for this class"
)
continue
elif length_min == 0 and length_nonzero_1 > 0:
out.append(
"* Warning: No feature is selected for this class in TopGenes2\n"
)
out.append(
"* All {} elements are included only in topGenes1:\n".format(
min(length_nonzero_1, nbr_limit)
)
)
for k in range(min(length_nonzero_1, nbr_limit)):
if normW1 is None:
out.append(" (%s)\n" % (str(col_1[k, 0])))
else:
out.append(
" (%s, %s)\n" % (str(col_1[k, 0]), normW1[[name]].iloc[k, 0])
)
continue
elif length_min == 0 and length_nonzero_2 > 0:
out.append(
"* Warning: No feature is selected for this class in TopGenes1\n"
)
out.append(
"* All {} elements are included only in topGenes2:\n".format(
min(length_nonzero_2, nbr_limit)
)
)
for k in range(min(length_nonzero_2, nbr_limit)):
if normW2 is None:
out.append(" (%s)\n" % (str(col_2[k, 0])))
else:
out.append(
" (%s, %s)\n" % (str(col_2[k, 0]), normW2[[name]].iloc[k, 0])
)
continue
if length_min < nbr_limit:
length = length_min
out.append(
"* Warning: In this column, the 1st topGenes has {} nozero elements\n* while the 2nd one has {} nonzero elements\n".format(
length_nonzero_1, length_nonzero_2
)
)
out.append("* So only first %d elements are compared\n\n" % length_min)
else:
length = nbr_limit
set_1 = col_1[0:length]
set_2 = col_2[0:length]
set_common = np.intersect1d(set_1, set_2) # Have in common
set_o1 = np.setdiff1d(set_1, set_2) # Exclusively in topGenes1
set_o2 = np.setdiff1d(set_2, set_1) # Exclusively in topGenes2
lc = len(set_common)
# print exclusively in topGenes1
out.append(
"Included exclusively in first topGenes: {} elements in total.\n".format(
length - lc
)
)
if length - lc > 0:
if normW1 is None:
out.append("Details:(Name)\n")
else:
out.append("Details:(Name,Weight)\n")
idx_i, idx_j = np.where(topGenes1[[name]].isin(set_o1))
for i, j in zip(idx_i, idx_j):
if normW1 is None:
out.append(" (%s)\n" % str(set_1[i, j]))
else:
out.append(
" (%s, %s)\n"
% (str(set_1[i, j]), str(normW1[[name]].iloc[i, j]))
)
out.append("\nNumber of elements in common:{}\n".format(lc))
# print exclusively in topGenes1
out.append(
"\nIncluded exclusively in second topGenes: {} elements in total.\n".format(
length - lc
)
)
if length - lc > 0:
if normW2 is None:
out.append("Details:(Name)\n")
else:
out.append("Details:(Name,Weight)\n")
idx_i, idx_j = np.where(topGenes2[[name]].isin(set_o2))
for i, j in zip(idx_i, idx_j):
if normW2 is None:
out.append(" (%s)\n" % str(set_2[i, j]))
else:
out.append(
" (%s, %s)\n"
% (str(set_2[i, j]), str(normW2[[name]].iloc[i, j]))
)
out.append("{:-<40}\n".format(""))
out = "".join(out)
if printOut == True:
print(out)
return out
def heatmap_classification(
Ytest,
YR,
clusternames,
rotate=45,
draw_fig=False,
save_fig=False,
func_tag=None,
outputPath="../results/",
):
"""
#=====================================================
# It takes the predicted labels (Ytest), true labels (YR)
# and a list of the names of clusters (clusternames)
# as input and provide the heatmap matrix as the output
#=====================================================
"""
k = len(np.unique(YR)) # If we need to automatically find a k
Heatmap_matrix = np.zeros((k, k))
for i in np.arange(k) + 1:
for j in np.arange(k) + 1:
a = np.where(
Ytest[YR == i] == j, 1, 0
).sum() # number Ytest ==j where YR==i
b = np.where(YR == i, 1, 0).sum()
Heatmap_matrix[i - 1, j - 1] = a / b
# Plotting
if draw_fig == True:
plt.figure(figsize=(10, 6))
        annot = True
        if k > 10:
            annot = False
if clusternames is not None:
axes = sns.heatmap(
Heatmap_matrix,
cmap="jet",
annot=annot,
fmt=".2f",
xticklabels=clusternames,
yticklabels=clusternames,
)
else:
axes = sns.heatmap(Heatmap_matrix, cmap="jet", annot=annot, fmt=".2f")
axes.set_xlabel("Predicted true positive", fontsize=14)
axes.set_ylabel("Ground true", fontsize=14)
axes.tick_params(labelsize=7)
plt.xticks(rotation=rotate)
axes.set_title("Heatmap of confusion Matrix", fontsize=14)
plt.tight_layout()
if save_fig == True:
plt.savefig(
"{}{}_Heatmap_of_confusion_Matrix.png".format(outputPath, func_tag)
)
return Heatmap_matrix
def heatmap_normW(
normW,
clusternames=None,
nbr_l=10,
rotate=45,
draw_fig=False,
save_fig=False,
func_tag=None,
outputPath="../results/",
):
"""
#=====================================================
# It takes the predicted labels (Ytest), true labels (YR)
# and the number of clusters (k) as input and provide the
# heatmap matrix as the output
#=====================================================
"""
A = np.abs(normW)
AN = A / A[0, :]
if normW.shape[0] < nbr_l:
nbr_l = normW.shape[0]
ANR = AN[0:nbr_l, :]
annot = False
if draw_fig == True:
plt.figure(figsize=(10, 6))
# axes2=sns.heatmap(ANR,cmap='jet',annot=annot,fmt='.3f')
if clusternames is None:
axes2 = sns.heatmap(
ANR,
cmap="jet",
annot=annot,
fmt=".3f",
yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
)
else:
axes2 = sns.heatmap(
ANR,
cmap="jet",
annot=annot,
fmt=".3f",
xticklabels=clusternames,
yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
)
plt.xticks(rotation=rotate)
axes2.set_ylabel("Features", fontsize=14)
axes2.set_xlabel("Clusters", fontsize=14)
axes2.tick_params(labelsize=7)
axes2.set_title("Heatmap of Matrix W", fontsize=14)
plt.tight_layout()
if save_fig == True:
plt.savefig("{}{}_Heatmap_of_signature.png".format(outputPath, func_tag))
return ANR
def drop_cells_with_ID(X, Y, ID, n_fold):
"""
# ====================================================================
# This function will detect whether the size of the first dimension of
# X is divisible by n_fold. If not, it will remove the n_diff rows from
# the biggest class(with the largest size in Y) where n_diff=len(Y)%n_fold
#
# ---- Input
# X : The data
# Y : The label
# n_fold : The number of fold
# --- Output
# X_new, Y_new : The new data and the new label
# =====================================================================
"""
m, d = X.shape
if m % n_fold == 0:
return X, Y, ID
n_diff = m % n_fold
# choose in the biggest class to delete
# Find the biggest class
lst_count = []
for i in np.unique(Y):
lst_count.append(np.where(Y == i, 1, 0).sum())
ind_max = np.unique(Y)[np.argmax(lst_count)]
lst_inds = np.where(Y == ind_max)[0]
# Delete n_diff elements in the biggest class
    lst_del = np.random.choice(lst_inds, n_diff, replace=False)  # sample without replacement so exactly n_diff distinct rows are removed
X_new = np.delete(X, lst_del, 0)
Y_new = np.delete(Y, lst_del, 0)
ID_new = np.delete(ID, lst_del, 0)
return X_new, Y_new, ID_new
def drop_cells(X, Y, n_fold):
"""
# ====================================================================
# This function will detect whether the size of the first dimension of
# X is divisible by n_fold. If not, it will remove the n_diff rows from
# the biggest class(with the largest size in Y) where n_diff=len(Y)%n_fold
#
# ---- Input
# X : The data
# Y : The label
# n_fold : The number of fold
# --- Output
# X_new, Y_new : The new data and the new label
# =====================================================================
"""
m, d = X.shape
if m % n_fold == 0:
return X, Y
n_diff = m % n_fold
# choose in the biggest class to delete
# Find the biggest class
lst_count = []
for i in np.unique(Y):
lst_count.append(np.where(Y == i, 1, 0).sum())
ind_max = np.unique(Y)[np.argmax(lst_count)]
lst_inds = np.where(Y == ind_max)[0]
# Delete n_diff elements in the biggest class
    lst_del = np.random.choice(lst_inds, n_diff, replace=False)  # sample without replacement so exactly n_diff distinct rows are removed
X_new = np.delete(X, lst_del, 0)
Y_new = np.delete(Y, lst_del, 0)
return X_new, Y_new
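# Illustrative sketch (added): 10 samples split into 3 folds leave a remainder of 1,
# so drop_cells removes one sample at random from the majority class (label 1 here).
def _demo_drop_cells():
    X = np.arange(20, dtype=float).reshape(10, 2)
    Y = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2])
    X_new, Y_new = drop_cells(X, Y, 3)
    # X_new.shape == (9, 2); the minority class (label 2) is left untouched
    return X_new, Y_new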
# ===================== Algorithms =======================================
def FISTA_Primal(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
# niter : The number of iterations
# gamma : The hyper parameter gamma
# eta : The eta to calculate the projection on l1 ball
# * isEpsilon is not used in the original file in Matlab
# --- Output
# w : The projection matrix
# mu : The centers
# nbGenes_fin : The number of genes of the final step
# loss : The loss for each iteration
# ====================================================================
"""
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = ["niter", "eta", "gamma"] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta = param["eta"]
gamma = param["gamma"]
n, d = X.shape
# === With class2indicator():
# Y = class2indicator(YR,k)
# === With Onehotencoder:
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
loss = np.zeros(niter)
XtX = np.matmul(X.T, X)
XtY = np.matmul(X.T, Y)
w_old = np.ones((d, k))
w_loc = w_old
t_old = 1
for i in range(niter):
grad_w = np.matmul(XtX, w_loc) - XtY
# gradient step
V = w_loc - gamma * grad_w
V = np.reshape(V, d * k)
# Projection on the l1 ball
V = proj_l1ball(V, eta)
# Reshape back
w_new = np.reshape(V, (d, k))
# Chambolle method
        t_new = (i + 6) / 4  # or i+6, since Python starts from 0?
w_loc_new = w_new + ((t_old - 1) / t_new) * (w_new - w_old)
w_old = w_new
w_loc = w_loc_new
t_old = t_new
loss[i] = np.linalg.norm(Y - np.matmul(X, w_loc), "fro") ** 2
# end iteratons
w = w_loc
mu = centroids(np.matmul(X, w), YR, k)
nbGenes_fin = nb_Genes(w)[0]
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss
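# Illustrative sketch (added): a tiny synthetic run of FISTA_Primal showing the three
# required keys of the param dict. The niter/eta/gamma values are assumptions chosen
# for this toy data (gamma must stay below 1/||X.T @ X||), not tuned recommendations.
def _demo_FISTA_Primal():
    rng = np.random.RandomState(0)
    X = np.vstack((rng.randn(10, 5) + 2.0, rng.randn(10, 5) - 2.0))  # two separable clusters
    YR = np.array([1] * 10 + [2] * 10).reshape(-1, 1)                # labels must be a 2D column
    param = {"niter": 50, "eta": 5.0, "gamma": 1e-4}
    w, mu, nbGenes_fin, loss = FISTA_Primal(X, YR, 2, param)
    # loss is normalised so that loss[0] == 1 and should decrease over the iterations
    return w, mu, nbGenes_fin, loss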
def primal_dual_L1N(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
    # param : A dict parameter which must have the keys:
    #         'niter', 'eta', 'tau', 'rho', 'sigma', 'beta', 'tau2' and 'delta'
    #          Normally speaking:
    #          (The default value for beta is 0.25.)
    #          (If not given, the value of 'tau2' will be calculated by
    #           tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
    #           the 2-norm of the one-hot encoding of the given YR.)
    #          (The default value of 'delta' is 1.0)
# --- Output
# w : The projection matrix of size (d,k)
# mu : The centers of classes
# nbGenes_fin : The number of genes of the final result
# loss : The loss for each iteration
# Z : The dual matrix of size (m,k)
# =====================================================================
"""
m, d = X.shape
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
# normY = np.linalg.norm(Y,2)
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
lst_params = [
"niter",
"eta",
"tau",
"rho",
"sigma",
"delta",
"tau2",
"beta",
] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta = param["eta"]
tau = param["tau"]
rho = param["rho"]
sigma = param["sigma"]
delta = param["delta"]
tau2 = param["tau2"]
# beta = param['beta']
# === END check block ===
# Initialization
w_old = np.ones((d, k))
Z_old = np.ones((m, k))
mu_old = np.eye(k, k)
Ik = np.eye(k, k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau * np.matmul(X.T, Z_old)
# Reshape
V = np.reshape(V, d * k)
V = proj_l1ball(V, eta)
V[np.where(np.abs(V) < 0.001)] = 0
# Reshape back
w_new = np.reshape(V, (d, k))
# no gamma here
# w_new = w_new + gamma*(w_new - w_old) =>
w = 2 * w_new - w_old
mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
1 + tau2 * rho
)
# mu = mu_new + gamma*(mu_new - mu_old) =>
mu = 2 * mu_new - mu_old
Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
Z_new = np.maximum(np.minimum(Z, 1), -1)
mu_old = mu_new
w_old = w_new
Z_old = Z_new
loss[i] = np.linalg.norm(
np.matmul(Y, mu_new) - np.matmul(X, w_new), 1
) + 0.5 * (np.linalg.norm(Ik - mu_new, "fro") ** 2)
# End loop
Z = Z_old
w = w_new
mu = mu_new
nbGenes_fin = nb_Genes(w)[0]
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss, Z
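# Sketch of how a parameter dict for primal_dual_L1N can be assembled. The
# derivation of 'tau2' and 'sigma' mirrors basic_run_eta further below; the default
# values and the use of np.linalg.norm(Y, 2) in place of the original normest helper
# are illustrative assumptions.
def _example_primal_dual_params(X, YR, niter=30, eta=50, tau=4, rho=1, beta=0.25, delta=1.0):
    n = X.shape[0]
    Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()  # YR must be 2D
    normY = np.linalg.norm(Y, 2)
    tau2 = beta * (1 / (np.sqrt(n) * normY))
    eps = 1 / (1 + tau2 * rho * 0.25)
    sigma = 1.0 / (tau + (tau2 * eps * normY ** 2))
    return {"niter": niter, "eta": eta, "tau": tau, "rho": rho,
            "sigma": sigma, "delta": delta, "tau2": tau2, "beta": beta}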
def primal_dual_Nuclear(X, YR, k, param):
"""
# ====================================================================
# ---- Input
# X : The data
# YR : The label. Note that this should be an 2D array.
# k : The number of class
    # param : A dict parameter which must have the keys:
    #         'niter', 'eta_star', 'tau', 'rho', 'sigma', 'tau2', 'delta'
    #         and 'beta'
    #          Normally speaking:
    #          (The default value for beta is 0.25.)
    #          (If not given, the value of 'tau2' will be calculated by
    #           tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
    #           the 2-norm of the one-hot encoding of the given YR.)
    #          (The default value of 'delta' is 1.0)
# --- Output
# w : The projection matrix of size (d,k)
# mu : The centers of classes
# nbGenes_fin : The number of genes of the final result
# loss : The loss for each iteration
# Z : The dual matrix of size (m,k)
# =====================================================================
"""
m, d = X.shape
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
# === Check the validness of param and the initialization of the params ===
if type(param) is not dict:
raise TypeError("Wrong type of input argument param", type(param))
    lst_params = [
        "niter",
        "eta_star",
        "tau",
        "rho",
        "sigma",
        "delta",
        "tau2",
        "beta",
    ]  # necessary params; 'delta' is read below, so it is validated here as well
if any(x not in param.keys() for x in lst_params):
raise ValueError(
"Missing parameter in param.\n Need {}.\n Got {} ".format(
lst_params, list(param.keys())
)
)
niter = param["niter"]
eta_star = param["eta_star"]
delta = param["delta"]
tau = param["tau"]
rho = param["rho"]
sigma = param["sigma"]
tau2 = param["tau2"]
# === END check block ===
# Initialization
w_old = np.ones((d, k))
Z_old = np.ones((m, k))
mu_old = np.eye(k, k)
Ik = np.eye(k, k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau * np.matmul(X.T, Z_old)
# Nuclear constraint
L, S0, R = np.linalg.svd(V, full_matrices=False)
norm_nuclear = S0.sum()
vs1 = proj_l1ball(S0.reshape((-1,)), eta_star)
S1 = vs1.reshape(S0.shape)
w = np.matmul(L, S1[..., None] * R)
w = 2 * w - w_old
mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
1 + tau2 * rho
)
mu = 2 * mu_new - mu_old
Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
Z_new = np.maximum(np.minimum(Z, 1), -1)
mu_old = mu_new
w_old = w
Z_old = Z_new
loss[i] = np.linalg.norm(np.matmul(Y, mu_new) - np.matmul(X, w), 1) + 0.5 * (
np.linalg.norm(Ik - mu_new, "fro") ** 2
)
# End loop
Z = Z_old
mu = mu_new
nbGenes_fin, _ = nb_Genes(w)
loss = loss / loss[0]
return w, mu, nbGenes_fin, loss, Z
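# The three solvers above rely on proj_l1ball, which is defined elsewhere in the
# original source. For readers following the projection steps, the sketch below is a
# standard sort-based Euclidean projection onto the l1 ball of radius eta (assuming
# eta > 0); it is a reference implementation, not a copy of the original helper.
def _proj_l1ball_reference(v, eta):
    v = np.asarray(v, dtype=float)
    if np.abs(v).sum() <= eta:
        return v  # already inside the ball
    u = np.sort(np.abs(v))[::-1]  # magnitudes sorted in descending order
    css = np.cumsum(u)
    rho_idx = np.nonzero(u * np.arange(1, len(v) + 1) > (css - eta))[0][-1]
    theta = (css[rho_idx] - eta) / (rho_idx + 1.0)
    return np.sign(v) * np.maximum(np.abs(v) - theta, 0.0)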
# ================================== Part 2 ====================================
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta(
func_algo,
func_predict,
X,
YR,
k,
genenames=None,
clusternames=None,
niter=30,
rho=1,
tau=4,
beta=0.25,
delta=1.0,
eta=None,
eta_star=None,
gamma=1,
nfold=4,
rng=1,
showres=True,
keepfig=False,
saveres=False,
outputPath="../results/",
):
"""
# =====================================================================
# Basic function to launch the algorithm of some specific parameters.
# - Input:
# - func_algo (necessary) : The function of the algorithm
# - func_predict (necessary) : The function to predict
# - X (necessary) : The data
# - YR (necessary) : The labels for the data
# - k (necessary) : The number of the clusters
#
# - genenames (optional) : The names of the features of the data
# if not given, it will be
# ['Gene 1','Gene 2',...]
#
# - clusternames (optional) : The clusternames of the data
# if not given, it will be
# ['Class 1', 'Class 2',...]
#
# - niter (optional) : The number of iterations
#
# - rho, tau, beta, delta, : The hyper-parameters for the algo
# eta, gamma, etc (optional)
#
# - nfold (optional) : The number of the folds of the cross validation
#
    # - rng (optional) : The seed to control the random function
#
# - showres (optional) : Boolean value. True if we want to show
# the results, plot the figures etc.
#
# - saveres (optional) : Boolean value. True to save the results
#
# - outputPath (optional) : String value. The output path.
#
# - Output:
# - mu : The centroids
    # - nbm : Mean number of selected genes across the folds
    # - accG : Global accuracy
    # - loss : Loss for each iteration
# - W_mean : Mean weight matrix for all folds
# - timeElapsed : Time elapsed for one fold
# - (And the tables) : df_topGenes, df_normW, df_topG_normW,
# df_topGenes_mean, df_normW_mean,
# df_topG_normW_mean, df_acctest
# ======================================================================
"""
np.random.seed(rng) # reproducible
if not os.path.exists(outputPath): # make the directory if it does not exist
os.makedirs(outputPath)
n, d = X.shape
# parameter checking
if genenames is None:
genenames = ["Gene {}".format(i + 1) for i in range(d)]
if clusternames is None:
clusternames = ["Class {}".format(i + 1) for i in range(k)]
    # Normalize the mean of the data (deprecated)
# m = np.mean(X,axis=0)
# X = X-m
# normX = normest(X)
# X = X/normX
# YR = np.array(YR).reshape(-1,1)
    if YR.ndim == 1:  # In case OneHotEncoder gets a 1D array and raises an error
YR = YR.reshape(-1, 1)
Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
normY = normest(Y)
normY2 = normY ** 2
# Dropping the cells randomly if the n%d is not zero
# For more details please see instructions in drop_cells
X, YR = drop_cells(X, YR, nfold)
param = {}
param["niter"] = niter
param["rho"] = rho
param["tau"] = tau
tau2 = beta * (1 / (np.sqrt(n) * normY))
param["tau2"] = tau2
eps = 1 / (1 + tau2 * rho * 0.25)
sigma = 1.0 / (tau + (tau2 * eps * normY2)) # Converge until 2.6 for L1Nel
param["sigma"] = sigma
param["delta"] = delta
param["beta"] = beta
param["eta"] = eta
param["eta_star"] = eta_star
param["gamma"] = gamma
# Initialization
nbG = np.zeros(nfold, dtype=int) # Number of genes for each fold
accuracy_train = np.zeros((nfold, k + 1))
accuracy_test = np.zeros((nfold, k + 1))
W0 = np.zeros((d, k, nfold)) # w in each fold
mu0 = np.zeros((k, k, nfold))
W_mean = np.zeros((d, k))
# Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
# Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
loss_iter0 = np.zeros((nfold, niter)) # loss for each iteration of each fold
# W_mean stores w for each eta, where w is the mean of W0 along its third axis
# Parameters printing
print("\nStarts trainning for")
print("{:>6}:{:<6}".format("niter", niter))
if "fista" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("gamma", delta))
elif "or" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
print("{:>6}:{:<6}".format("gamma", delta))
elif "_l2" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
elif "nuclear" in func_algo.__name__.lower():
print("{:>6}:{:<6}".format("eta_star", eta_star))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
else:
print("{:>6}:{:<6}".format("eta", eta))
print("{:>6}:{:<6}".format("rho", rho))
print("{:>6}:{:<6}".format("tau", tau))
print("{:>6}:{:<6}".format("beta", beta))
print("{:>6}:{:<6}".format("tau_mu", tau2))
print("{:>6}:{:<6}".format("sigma", sigma))
print("{:>6}:{:<6}".format("delta", delta))
Y_PDS = np.zeros(YR.shape)
meanclassi = np.zeros(nfold)
kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
print("{:-<30}".format(""))
print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
print("-> {} classification...".format(func_algo.__name__))
# ========== Training =========
Xtrain = X[train_ind]
Xtest = X[test_ind]
Ytrain = YR[train_ind]
Ytest = YR[test_ind]
startTime = time.perf_counter()
w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
endTime = time.perf_counter()
timeElapsed = endTime - startTime
print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
W0[:, :, i] = w
mu0[:, :, i] = mu
# Z0[:,:,i] = Z
loss_iter0[i, :] = loss
# ========== Accuracy =========
Ytrain_pred = func_predict(Xtrain, w, mu)
Ytest_pred = func_predict(Xtest, w, mu)
accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
Ytrain, Ytrain_pred, k
)
accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
Ytest, Ytest_pred, k
)
meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
nbG[i] = nbGenes
Y_PDS[test_ind] = Ytest_pred
print("{:-<30}".format(""))
# end kfold loop
nbm = int(nbG.mean())
accG = np.mean(accuracy_test[:, 0], axis=0)
Meanclass = meanclassi.mean()
W_mean = np.mean(W0, axis=2)
mu_mean = np.mean(mu0, axis=2)
# Z_mean= np.mean(Z0,axis=2)
normfro = np.linalg.norm(w, "fro")
print("Training step ends.\n")
# Class size
Ctab = []
size_class = np.zeros(k) # Size of each class (real)
size_class_est = np.zeros(k) # Size of each class (estimated)
for j in range(k):
size_class[j] = (YR == (j + 1)).sum()
size_class_est[j] = (Y_PDS == (j + 1)).sum()
Ctab.append("Class {}".format(j + 1))
df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
# Data accuracy
accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
ind_df = []
for i_fold in range(nfold):
ind_df.append("Fold {}".format(i_fold + 1))
ind_df.append("Mean")
columns = ["Global"]
if clusternames is None:
columns += Ctab
else:
columns += clusternames
df_accTrain = | pd.DataFrame(accuracy_train, index=ind_df, columns=columns) | pandas.DataFrame |
from pathlib import Path
import re
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Series,
_testing as tm,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
from pandas.io.pytables import TableIterator
pytestmark = pytest.mark.single
def test_read_missing_key_close_store(setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with HDFStore(path, "r") as store:
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
read_hdf(store, "k1")
def test_read_column(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
msg = re.escape("select_column() got an unexpected keyword argument 'where'")
with pytest.raises(TypeError, match=msg):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
msg = re.escape(
"column [values_block_0] can not be extracted individually; "
"it is not data indexable"
)
with pytest.raises(ValueError, match=msg):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[df3.index[4:6], "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_pytables_native_read(datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(is_platform_windows(), reason="native2 read fails oddly on windows")
def test_pytables_native2_read(datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
def test_legacy_table_fixed_format_read_py2(datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_fixed_format_read_datetime_py2(datapath, setup_path):
# GH 31750
# legacy table with fixed format and datetime64 column written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_datetime_py2.h5"),
mode="r",
) as store:
result = store.select("df")
expected = DataFrame(
[[Timestamp("2020-02-06T18:00")]],
columns=["A"],
index=Index(["date"]),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_read_hdf_open_store(setup_path):
# GH10330
    # No check for non-string path_or_buf, and no test of open store
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
direct = read_hdf(path, "df")
store = HDFStore(path, mode="r")
indirect = read_hdf(store, "df")
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w", format="t")
direct = read_hdf(path, "df")
iterator = read_hdf(path, "df", iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_nokey(setup_path):
# GH10443
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a")
reread = read_hdf(path)
tm.assert_frame_equal(df, reread)
df.to_hdf(path, "df2", mode="a")
msg = "key must be provided when HDF5 file contains multiple datasets."
with pytest.raises(ValueError, match=msg):
read_hdf(path)
def test_read_nokey_table(setup_path):
# GH13231
df = DataFrame({"i": range(5), "c": Series(list("abacd"), dtype="category")})
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="a", format="table")
reread = read_hdf(path)
tm.assert_frame_equal(df, reread)
df.to_hdf(path, "df2", mode="a", format="table")
msg = "key must be provided when HDF5 file contains multiple datasets."
with pytest.raises(ValueError, match=msg):
| read_hdf(path) | pandas.read_hdf |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = | Series(vals) | pandas.Series |
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, auc
import numpy as np
import pandas as pd
class analysis:
"""
Functions:
_getComplexParams
_getSimpleParams
_getF1
_getROC
_loadLog
"""
def __init__(self):
pass
def __reset(self, reset):
if reset:
self._getComplexParams(abs=True)
self._getSimpleParams()
else:
try: self.dfComplex_
except: self._getComplexParams(abs=True)
try: self.dfSimple_
except: self._getSimpleParams()
def _applyCut(self, param, pMin=None, pMax=None, prob=0.5, reset=False):
"""
Function for cutting observations that don't meet the given constraints.
Note that pMin or pMax cannot be used at the same time.
To call:
_applyCut(param, pMin=None, pMax=None, prob=0.5, reset=False)
Parameters:
param (string) column name in data frame.
pMin minimum cutoff
pMax maximum cutoff
prob threshold probability for classifying complex
reset revert to original data frame before cutting
Postcondition:
Observations that don't satisfy the constrains are removed
from the complex data frame. If param == 'sig', then the
cuts are also applied to the data frame for simple sources.
"""
# ===================================================
# Reset check and verify data frames exist.
# ===================================================
self.__reset(reset)
# ===================================================
# Remove the complex sources that don't
# satisfy the condition
# ===================================================
loc1 = self.dfComplex_[param] < pMin if pMin else self.dfComplex_[param] > pMax
self.dfComplex_.drop(self.dfComplex_.index[loc1], inplace=True)
# ===================================================
# If noise, remove the simple sources
# ===================================================
if param == 'sig':
loc2 = self.dfSimple_['sig'] < pMin if pMin else self.dfSimple_['sig'] > pMax
self.dfSimple_.drop(self.dfSimple_.index[loc2], inplace=True)
# ===================================================
# Update the parameter dataframe
# ===================================================
self._getParams(prob=prob)
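    # Hedged usage sketch (assumes the test arrays such as self.testLabel_ and
    # self.testProb_ have already been populated, e.g. by _loadLog):
    #   a = analysis()
    #   a._applyCut('sig', pMax=0.8)    # drop sources whose noise 'sig' exceeds 0.8
    #   a._applyCut('depth', pMin=1.0)  # then drop complex sources with depth separation below 1.0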
def _getParams(self, prob=0.5, reset=True):
"""
Function for getting the parameters associated with plotting.
To call:
_getParams(prob=0.5)
Parameters:
            prob    threshold probability for complex
Postcondition:
The source label, as well as the model's predicted probability
that the source is complex and the predicted label using the
threshold probability are stored in the data frame
self.dfParams_
"""
# ===================================================
# Reset check and verify data frames exist.
# ===================================================
self.__reset(reset)
# ===================================================
# Get the prediction probabilities
# ===================================================
probComplex = self.dfComplex_['prob'].values
probSimplex = self.dfSimple_['prob'].values
# ===================================================
# Create a data frame for storing
# ===================================================
Sprob = pd.Series(np.concatenate((probComplex, probSimplex)), name='prob')
label = pd.Series(np.concatenate((len(probComplex)*[1], len(probSimplex)*[0])), name='label')
Spred = pd.Series(np.where(Sprob > prob, 1, 0), name='pred')
self.dfParams_ = pd.concat([Sprob, Spred, label], axis=1)
def _getComplexParams(self, abs=True):
"""
Function for extracting the data associated with
the second component of the complex source.
To call:
_getComplexParams(abs)
Parameters:
abs Take the absolute value of the difference
Postcondition:
The flux of the second component, the difference
in phases and depth between the two components,
and the noise value are stored in the data
frame "self.dfComplex_"
The model's predicted probability that
the source is complex is also stored.
"""
# ===================================================
# Determine which sources are complex
# ===================================================
loc = np.where(self.testLabel_ == 1)[0]
# ===================================================
# Retrieve the model's prediction that
# the complex source is complex
# ===================================================
prob = self.testProb_[loc]
# ===================================================
# Extract the flux of the second component
# ===================================================
flux = self.testFlux_[loc]
flux = np.asarray([f[1] for f in flux])
# ===================================================
# Compute the difference in phases
# ===================================================
chi = self.testChi_[loc]
chi = np.asarray([c[1] - c[0] for c in chi])
if abs: chi = np.abs(chi)
# ===================================================
# Compute the difference in Faraday depths
# ===================================================
depth = self.testDepth_[loc]
depth = np.asarray([d[1] - d[0] for d in depth])
if abs: depth = np.abs(depth)
# ===================================================
# Retrieve the noise parameter
# ===================================================
sig = self.testSig_[loc]
# ===================================================
# Convert to pandas series
# ===================================================
chi = pd.Series(chi, name='chi')
depth = | pd.Series(depth, name='depth') | pandas.Series |
"""This module is meant to contain the Solscan class"""
from messari.dataloader import DataLoader
from messari.utils import validate_input
from string import Template
from typing import Union, List, Dict
from .helpers import unpack_dataframe_of_dicts
import pandas as pd
#### Block
BLOCK_LAST_URL = 'https://public-api.solscan.io/block/last'
BLOCK_TRANSACTIONS_URL = 'https://public-api.solscan.io/block/transactions'
BLOCK_BLOCK_URL = Template('https://public-api.solscan.io/block/$block')
#### Transaction
TRANSACTION_LAST_URL = 'https://public-api.solscan.io/transaction/last'
TRANSACTION_SIGNATURE_URL = Template('https://public-api.solscan.io/transaction/$signature')
#### Account
ACCOUNT_TOKENS_URL = 'https://public-api.solscan.io/account/tokens'
ACCOUNT_TRANSACTIONS_URL = 'https://public-api.solscan.io/account/transactions'
ACCOUNT_STAKE_URL = 'https://public-api.solscan.io/account/stakeAccounts'
ACCOUNT_SPL_TXNS_URL = 'https://public-api.solscan.io/account/splTransfers'
ACCOUNT_SOL_TXNS_URL = 'https://public-api.solscan.io/account/solTransfers'
ACCOUNT_EXPORT_TXNS_URL = 'https://public-api.solscan.io/account/exportTransactions'
ACCOUNT_ACCOUNT_URL = Template('https://public-api.solscan.io/account/$account')
#### Token
TOKEN_HOLDERS_URL = 'https://public-api.solscan.io/token/holders'
TOKEN_META_URL = 'https://public-api.solscan.io/token/meta'
TOKEN_LIST_URL = 'https://public-api.solscan.io/token/list'
#### Market
MARKET_INFO_URL = Template('https://public-api.solscan.io/market/token/$tokenAddress')
#### Chain Information
CHAIN_INFO_URL = 'https://public-api.solscan.io/chaininfo'
# TODO: make this clean / not hardcoded? Look into how this works
HEADERS={'accept': 'application/json', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'} # pylint: disable=line-too-long
class Solscan(DataLoader):
"""This class is a wrapper around the Solscan API
"""
def __init__(self):
DataLoader.__init__(self, api_dict=None, taxonomy_dict=None)
#################
# Block endpoints
def get_last_blocks(self, num_blocks=1) -> pd.DataFrame:
"""returns info for last blocks (default is 1, limit is 20)
Parameters
----------
num_blocks: int (default is 1)
number of blocks to return, max is 20
Returns
-------
DataFrame
DataFrame with block information
"""
# Max value is 20 or API bricks
limit=num_blocks if num_blocks < 21 else 20
params = {'limit': limit}
last_blocks = self.get_response(BLOCK_LAST_URL,
params=params,
headers=HEADERS)
last_blocks_df = pd.DataFrame(last_blocks)
last_blocks_df.set_index('currentSlot', inplace=True)
last_blocks_df = unpack_dataframe_of_dicts(last_blocks_df)
# TODO, extract data from 'result'
return last_blocks_df
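    # Hedged usage sketch (requires network access to public-api.solscan.io):
    #   solscan = Solscan()
    #   last_blocks_df = solscan.get_last_blocks(num_blocks=5)
    #   print(last_blocks_df.head())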
def get_block_last_transactions(self, blocks_in: Union[str, List],
offset=0, num_transactions=10) -> pd.DataFrame:
"""get last num_transactions of given block numbers
Parameters
----------
blocks_in: str, List
single block in or list of blocks in
num_transactions: int (default is 10)
number of transactions to return
Returns
-------
DataFrame
dataframe with transaction details
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
params = {'block': block,
'offset': offset,
'limit': num_transactions}
txns = self.get_response(BLOCK_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
txns_df = pd.DataFrame(txns)
df_list.append(txns_df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_block(self, blocks_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given block(s)
Parameters
----------
blocks_in: str, List
single block in or list of blocks in
Returns
-------
DataFrame
DataFrame with block information
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
endpoint_url = BLOCK_BLOCK_URL.substitute(block=block)
response = self.get_response(endpoint_url,
headers=HEADERS)
df = pd.DataFrame(response)
            df = df.drop('currentSlot', axis=1)
df_list.append(df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = fin_df.xs('result', axis=1, level=1)
return fin_df
#######################
# Transaction endpoints
def get_last_transactions(self, num_transactions=10) -> pd.DataFrame:
"""Return last num_transactions transactions
Parameters
----------
num_transactions: int (default is 10)
number of transactions to return, limit is 20
Returns
-------
DataFrame
dataframe with transaction details
"""
        # Max value is 20
limit=num_transactions if num_transactions < 21 else 20
params = {'limit': limit}
response = self.get_response(TRANSACTION_LAST_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
fin_df = unpack_dataframe_of_dicts(df)
return fin_df
def get_transaction(self, signatures_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given transaction signature(s)
Parameters
----------
signatures_in: str, List
single signature in or list of signatures in
Returns
-------
DataFrame
DataFrame with transaction details
"""
signatures = validate_input(signatures_in)
series_list = []
for signature in signatures:
endpoint_url = TRANSACTION_SIGNATURE_URL.substitute(signature=signature)
response = self.get_response(endpoint_url,
headers=HEADERS)
#print(response)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=signatures, axis=1)
return fin_df
###################
# Account endpoints
def get_account_tokens(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Return token balances of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with token balances of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_TOKENS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_transactions(self, accounts_in: Union[str,List]) -> pd.DataFrame:
"""Return DataFrame of transactions of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with transactions of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_stake(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Get staking accounts of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with staking accounts of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_STAKE_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_spl_transactions(self, accounts_in: Union[str, List],
from_time: int=None,
to_time: int=None,
offset: int=0,
limit: int=10) -> pd.DataFrame:
"""Return SPL transfers for given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with SPL transfers for given account(s)
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account,
'toTime': to_time,
'fromTime': from_time,
'offset': offset,
'limit': limit}
response = self.get_response(ACCOUNT_SPL_TXNS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
            df = df.drop('total', axis=1)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_account_sol_transactions(self, accounts_in: Union[str, List],
from_time: int=None,
to_time: int=None,
offset: int=0,
limit: int=10) -> pd.DataFrame:
"""Return SOL transfers for given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with SOL transfers for given account(s)
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account,
'toTime': to_time,
'fromTime': from_time,
'offset': offset,
'limit': limit}
response = self.get_response(ACCOUNT_SOL_TXNS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_account_export_transactions(self, accounts_in: Union[str, List],
type_in: str, from_time: int, to_time: int) -> List[str]:
"""Export transactions to CSV style string
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
type_in: str
what type of transactions to export:
- tokenchange
- soltransfer
- all
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
Returns
-------
List[str]
list of strings to make csv document
"""
accounts = validate_input(accounts_in)
csv_list=[]
for account in accounts:
params={'account': account,
'type': type_in,
'fromTime': from_time,
'toTime': to_time}
# NOTE: need to do this to not return json
response = self.session.get(ACCOUNT_EXPORT_TXNS_URL, params=params, headers=HEADERS)
csv = response.content.decode('utf-8')
csv_list.append(csv)
return csv_list
def get_account(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Return overall account(s) information, including program account,
NFT metadata information
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with account info
"""
accounts = validate_input(accounts_in)
series_list = []
for account in accounts:
endpoint_url = ACCOUNT_ACCOUNT_URL.substitute(account=account)
response = self.get_response(endpoint_url,
headers=HEADERS)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=accounts, axis=1)
return fin_df
#################
# Token endpoints
def get_token_holders(self, tokens_in: Union[str, List],
limit: int=10, offset: int=0) -> pd.DataFrame:
"""Return top token holders for given token(s)
Parameters
----------
tokens_in: str, List
single token address in or list of token addresses, used to filter results
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with top token holders
"""
tokens = validate_input(tokens_in)
df_list = []
for token in tokens:
params={'tokenAddress': token,
'limit': limit,
'offset': offset}
response = self.get_response(TOKEN_HOLDERS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
            df = df.drop('total', axis=1)
df_list.append(df)
fin_df = pd.concat(df_list, keys=tokens, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_token_meta(self, tokens_in: Union[str, List]) -> pd.DataFrame:
"""Return metadata of given token(s)
Parameters
----------
tokens_in: str, List
single token address in or list of token addresses, used to filter results
Returns
-------
DataFrame
DataFrame with token metadata
"""
tokens = validate_input(tokens_in)
series_list = []
for token in tokens:
params={'tokenAddress': token}
response = self.get_response(TOKEN_META_URL,
params=params,
headers=HEADERS)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=tokens, axis=1)
return fin_df
def get_token_list(self, sort_by: str='market_cap', ascending: bool=True,
limit: int=10, offset: int=0) -> pd.DataFrame:
"""Returns DataFrame of tokens
Parameters
----------
sort_by: str (default 'market_cap')
how to sort results, options are:
- market_cap
- volume
- holder
- price
- price_change_24h
- price_change_7d
- price_change_14d
- price_change_30d
- price_change_60d
- price_change_200d
- price_change_1y
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
ascending: bool
return results ascending or descending (default True)
Returns
-------
DataFrame
DataFrame with tokens
"""
direction = 'asc' if ascending else 'desc'
params={'sortBy': sort_by,
'direction': direction,
'limit': limit,
'offset': offset}
response = self.get_response(TOKEN_LIST_URL,
params=params,
headers=HEADERS)
token_list_df = pd.DataFrame(response['data'])
return token_list_df
##################
# Market endpoints
def get_market_info(self, tokens_in: Union[str, List]) -> pd.DataFrame:
"""Get market information of the given token
Parameters
----------
tokens_in: str, List
single token address in or list of token addresses
Returns
-------
DataFrame
DataFrame containing market info for token(s)
"""
tokens = validate_input(tokens_in)
market_info_list = []
for token in tokens:
endpoint_url = MARKET_INFO_URL.substitute(tokenAddress=token)
market_info = self.get_response(endpoint_url,
headers=HEADERS)
market_info_series = pd.Series(market_info)
market_info_list.append(market_info_series)
market_info_df = pd.concat(market_info_list, keys=tokens, axis=1)
return market_info_df
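    # Hedged end-to-end sketch for the wrapper so far; '<token_address>' is a
    # placeholder, not a value taken from the original source:
    #   solscan = Solscan()
    #   top_tokens = solscan.get_token_list(sort_by='market_cap', ascending=False, limit=5)
    #   holders = solscan.get_token_holders('<token_address>', limit=10)
    #   market = solscan.get_market_info('<token_address>')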
#############################
# Chain Information endpoints
def get_chain_info(self) -> Dict:
"""Return Blockchain overall information
Returns
-------
Dict
Information about Solana blockchain
"""
chain_info = self.get_response(CHAIN_INFO_URL,
headers=HEADERS)
chain_info_df = | pd.Series(chain_info) | pandas.Series |
import os
import pandas as pd
import numpy as np
import click
from tqdm import tqdm
from unicodedata import normalize
from pjud import data
def consolidated_materia(path_processed = "data/processed/pjud"):
tqdm.pandas()
df_ingresos_materia = pd.read_feather(f"{path_processed}/processes_IngresosMateria.feather")
df_termino_materia = pd.read_feather(f"{path_processed}/processes_TerminosMateria.feather")
df_fulldata_materia = pd.merge(df_ingresos_materia, df_termino_materia, how='outer', on=['COD. TRIBUNAL','RIT','COD. MATERIA'])
columnas_drop = ['index_x', 'index_y', 'MES INGRESO', 'MES TERMINO']
df_fulldata_materia.drop(columnas_drop, axis = 'columns', inplace = True)
click.echo('Transformando data faltante ...')
df_fulldata_materia = df_fulldata_materia.progress_apply(data.transformdata.faltantes_materia, axis=1)
columnas_drop = ['TIPO CAUSA_y', 'MATERIA_y', 'TRIBUNAL_y', 'COD. CORTE_y', 'CORTE_y', 'FECHA INGRESO_y']
df_fulldata_materia.drop(columnas_drop, axis = 'columns', inplace = True)
df_fulldata_materia.rename(columns = {'COD. CORTE_x':'COD. CORTE',
'CORTE_x':'CORTE',
'TRIBUNAL_x':'TRIBUNAL',
'TIPO CAUSA_x':'TIPO CAUSA',
'MATERIA_x':'MATERIA',
'FECHA INGRESO_x':'FECHA INGRESO'
}, inplace = True)
filtro_oral = df_fulldata_materia[df_fulldata_materia['TRIBUNAL'].str.contains('ORAL')]
filtro_garantia = df_fulldata_materia[df_fulldata_materia['TRIBUNAL'].str.contains('GARANTIA')]
data.save_feather(df_fulldata_materia, 'consolidated_Materia', path_processed)
data.save_feather(filtro_oral, 'consolidated_JuicioOralesMateria', path_processed)
data.save_feather(filtro_garantia, 'consolidated_CausasGarantiaMateria', path_processed)
click.echo('Generado archivo Feather. Proceso Terminado')
def consolidated_rol(path_processed = "data/processed/pjud"):
tqdm.pandas()
df_ingresos_rol = pd.read_feather(f"{path_processed}/processes_IngresosRol.feather")
df_termino_rol = pd.read_feather(f"{path_processed}/processes_TerminosRol.feather")
df_fulldata_rol = pd.merge(df_ingresos_rol, df_termino_rol, how='outer', on=['COD. TRIBUNAL','RIT'])
columnas_drop = ['index_x', 'index_y', 'MES INGRESO', 'MES TERMINO']
df_fulldata_rol.drop(columnas_drop, axis = 'columns', inplace = True)
click.echo('Transformando data faltante ...')
df_fulldata_rol = df_fulldata_rol.progress_apply(data.transformdata.faltantes_rol, axis=1)
columnas_drop = ['TIPO CAUSA_y', 'TRIBUNAL_y', 'COD. CORTE_y', 'CORTE_y', 'FECHA INGRESO_y']
df_fulldata_rol.drop(columnas_drop, axis = 'columns', inplace = True)
df_fulldata_rol.rename(columns = {'COD. CORTE_x':'COD. CORTE',
'CORTE_x':'CORTE',
'TRIBUNAL_x':'TRIBUNAL',
'TIPO CAUSA_x':'TIPO CAUSA',
'MATERIA_x':'MATERIA',
'FECHA INGRESO_x':'FECHA INGRESO'
}, inplace = True)
causas_top = df_fulldata_rol[df_fulldata_rol['TRIBUNAL'].str.contains('ORAL')]
causas_garantia = df_fulldata_rol[df_fulldata_rol['TRIBUNAL'].str.contains('GARANTIA')]
df_rit_cero = df_fulldata_rol[df_fulldata_rol['RIT'].str.startswith("0-")]
df_fulldata_rol.drop(df_rit_cero.index, axis=0, inplace=True)
data.save_feather(df_fulldata_rol, 'consolidated_Rol', path_processed)
data.save_feather(causas_top, 'consolidated_JuicioOralesRol', path_processed)
data.save_feather(causas_garantia, 'consolidated_CausasGarantiaRol', path_processed)
click.echo('Generado archivo Feather. Proceso Terminado')
def consolidated_materia_rol(path_processed = "data/processed/pjud"):
tqdm.pandas()
df_materia = pd.read_feather(f"{path_processed}/consolidated_Materia.feather")
df_rol = pd.read_feather(f"{path_processed}/consolidated_Rol.feather")
df_union = pd.merge(df_rol, df_materia, how='left', on=['COD. CORTE','COD. TRIBUNAL','RIT'], indicator=True)
columnas_duplicadas = ['index_x', 'index_y','CORTE_y', 'TRIBUNAL_y',
'TIPO CAUSA_y', 'FECHA INGRESO_y',
'AÑO INGRESO_y', 'FECHA TERMINO_y',
'AÑO TERMINO_y', 'MOTIVO TERMINO_y','DURACION CAUSA_y',
'TOTAL TERMINOS_y', '_merge']
df_union.drop(columnas_duplicadas, axis='columns', inplace=True)
df_union.rename(columns = {'CORTE_x':'CORTE',
'TRIBUNAL_x':'TRIBUNAL',
'TIPO CAUSA_x':'TIPO CAUSA',
'FECHA INGRESO_x':'FECHA INGRESO',
'AÑO INGRESO_x':'AÑO INGRESO',
'FECHA TERMINO_x':'FECHA TERMINO',
'AÑO TERMINO_x':'AÑO TERMINO',
'MOTIVO TERMINO_x':'MOTIVO TERMINO',
'DURACION CAUSA_x':'DURACION CAUSA',
'TOTAL TERMINOS_x':'TOTAL TERMINOS'
},inplace = True)
data.save_feather(df_union, 'consolidated_Materia_Rol', path_processed)
click.echo('Generado archivo Feather. Proceso Terminado')
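# Hedged pipeline sketch: the consolidation steps above build on each other's Feather
# outputs, so one plausible invocation order is shown below (default paths assumed).
def _run_consolidation_pipeline():
    consolidated_materia()      # writes consolidated_Materia.feather
    consolidated_rol()          # writes consolidated_Rol.feather
    consolidated_materia_rol()  # joins both on COD. CORTE / COD. TRIBUNAL / RIT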
def consolidated_fulldata_causa(path_processed = "data/processed/pjud"):
tqdm.pandas()
path_delitos = 'data/processed/delitos'
df_causas = pd.read_feather(f"{path_processed}/consolidated_Materia_Rol.feather")
df_tipologia = pd.read_feather(f"{path_delitos}/clean_Delitos.feather")
df_poblacion = pd.read_feather(f"{path_processed}/processes_DataConsolidada_Poblacion_Jurisdiccion.feather")
    # Merge RIT and Tribunal into a single column to avoid misinterpreting cases
df_causas['TRIBUNAL-RIT'] = df_causas['COD. TRIBUNAL'].map(str) + "-" + df_causas['RIT'].map(str)
    # Load the data related to crime typology
df_causa_tipologia = pd.merge(df_causas,df_tipologia, how='left', on=['COD. MATERIA'])
columnas_duplicadas = ['index_x', 'MATERIA_x','index_y']
df_causa_tipologia.drop(columnas_duplicadas, axis='columns', inplace=True)
df_causa_tipologia.rename(columns = {'MATERIA_y':'MATERIA'}, inplace=True)
    # Load the data related to population
df_fulldatacausa = pd.merge(df_causa_tipologia, df_poblacion, how='left', on=['CORTE','TRIBUNAL'])
columnas_duplicadas = ['index']
df_fulldatacausa.drop(columnas_duplicadas, axis='columns', inplace=True)
    # Reordering the column names ...
df_fulldatacausa.rename(columns = { 'COD. CORTE':'cod_corte',
'COD. TRIBUNAL':'cod_tribunal',
'RIT':'rit',
'COD. MATERIA':'cod_materia',
'TOTAL INGRESOS POR MATERIAS':'total_ingresos_materia',
'FECHA INGRESO':'fecha_ingreso',
'AÑO INGRESO':'año_ingreso',
'FECHA TERMINO':'fecha_termino',
'DURACION CAUSA':'duracion_causa',
'MOTIVO TERMINO':'motivo_termino',
'AÑO TERMINO':'año_termino',
'TOTAL TERMINOS':'total_terminos',
'CORTE':'corte',
'TRIBUNAL':'tribunal',
'TIPO CAUSA':'tipo_causa',
'TRIBUNAL-RIT':'tribunal_rit',
'MATERIA':'materia',
'TIPOLOGIA MATERIA':'tipologia_materia',
'VIGENCIA MATERIA':'vigencia_materia',
'REGION':'region',
'POBLACION':'poblacion',
'HOMBRES':'hombres',
'MUJERES':'mujeres',
'URBANO':'urbano',
'RURAL':'rural',
'COMUNAS':'comunas',
'JUECES':'dotacion_jueces',
'ASIENTO':'asiento',
'TIPO JUZGADO':'tipo_juzgado'
},inplace = True)
df_fulldatacausa = df_fulldatacausa[['region','cod_corte','corte','tribunal_rit','cod_tribunal','rit','tribunal','tipo_juzgado','dotacion_jueces','tipo_causa','fecha_ingreso','año_ingreso','cod_materia','materia',
'tipologia_materia','vigencia_materia','total_ingresos_materia','total_terminos','fecha_termino','año_termino','duracion_causa','motivo_termino','asiento','comunas','poblacion',
'hombres','mujeres','urbano','rural']]
data.save_feather(df_fulldatacausa, 'consolidated_FullData_Causa', path_processed)
click.echo('Generado archivo Feather. Proceso Terminado')
def consolidated_fulldata_audiencias(path_processed = "data/processed/pjud"):
tqdm.pandas()
df_audiencias = pd.read_feather(f"{path_processed}/processes_Audiencias.feather")
df_poblacion = pd.read_feather(f"{path_processed}/processes_DataConsolidada_Poblacion_Jurisdiccion.feather")
df_audiencias['TRIBUNAL-RIT'] = df_audiencias['COD. TRIBUNAL'].map(str) + "-" + df_audiencias['RIT'].map(str)
df_audiencias['AÑO INGRESO'] = df_audiencias['RIT'].progress_apply(data.cleandata.obtiene_año)
columnas_duplicadas = ['level_0', 'index']
df_audiencias.drop(columnas_duplicadas, axis='columns', inplace=True)
df_fulldataaudiencias = pd.merge(df_audiencias, df_poblacion, how='left', on=['CORTE','TRIBUNAL'])
columnas_duplicadas = ['index']
df_fulldataaudiencias.drop(columnas_duplicadas, axis='columns', inplace=True)
df_fulldataaudiencias.rename(columns = {'COD. CORTE':'cod_corte',
'COD. TRIBUNAL':'cod_tribunal',
'RIT':'rit',
'CORTE':'corte',
'TRIBUNAL':'tribunal',
'TIPO CAUSA':'tipo_causa',
'TIPO DE AUDIENCIA':'tipo_audiencia',
'FECHA PROGRAMACION AUDIENCIA':'fecha_programacion_audiencia',
'FECHA AUDIENCIA':'fecha_audiencia',
'DIAS AGENDAMIENTO':'dias_agendamiento',
'DURACION AUDIENCIA (MIN)':'duracion_audiencia_minutos',
'TOTAL AUDIENCIAS':'total_audiencias',
'TRIBUNAL-RIT':'tribunal_rit',
'AÑO INGRESO':'año_ingreso',
'REGION':'region',
'POBLACION':'poblacion',
'HOMBRES':'hombres',
'MUJERES':'mujeres',
'URBANO':'urbano',
'RURAL':'rural',
'COMUNAS':'comunas',
'JUECES':'dotacion_jueces',
'ASIENTO':'asiento',
'TIPO JUZGADO':'tipo_juzgado'
},inplace = True)
df_fulldataaudiencias = df_fulldataaudiencias[['region','cod_corte','corte','tribunal_rit','cod_tribunal','rit','tribunal','tipo_juzgado','dotacion_jueces','tipo_causa','año_ingreso',
'tipo_audiencia','fecha_programacion_audiencia','fecha_audiencia','dias_agendamiento','duracion_audiencia_minutos','total_audiencias',
'asiento','comunas','poblacion','hombres','mujeres','urbano','rural']]
data.save_feather(df_fulldataaudiencias, 'consolidated_FullData_Audiencias', path_processed)
click.echo('Generado archivo Feather. Proceso Terminado')
def consolidated_fulldata_inventario(path_processed = "data/processed/pjud"):
tqdm.pandas()
path = "data/processed/delitos"
df_inventario = pd.read_feather(f"{path_processed}/processes_Inventario.feather")
df_tipologia = pd.read_feather(f"{path}/clean_Delitos.feather")
df_poblacion = | pd.read_feather(f"{path_processed}/processes_DataConsolidada_Poblacion_Jurisdiccion.feather") | pandas.read_feather |
import pandas as pd
import matplotlib.pyplot as plt
from pandas_datareader import data, wb
from datetime import timedelta
def getValue(path):
df = pd.read_csv(path+".csv", index_col=0)
    high_9 = df['high'].rolling(window=9).max()
    low_9 = df['low'].rolling(window=9).min()
    df['tenkan_sen'] = (high_9 + low_9) / 2
    high_26 = df['high'].rolling(window=26).max()
    low_26 = df['low'].rolling(window=26).min()
    df['kijun_sen'] = (high_26 + low_26) / 2
    # this is to extend 'df' 26 days into the future
    # the 'df' here is a numerically indexed df
last_index = df.iloc[-1:].index[0]
last_date = | pd.to_datetime(df['date'].iloc[-1]) | pandas.to_datetime |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
rng = np.random.RandomState(0)
X = rng.randn(3, 400)
p = rng.rand(10, 3)  # Random projection into 10d
X = np.dot(p, X)
print(X)
df= | pd.DataFrame(X.T) | pandas.DataFrame |
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2017, University of Technology Graz"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>, <NAME>"
import codecs
import json
import warnings
import io
import os
import re
from zipfile import ZipFile
import pandas as pd
import requests
from pandas.errors import ParserError
from .sww_utils import guess_freq
ENCODING = 'iso8859'
def csv_args(unix=False):
"""
    How to read and write a csv file
    differs between Windows and Unix (macOS + Linux).
Args:
unix (bool): if it is a unix computer
Returns:
dict: arguments to read and write csv files
"""
if unix:
return dict(sep=',', decimal='.')
else:
return dict(sep=';', decimal=',')
def check_path(pth=None):
"""
use the local directory if not path is give
Args:
pth (str): path to directory
Returns:
str: path to directory
"""
if pth == '':
return pth
elif pth is None:
return ''
elif os.path.isdir(pth):
return pth
else:
raise UserWarning('Path is not available')
PARQUET_ERROR = ModuleNotFoundError("""Error: Unable to find a usable engine to read or write parquet files.
A suitable version of pyarrow (recommended) or fastparquet (alternative) is required for parquet support.
Use pip or conda to install:
- pyarrow (https://pypi.org/project/pyarrow/) or
- fastparquet (https://pypi.org/project/fastparquet/).""")
def export_series(series, filename, export_path=None, save_as='csv', unix=False):
"""
export the series to a given format
may be extended
Args:
series (pandas.Series):
filename (str): name of the file
export_path (str): path where the file will be stored.
save_as (str): export format
unix (bool): whether to use "," or ";" for the csv
Returns:
str: path to created file
"""
fn = os.path.join(check_path(export_path), '{}.{}'.format(filename, save_as))
if save_as == 'csv':
series.to_csv(fn, **csv_args(unix))
elif save_as == 'parquet':
try:
series.to_frame().to_parquet(fn)
except ImportError:
raise PARQUET_ERROR
else:
raise NotImplementedError('Sorry, but only csv files are implemented. Maybe there will be more options soon.')
return fn
def import_series(filename, series_label='precipitation', index_label='datetime', unix=False):
"""
Args:
filename (str):
series_label (str):
index_label (str):
unix (bool): whether to use "," or ";" for the csv
Returns:
pandas.Series: rain series
"""
if filename.endswith('csv'):
try:
ts = pd.read_csv(filename, index_col=0, header=0, squeeze=True, **csv_args(unix))
ts.index = pd.to_datetime(ts.index)
ts.name = series_label
ts.index.name = index_label
return ts
except (ParserError, UnicodeDecodeError):
return read_ehyd_file(filename)
elif filename.endswith('parquet'):
try:
return pd.read_parquet(filename).iloc[:, 0].asfreq('T').copy()
except ImportError:
raise PARQUET_ERROR
else:
raise NotImplementedError('Sorry, but only csv files are implemented. Maybe there will be more options soon.')
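# Hedged round-trip sketch for the two helpers above (illustrative file name; assumes
# `series` is a datetime-indexed rain series):
def _example_export_import_roundtrip(series, on_unix=False):
    fn = export_series(series, filename='rain_demo', save_as='csv', unix=on_unix)
    return import_series(fn, unix=on_unix)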
# ######################################################################################################################
STATIONS_PRECIPITATION_HIGH_RES = json.load(open(os.path.join(os.path.dirname(__file__), 'ehyd_stations.json'), 'r', encoding='utf-8'))
"""Niederschlagsstationen mit Langzeitserie mit Minutensummen"""
def get_high_res_station(identifier):
"""
get the name of the station based on the id number
Args:
identifier (int): Gitterpunktnummer or HZBNR of the station
Returns:
str: name of the station
"""
return STATIONS_PRECIPITATION_HIGH_RES[identifier]
class DATA_KIND:
MEASUREMENT = 'MessstellenExtraData'
DESIGN_PRECIPITATION = 'BemessungsniederschlagExtraData'
class FIELDS:
NIEDERSCHLAG = 'nlv'
QUELLEN = 'qu'
GRUNDWASSER = 'gw'
OBERFLAECHENWASSER = 'owf'
PDF = 'pdf'
_path_file = os.path.dirname(__file__)
_stations_files = {FIELDS.NIEDERSCHLAG: 'niederschl_lufttemp_verdunst.csv',
FIELDS.QUELLEN: 'unteririsches_wasser.csv',
FIELDS.OBERFLAECHENWASSER: 'oberflaechenwasser.csv'}
def get_ehyd_station_frame(field):
return pd.read_csv(os.path.join(_path_file, _stations_files[field]), index_col=0, header=0)
EHYD_STATIONS = {k: get_ehyd_station_frame(k).to_dict(orient='index') for k, v in _stations_files.items()}
def get_ehyd_stations(field=FIELDS.NIEDERSCHLAG):
"""
    Station information such as: Mst. ID, Messstellen Name, Jahr, Bundesland, Flussgebiet, Seehöhe
Args:
field (str): nlv; qu; gw; owf (use constant struct: `FIELDS`)
Returns:
        dict: station information
"""
return EHYD_STATIONS[field]
def get_basic_station_meta(identifier, field=FIELDS.NIEDERSCHLAG):
"""
    Station information such as: Mst. ID, Messstellen Name, Jahr, Bundesland, Flussgebiet, Seehöhe
Args:
identifier (int): Gitterpunktnummer or HZBNR of the station
field (str): nlv; qu; gw; owf (use constant struct: `FIELDS`)
Returns:
        dict: station information
"""
return get_ehyd_stations(field)[identifier]
def _get_url(identifier, data_kind=DATA_KIND.MEASUREMENT, field=FIELDS.NIEDERSCHLAG, file_number=2):
"""
get the URL to the specific file
Args:
identifier (int): Gitterpunktnummer or HZBNR of the station
data_kind (str): MessstellenExtraData; BemessungsniederschlagExtraData (use constant struct: `DATA_KIND`)
field (str): nlv; qu; gw; owf (use constant struct: `FIELDS`)
file_number: 1 - ... (see available files with function `available_files`)
Example-URLS:
- https://ehyd.gv.at/eHYD/MessstellenExtraData/qu?id=395293&file=1
- https://ehyd.gv.at/eHYD/MessstellenExtraData/nlv?id=101063&file=1
- https://ehyd.gv.at/eHYD/MessstellenExtraData/gw?id=325274&file=1
- https://ehyd.gv.at/eHYD/MessstellenExtraData/owf?id=211912&file=2
- https://ehyd.gv.at/eHYD/BemessungsniederschlagExtraData?id=4108
- https://ehyd.gv.at/eHYD/BemessungsniederschlagExtraData/pdf?id=3499
Returns:
str: url to the file
"""
url = f'https://ehyd.gv.at/eHYD/{data_kind}'
if field:
url += f'/{field}'
url += f'?id={identifier}'
if file_number:
url += f'&file={file_number}'
return url
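# Illustrative check (station id and expected URL taken from the docstring examples above):
def _example_get_url():
    """Sketch: build the measurement-data URL for a precipitation station."""
    url = _get_url(101063, data_kind=DATA_KIND.MEASUREMENT, field=FIELDS.NIEDERSCHLAG, file_number=1)
    # expected: 'https://ehyd.gv.at/eHYD/MessstellenExtraData/nlv?id=101063&file=1'
    return url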
_REQUESTS = dict()
def _get_request(identifier, data_kind=DATA_KIND.MEASUREMENT, field=FIELDS.NIEDERSCHLAG, file_number=1) -> requests.Response:
"""get request of website"""
url = _get_url(identifier=identifier, field=field, file_number=file_number, data_kind=data_kind)
if url not in _REQUESTS:
_REQUESTS[url] = requests.get(url, allow_redirects=True)
return _REQUESTS[url]
def _file_available(r: requests.Response) -> bool:
"""if any file is available in give request"""
return 'content-disposition' in r.headers
def _get_filename(r: requests.Response) -> str:
"""get filename of request"""
# h['content-disposition'] = 'attachment; filename=N-Minutensummen-112086.zip'
return r.headers['content-disposition'].split('filename=')[1]
def available_files(identifier, field=FIELDS.NIEDERSCHLAG, data_kind=DATA_KIND.MEASUREMENT):
"""
    list the available files of the station
Args:
identifier (int): Gitterpunktnummer or HZBNR of the station
data_kind (str): MessstellenExtraData; BemessungsniederschlagExtraData (use constant struct: `DATA_KIND`)
field (str): nlv; qu; gw; owf (use constant struct: `FIELDS`)
Returns:
dict[int, str]: dictionary of {file-number: file-name}
"""
files = dict()
for file_number in range(1, 15):
r = _get_request(identifier=identifier, field=field, file_number=file_number, data_kind=data_kind)
if _file_available(r):
files[file_number] = _get_filename(r)
else:
break
return files
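# Illustrative usage (performs real HTTP requests, so only a sketch; station id taken
# from the docstring examples above):
def _example_available_files():
    """Sketch: list the downloadable files of precipitation station 101063."""
    return available_files(101063, field=FIELDS.NIEDERSCHLAG, data_kind=DATA_KIND.MEASUREMENT)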
def _get_file_from_request(r: requests.Response) -> [str, (io.TextIOWrapper or io.IOBase)]:
filename = _get_filename(r)
if ('N-Minutensummen' in filename) and ('.zip' in filename):
c = r.content
z = ZipFile(io.BytesIO(c))
filename = z.namelist()[0]
csv_file = io.TextIOWrapper(z.open(filename), encoding=ENCODING)
return csv_file, filename.split('.')[0]
elif ('.csv' in filename) or ('.txt' in filename):
csv_file = io.TextIOWrapper(io.BytesIO(r.content), encoding=ENCODING)
return csv_file, filename.split('.')[0]
else:
return None, None
def _get_file(identifier, field=FIELDS.NIEDERSCHLAG, file_number=1, data_kind=DATA_KIND.MEASUREMENT):
"""
get the file of the series of the station
Args:
identifier (int): Gitterpunktnummer or HZBNR of the station
field (str): nlv; qu; gw; owf (use constant struct: `FIELDS`)
data_kind (str): MessstellenExtraData; BemessungsniederschlagExtraData (use constant struct: `DATA_KIND`)
file_number (int): file-number (>= 1)
Returns:
((io.TextIOWrapper | io.IOBase), str): data file
"""
r = _get_request(identifier=identifier, field=field, file_number=file_number, data_kind=data_kind)
if _file_available(r):
csv_file, filename = _get_file_from_request(r)
if csv_file is None:
raise NotImplementedError('This kind of request is not implemented (yet?). Sorry!')
return csv_file, filename
def get_ehyd_files(identifier, field=FIELDS.NIEDERSCHLAG, data_kind=DATA_KIND.MEASUREMENT):
"""
get the files of the series of one station
Args:
identifier (int): Gitterpunktnummer or HZBNR of the station
field (str): nlv; qu; gw; owf (use constant struct: `FIELDS`)
data_kind (str): MessstellenExtraData; BemessungsniederschlagExtraData (use constant struct: `DATA_KIND`)
Returns:
dict: with key=filename and value=tuple(meta_data, timeseries_data)
"""
files = dict()
for file_number, filename in available_files(identifier, field=field, data_kind=data_kind).items():
if file_number == 1:
files[filename] = get_station_reference_data(identifier, field=field, data_kind=data_kind)
else:
r = _get_request(identifier=identifier, field=field, file_number=file_number, data_kind=data_kind)
files[filename] = read_ehyd_file(*_get_file_from_request(r))
return files
def _parse_meta_data(meta_str):
# print('#' * 100)
# print(meta_str)
meta = dict(_raw=meta_str)
currant_table = None
currant_header = None
table_key = None
is_table = False
sep = re.compile(r':\s+')
lines = iter(meta_str.split('\n'))
for line in lines:
if not is_table and line.endswith(':'):
# start first table
is_table = True
table_key = line[:-1].split(': ')[0]
currant_header = re.split(sep, line.strip().strip(':'))
currant_table = list()
elif is_table and not line.startswith(' ') and line.endswith(':'):
# end table | start new table
meta[table_key] = currant_table
# ------------
table_key = line[:-1].split(': ')[0]
currant_header = re.split(sep, line.strip().strip(':'))
currant_table = list()
elif is_table and line.startswith(' ') and line.endswith(':'):
# line is header
currant_header = re.split(sep, line.strip().strip(':'))
elif is_table and line.startswith(' '):
# values in table
values = re.split(r'\s\s+', line.strip())
currant_table.append(dict(zip(currant_header, values)))
elif ':' in line:
# simple key: value | end table here
key, *value = re.split(sep, line)
value = ': '.join(value)
if key in meta:
if isinstance(meta[key], str):
meta[key] = [meta[key], value]
elif isinstance(meta[key], list):
meta[key].append(value)
else:
meta[key] = value
if is_table:
meta[table_key] = currant_table
is_table = False
elif line.strip() == '':
# empty line | end table
pass
else:
value = line.strip()
if key in meta:
if isinstance(meta[key], str):
meta[key] = [meta[key], value]
elif isinstance(meta[key], list):
meta[key].append(value)
else:
meta[key] = value
# print('UNKOWN:', line)
if table_key not in meta:
meta[table_key] = currant_table
# print(json.dumps(meta, indent=4, ensure_ascii=False))
return meta
def translate_meta_dict(meta):
    """Translate the (German) meta-data keys to English using the `translate` package."""
    from translate import Translator
    translator = Translator(from_lang='de', to_lang='en')
    meta_translate = dict()
    for k in meta.keys():
        meta_translate[translator.translate(k)] = meta[k]
    return meta_translate
def get_station_reference_data(identifier, field=FIELDS.NIEDERSCHLAG, data_kind=DATA_KIND.MEASUREMENT):
"""
get the station reference data (=Stammdaten der Station)
Args:
identifier (int): Gitterpunktnummer or HZBNR of the station
field (str): nlv; qu; gw; owf (use constant struct: `FIELDS`)
data_kind (str): MessstellenExtraData; BemessungsniederschlagExtraData (use constant struct: `DATA_KIND`)
Returns:
dict: meta data of station
"""
# parse_meta_data(_get_file(identifier=identifier, field=field, file_number=1, data_kind=data_kind).read())
return _parse_meta_data(_get_file(identifier=identifier, field=field, file_number=1, data_kind=data_kind)[0].read())
def _split_file(file):
"""
split the file in meta-data and time-series-data
Args:
file (io.IOBase): file of the series
Returns:
(list, list): meta-data and time-series-data
"""
lines = file.readlines()
file.close()
i = lines.index('Werte:\n')
meta = lines[:i]
data = lines[i+1:]
return meta, data
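# Hedged, synthetic example (real ehyd files contain many more header fields): the file
# is simply split at the 'Werte:' marker into meta-data lines and value lines.
def _example_split_file():
    """Sketch: split a minimal in-memory file into its meta and data parts."""
    buf = io.StringIO('Messstelle: Beispiel\nWerte:\n01.01.2000 00:00:00 ; 0,0\n')
    meta, data = _split_file(buf)
    return meta, data  # -> (['Messstelle: Beispiel\n'], ['01.01.2000 00:00:00 ; 0,0\n'])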
def read_ehyd_file(filepath_or_buffer, series_label='data'):
"""
read ehyd data file
Args:
filepath_or_buffer (io.IOBase | str):
series_label (str): name of the series
Returns:
pandas.Series: time-series with meta-data ts.attrs
"""
if isinstance(filepath_or_buffer, str):
csv_file = codecs.open(filepath_or_buffer, 'r', encoding=ENCODING)
elif isinstance(filepath_or_buffer, io.IOBase):
csv_file = filepath_or_buffer
else:
raise NotImplementedError()
# ___________________________
meta, data = _split_file(csv_file)
# ___________________________
    ts = pd.read_csv(io.StringIO('\n'.join(data).replace(' ', '')), sep=';', decimal=',', index_col=0,
                     na_values='Lücke', header=None, squeeze=True,  # names=[series_label],
                     date_parser=lambda s: pd.to_datetime(s, format='%d.%m.%Y%H:%M:%S'))
    # minimal completion (assumption): name the series and attach the parsed meta-data
    ts.name = series_label
    ts.attrs = _parse_meta_data(''.join(meta))
    return ts
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
                                 'their dataset and whether they can accept an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
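# scree plot: each eigenvalue equals the variance captured by the corresponding principal component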
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
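# A minimal sketch of the |z| < 3 row filter that the callbacks below repeat inline
# (illustrative only; the callbacks keep their own inlined copies rather than calling this helper):
def drop_outlier_rows(numeric_df, threshold=3):
    """Return (filtered_df, boolean_mask) keeping rows whose features all have |z-score| < threshold."""
    z_scores = scipy.stats.zscore(numeric_df)
    keep_mask = (np.abs(z_scores) < threshold).all(axis=1)
    return numeric_df[keep_mask], keep_mask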
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
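# PCA on z-scored features is equivalent to an eigendecomposition of the correlation matrix;
# the unscaled branches below ("Covariance") correspond to PCA of the covariance matrix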
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
# explained variance of the two principal components
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each of the principal components
# loading of each feature in principal components
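# (loading = eigenvector scaled by sqrt(eigenvalue); with z-scored inputs this equals the
# Pearson correlation between a feature and a principal component)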
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
# combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=list(data.columns), y=list(data.index),
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# each cell shows the loading of a feature on a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_feature_heatmap(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson')
r2_dff = correlation_dff * correlation_dff
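# squaring Pearson's r gives R², the proportion of variance in one feature accounted for by the other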
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson')
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
# each cell shows the squared Pearson correlation (R²) between two features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# Biplot / loadings plot callback (correlation- or covariance-matrix PCA, with or without outliers)
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
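# each loading is drawn as a segment from the origin to (PC1, PC2): the zero rows concatenated here share a
# 'line_group' value with the loading rows, so Plotly connects the two points into one vector per feature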
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# loading vectors for the selected outlier / matrix-type combination
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
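# with sizemode='area', marker areas scale as size/sizeref, so dividing by max(size) above keeps the
# largest bubble at a fixed on-screen size regardless of the chosen variable's range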
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
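# cos2 (the squared loading summed over PC1 and PC2) measures how well each feature is represented in the PC1-PC2 plane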
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# scaling data
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
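# build an N-step light-to-dark blue gradient (via the colour package); because the data are sorted by cos2,
# successive loading vectors are coloured from low to high cos2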
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
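        # Keep only rows where every numeric column lies within 3 standard deviations of its mean.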
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
        # After computing each variable's total contribution (used for the color scale), keep it together with PC1 and PC2 in a separate dataframe.
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
        # After computing each variable's total contribution (used for the color scale), keep it together with PC1 and PC2 in a separate dataframe.
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
        # After computing each variable's total contribution (used for the color scale), keep it together with PC1 and PC2 in a separate dataframe.
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_dataf_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_outlier_scale_dff_covar = pd.concat(
[zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='contrib')
# scaling data
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0),
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["PC1_cos2"] = loading_scale_input_df["PC1"] ** 2
loading_scale_input_df["PC2_cos2"] = loading_scale_input_df["PC2"] ** 2
loading_scale_input_df["PC1_contrib"] = \
(loading_scale_input_df["PC1_cos2"] * 100) / (loading_scale_input_df["PC1_cos2"].sum(axis=0))
loading_scale_input_df["PC2_contrib"] = \
(loading_scale_input_df["PC2_cos2"] * 100) / (loading_scale_input_df["PC2_cos2"].sum(axis=0))
loading_scale_input_df["contrib"] = loading_scale_input_df["PC1_contrib"] + loading_scale_input_df[
"PC2_contrib"]
loading_scale_input_dataf = pd.concat(
[loading_scale_input_df.iloc[:, 0:2], loading_scale_input_df.iloc[:, 6]], axis=1)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_dataf, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["PC1_cos2"] = loading_scale_input_outlier_df["PC1"] ** 2
loading_scale_input_outlier_df["PC2_cos2"] = loading_scale_input_outlier_df["PC2"] ** 2
loading_scale_input_outlier_df["PC1_contrib"] = \
(loading_scale_input_outlier_df["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df["PC2_contrib"] = \
(loading_scale_input_outlier_df["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df["contrib"] = loading_scale_input_outlier_df["PC1_contrib"] + \
loading_scale_input_outlier_df[
"PC2_contrib"]
loading_scale_input_outlier_dataf = pd.concat(
[loading_scale_input_outlier_df.iloc[:, 0:2], loading_scale_input_outlier_df.iloc[:, 6]], axis=1)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat(
[loading_scale_input_outlier_dataf, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_dataf.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='contrib')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["PC1_cos2"] = loading_scale_input_df_covar["PC1"] ** 2
loading_scale_input_df_covar["PC2_cos2"] = loading_scale_input_df_covar["PC2"] ** 2
loading_scale_input_df_covar["PC1_contrib"] = \
(loading_scale_input_df_covar["PC1_cos2"] * 100) / (loading_scale_input_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_df_covar["PC2_contrib"] = \
(loading_scale_input_df_covar["PC2_cos2"] * 100) / (loading_scale_input_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_df_covar["contrib"] = loading_scale_input_df_covar["PC1_contrib"] + \
loading_scale_input_df_covar[
"PC2_contrib"]
loading_scale_input_dataf_covar = pd.concat(
[loading_scale_input_df_covar.iloc[:, 0:2], loading_scale_input_df_covar.iloc[:, 6]], axis=1)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_dataf_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["PC1_cos2"] = loading_scale_input_outlier_df_covar["PC1"] ** 2
loading_scale_input_outlier_df_covar["PC2_cos2"] = loading_scale_input_outlier_df_covar["PC2"] ** 2
loading_scale_input_outlier_df_covar["PC1_contrib"] = \
(loading_scale_input_outlier_df_covar["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["PC2_contrib"] = \
(loading_scale_input_outlier_df_covar["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["contrib"] = loading_scale_input_outlier_df_covar["PC1_contrib"] + \
loading_scale_input_outlier_df_covar[
"PC2_contrib"]
loading_scale_input_outlier_dataf_covar = pd.concat(
[loading_scale_input_outlier_df_covar.iloc[:, 0:2], loading_scale_input_outlier_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat(
[loading_scale_input_outlier_dataf_covar, line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff_covar = pd.concat(
[zero_scale_input_outlier_df_covar, zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='contrib')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_sort_covar
variance = Var_scale_input_outlier_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0)
))
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('download-link', 'download'),
[Input('all-custom-choice', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(all_custom, outlier, matrix_type):
if all_custom == 'All' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_data.csv'
return download
@app.callback(Output('download-link', 'href'),
[Input('all-custom-choice', 'value'),
Input('feature-input', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')])
def update_link(all_custom, input, outlier, matrix_type, data):
if not data:
        return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
        finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
import re
import logging
from functools import reduce, partial
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor, as_completed
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from influxdb.resultset import ResultSet
from requests.exceptions import RequestException
from .connection import get_client, InfluxDBException, _timeout
from .util import aslist, asstr
from .db import _check_table, _CATEGORICAL_COLUMNS, AGGREGATE
from . import db
__all__ = ['query', 'query_async', 'getdf']
log = logging.getLogger(__name__)
def query(query: str, **kwargs) -> ResultSet:
"""
Fetch results of a raw SQL query.
Parameters
----------
query : str
An SQL query to fetch results for.
kwargs :
Passed to ``influxdb.client.InfluxDBClient``.
Returns
-------
influxdb.resultset.ResultSet
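    Examples
    --------
    A minimal sketch; the measurement name ``ping_1m`` is hypothetical and
    depends on which tables exist in the database:

    >>> rs = query("SELECT * FROM ping_1m LIMIT 5")    # doctest: +SKIP
    >>> points = list(rs.get_points())                 # doctest: +SKIP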
"""
try:
client = get_client()
except InfluxDBException:
log.exception('Failed to instantiate InfluxDB client:')
raise
kwargs.setdefault('epoch', 'ms')
try:
log.debug('Executing query: %s', query)
result = client.query(query, **kwargs)
log.debug('Result set size: %d, %d rows', len(result), len(tuple(result.get_points())))
return result
except RequestException:
log.error('Failed to execute query in %d seconds: %s', _timeout, query)
raise
except InfluxDBException:
log.error('Failed to execute query: %s', query)
raise
def query_async(queries: list, callback=None, **kwargs) -> ResultSet:
"""
Generator fetching results of SQL queries in an asynchronous manner.
Parameters
----------
queries : list of str
        A list of SQL queries to fetch results for.
callback : callable
The function to call after each successfully executed query.
kwargs :
Passed to ``influxdb.client.InfluxDBClient``.
Yields
------
influxdb.resultset.ResultSet
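    Examples
    --------
    A minimal sketch with hypothetical measurement names:

    >>> queries = ["SELECT * FROM ping_1m LIMIT 5",
    ...            "SELECT * FROM modem_1m LIMIT 5"]
    >>> for rs in query_async(queries):                # doctest: +SKIP
    ...     print(len(list(rs.get_points())))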
"""
if isinstance(queries, str):
queries = [queries]
with ThreadPoolExecutor(max_workers=len(queries)) as executor:
try:
for future in as_completed((executor.submit(query, query_str, **kwargs)
for query_str in queries),
# +1 to allow InfluxDBClient (requests) to fail first
timeout=_timeout + 1):
yield future.result()
if callback:
callback()
except (futures.TimeoutError, RequestException):
log.error("Failed to execute all queries in %d seconds: %s", _timeout, queries)
raise
def _query_str(table, *, freq, columns='', where='', resample='', limit=1000):
parts = ['SELECT {columns} FROM {table}_{freq}'.format(
columns=asstr(columns) or (table._select_agg() if resample else '*'),
table=str(table),
freq=freq)]
if where:
where = aslist(where, str)
parts.append('WHERE ' + ' AND '.join(where))
if resample:
resample = 'time({}), '.format(resample)
parts.append('GROUP BY ' + (resample + table._groupby()).lstrip(','))
if limit:
parts.append('LIMIT ' + str(int(limit)))
query_str = ' '.join(parts)
return query_str
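# Rough shape of the string _query_str builds (table/column names hypothetical):
#   SELECT <columns> FROM ping_1m WHERE NodeId = '101' AND time >= '...' GROUP BY time(1h), <tags> LIMIT 1000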
def merge_asof_helper(left, right, tolerance=None):
    # Work around https://github.com/pandas-dev/pandas/issues/16454: pandas does not allow
    # multiple pd.Categorical "by" columns in merge_asof, so match on the shared
    # non-categorical columns instead.
if 'time' in left.columns.values.tolist():
left.time = pd.to_datetime(left.time, unit='ms')
left.set_index('time', inplace=True)
left.sort_index(inplace=True)
right.time = pd.to_datetime(right.time, unit='ms')
right.set_index('time', inplace=True)
right.sort_index(inplace=True)
temp = pd.merge_asof(left, right, left_index=True, right_index=True,
by=[a for a in list(set(left.columns.values.tolist()).intersection(right.columns.values.tolist()))
if a not in ['Interface','Operator'] ],
direction='backward', tolerance=tolerance, suffixes=('_left', '_right'))
temp.rename(columns=lambda x: x if not x.endswith('_left') else x[:-len('_left')], inplace=True) # rename left cols, there is more data in it
temp.drop(columns=[x for x in temp.columns.values.tolist() if x.endswith('_right')], inplace=True, axis=1) # drop right cols, not so much data
return temp
def getdf(tables, *, nodeid='', where='', limit=100000,
start_time=None, end_time=None,
freq=None, resample='',
interpolate=False,
tolerance=None,
callback=None) -> pd.DataFrame:
"""
Return MONROE data as Pandas DataFrame.
Parameters
----------
tables : str or list of str
Table name(s) to query and merge. Tables can be from the list
        as returned by ``all_tables()``.
nodeid : int or str or list of int or str
A single node ID or a list thereof. If empty, results for all
available nodes are returned.
where : str or list of str
Additional SQL WHERE conditions.
limit : int
Hard-limit on the number of rows requested from the DB for each
NodeId.
start_time : str or datetime or pandas.Timestamp
Query results after start time. Default is set to 14 days before
`end_time` or the min timestamp of `tables`, whichever is later.
end_time : str or datetime or pandas.Timestamp
Query results before end time. Default is set to now or the
max timestamp of `tables`, whichever is sooner.
freq : str, from {'10ms', '1s', '1m', '30m'}
The level of detail to query. Higher precision results in MORE
data. By default, `freq` is set to a sensible (manageable) value
based on query time span.
resample : str
Resampling rule (such as '1h', '2h', '1d', ...) from
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
interpolate : str or bool, default False
Interpolation method supported by ``pandas.DataFrame.interpolate``,
or ``True`` for `linear` interpolation of missing values.
Rows are grouped by NodeId,Iccid before interpolation.
callback : callable
The function to call after each successfully executed query.
Returns
-------
pandas.DataFrame
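    Examples
    --------
    A minimal sketch; the table names and node ID are hypothetical:

    >>> df = getdf(['ping', 'modem'], nodeid=101,      # doctest: +SKIP
    ...            start_time='2017-01-01', end_time='2017-01-07',
    ...            freq='1m', resample='1h', interpolate=True)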
"""
tables = list(map(_check_table, aslist(tables)))
if not tables:
raise ValueError('Need a table name to fetch')
if where and isinstance(where, str):
where = [where]
where = aslist(where or [], str)
if nodeid:
nodeid = aslist(nodeid, str)
nodedid_where = ['NodeId = {!r}'.format(str(node))
for node in nodeid]
where.append('(' + ' OR '.join(nodedid_where) + ')')
# Sanitize input date and time
start_time, end_time = _check_time(start_time, end_time, tables=tables)
where.append('time >= {!r}'.format(start_time.isoformat()))
where.append('time <= {!r}'.format(end_time.isoformat()))
# Determine correct level-of-detail table
freq = _check_freq(freq, tspan=end_time - start_time, nodeid=nodeid)
def _where_field_name(condition, _identifiers=re.compile(r'\w+').findall):
return _identifiers(condition)[0]
def _query_for_table(table, where, freq, limit, columns=''):
table_columns = {'time'} | set(table._columns())
_where = [cond for cond in where
if _where_field_name(cond) in table_columns]
return _query_str(table, columns=columns, freq=freq, where=_where, limit=limit)
# Construct queries with their applicable "where" parameters
queries = [_query_for_table(table, where, freq, limit)
for table in tables]
# If output will contain column Iccid, ensure it also contains modem.Interface
#if db.modem not in tables and any('Iccid' in table and table!='nettest' for table in tables):
# queries.append(_query_for_table(db.modem, where, freq, limit,
# columns=['Interface', 'Iccid']))
# Construct response data frames; One df per measurement per tag
dfs = []
for results in query_async(queries, callback=callback):
df = _result_set_to_df(results)
if df is not None:
dfs.append(df)
if not dfs:
return pd.DataFrame()
# Join all tables on intersecting columns, namely 'time', 'NodeId', 'IccId', ...
if (tolerance is not None) and (len(tables)>1):
df = reduce(partial(merge_asof_helper, tolerance=tolerance), sorted(dfs, key=lambda x: x.size, reverse=True))
else:
df = reduce(partial(pd.merge, how='outer', copy=False), dfs)
del dfs
# Transform known categorical columns into Categoricals
for col in df:
if col in _CATEGORICAL_COLUMNS:
df[col] = df[col].astype('category')
# Strip trailing '.0' in categoricals constructed from floats (ints upcasted via NaNs)
categories = df[col].cat.categories
if is_numeric_dtype(categories):
df[col].cat.categories = categories.astype(int).astype(str)
else:
# Avoid None values resulting in object dtype
df[col].fillna(np.nan, inplace=True)
# Index by time
if (tolerance is None) or (len(tables)==1):
        df.time = pd.to_datetime(df.time, unit='ms')
import gc
print("############################################")
print("## 4.1. 결합, 마스터 테이블에서 정보 얻기 ")
print("############################################")
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
hotel_tb=pd.read_csv('./data/hotel.csv', encoding='UTF-8')
result=pd.merge(reserve_tb, hotel_tb, on='hotel_id', how='inner')\
.query('people_num == 1 & is_business')
print(hotel_tb.head())
print(reserve_tb.head())
print('------------------')
print(result)
result=pd.merge(reserve_tb.query('people_num == 1'),
hotel_tb.query('is_business'),
on='hotel_id', how='inner')
print('------------------')
print(result)
print("############################################")
print("## 4.2. 결합, 조건에 따라 결합할 마스터 테이블 변경하기")
print("############################################")
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
hotel_tb=pd.read_csv('./data/hotel.csv', encoding='UTF-8')
print(hotel_tb.head())
small_area_mst=hotel_tb\
.groupby(['big_area_name', 'small_area_name'], as_index=False)\
.size().reset_index()
small_area_mst.columns=['index','big_area_name', 'small_area_name', 'hotel_cnt']
print(small_area_mst.head())
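# Use the small area as the join key only when it contains at least 20 other hotels;
# otherwise fall back to the big area.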
small_area_mst['join_area_id']=\
np.where(small_area_mst['hotel_cnt']-1>=20,
small_area_mst['small_area_name'],
small_area_mst['big_area_name'])
small_area_mst.drop(['hotel_cnt', 'big_area_name'], axis=1, inplace=True)
print('-------------------------')
print(small_area_mst.head())
base_hotel_mst=pd.merge(hotel_tb, small_area_mst, on='small_area_name')\
.loc[:, ['hotel_id', 'join_area_id']]
print('-------------------------')
print(base_hotel_mst.head())
del small_area_mst
gc.collect()
print('1------------------------')
recommend_hotel_mst=pd.concat([\
hotel_tb[['small_area_name', 'hotel_id']]\
.rename(columns={'small_area_name': 'join_area_id'}, inplace=False),
hotel_tb[['big_area_name', 'hotel_id']]\
.rename(columns={'big_area_name': 'join_area_id'}, inplace=False)\
])
print(recommend_hotel_mst.head())
print('2------------------------')
recommend_hotel_mst.rename(columns={'hotel_id':'rec_hotel_id'}, inplace=True)
print(recommend_hotel_mst.head())
print('3------------------------')
result=pd.merge(base_hotel_mst, recommend_hotel_mst, on='join_area_id')\
.loc[:,['hotel_id', 'rec_hotel_id']]\
.query('hotel_id != rec_hotel_id')
print('4------------------------')
print('-------------------------')
print(base_hotel_mst.head())
print('-------------------------')
print(recommend_hotel_mst.head())
print('-------------------------')
print(result)
print("############################################")
print("## 4.3. 과거의 데이터 정보 얻기 (n번 이전 까지의 데이터")
print("############################################")
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb=pd.read_csv('./data/reserve.csv', encoding='UTF-8')
hotel_tb=pd.read_csv('./data/hotel.csv', encoding='UTF-8')
print(hotel_tb.head())
result=reserve_tb.groupby('customer_id')\
.apply(lambda x: x.sort_values(by='reserve_datetime', ascending=True))\
.reset_index(drop=True)
print(result)
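# price_avg: rolling mean of total_price over up to three reservations per customer
# (the current one and the two before it); the shift(1) further below then drops the
# current reservation so that only past bookings feed the feature.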
result['price_avg']=pd.Series(
result.groupby('customer_id')
['total_price'].rolling(center=False, window=3, min_periods=1).mean()
.reset_index(drop=True)
)
print('-----------------')
print(result)
result['price_avg']=\
result.groupby('customer_id')['price_avg'].shift(periods=1)
print('-----------------')
print(result)
print("############################################")
print("## 4.4. 과거의 데이터 정보 얻기 (과거 n일의 합계)")
print("############################################")
import pandas as pd
import numpy as np
import pandas.tseries.offsets as offsets
import operator
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
reserve_tb = pd.read_csv('./data/reserve.csv', encoding='UTF-8')
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import os
import argparse
import types
import pandas as pd
import numpy as np
from pdsql import mssql
from datetime import datetime
import yaml
import itertools
import lowflows as lf
import util
pd.options.display.max_columns = 10
run_time_start = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
print(run_time_start)
try:
#####################################
### Read parameters file
base_dir = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(base_dir, 'parameters-test.yml')) as param:
param = yaml.safe_load(param)
# parser = argparse.ArgumentParser()
# parser.add_argument('yaml_path')
# args = parser.parse_args()
#
# with open(args.yaml_path) as param:
# param = yaml.safe_load(param)
## Integrety checks
use_types_check = np.in1d(list(param['misc']['use_types_codes'].keys()), param['misc']['use_types_priorities']).all()
if not use_types_check:
raise ValueError('use_type_priorities parameter does not encompass all of the use type categories. Please fix the parameters file.')
#####################################
### Read the hydro log
# max_date_stmt = "select max(RunTimeStart) from " + param.log_table + " where HydroTable='" + param.process_name + "' and RunResult='pass' and ExtSystem='" + param.ext_system + "'"
#
# last_date1 = mssql.rd_sql(server=param.hydro_server, database=param.hydro_database, stmt=max_date_stmt).loc[0][0]
#
# if last_date1 is None:
# last_date1 = '1900-01-01'
# else:
# last_date1 = str(last_date1.date())
#
# print('Last sucessful date is ' + last_date1)
#######################################
### Read in source data and update accela tables in ConsentsReporting db
print('--Reading in source data...')
## Make object to contain the source data
db = types.SimpleNamespace()
for i, p in param['source data'].items():
setattr(db, i, mssql.rd_sql(p['server'], p['database'], p['table'], p['col_names'], rename_cols=p['rename_cols'], username=p['username'], password=p['password']))
if (p['database'] == 'Accela') & (not (p['table'] in ['Ecan.vAct_Water_AssociatedPermits', 'Ecan.vQA_Relationship_Actuals'])):
table1 = 'Accela.' + p['table'].split('Ecan.')[1]
print(table1)
t1 = getattr(db, i).copy().dropna(subset=p['pk'])
t1.drop_duplicates(p['pk'], inplace=True)
print('update in db')
new_ones, _ = mssql.update_from_difference(t1, param['output']['server'], param['output']['database'], table1, on=p['pk'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
######################################
### Populate base tables
print('--Update base tables')
## HydroGroup
hf1 = pd.DataFrame(param['misc']['HydroGroup'])
hf1['ModifiedDate'] = run_time_start
hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
hf_diff1 = hf1[~hf1.HydroGroup.isin(hf0.HydroGroup)]
if not hf_diff1.empty:
mssql.to_mssql(hf_diff1, param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
## Activity
act1 = param['misc']['Activities']['ActivityType']
act2 = pd.DataFrame(list(itertools.product(act1, hf0.HydroGroupID.tolist())), columns=['ActivityType', 'HydroGroupID'])
act2['ModifiedDate'] = run_time_start
act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
act_diff1 = act2[~act2[['ActivityType', 'HydroGroupID']].isin(act0[['ActivityType', 'HydroGroupID']]).any(axis=1)]
if not act_diff1.empty:
mssql.to_mssql(act_diff1, param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
# Combine activity and hydro features
act_types1 = pd.merge(act0[['ActivityID', 'ActivityType', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID')
act_types1['ActivityName'] = act_types1['ActivityType'] + ' ' + act_types1['HydroGroup']
## AlloBlock
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
sw_blocks1 = pd.Series(db.wap_allo['sw_allo_block'].unique())
gw_blocks1 = pd.Series(db.allocated_volume['allo_block'].unique())
# Fixes
wap_allo1 = db.wap_allo.copy()
wap_allo1['sw_allo_block'] = wap_allo1['sw_allo_block'].str.strip()
wap_allo1.loc[wap_allo1.sw_allo_block == 'Migration: Not Classified', 'sw_allo_block'] = 'A'
allo_vol1 = db.allocated_volume.copy()
allo_vol1['allo_block'] = allo_vol1['allo_block'].str.strip()
allo_vol1.loc[allo_vol1.allo_block == 'Migration: Not Classified', 'allo_block'] = 'A'
# Determine blocks and what needs to be added
sw_blocks1 = set(wap_allo1['sw_allo_block'].unique())
gw_blocks1 = set(allo_vol1['allo_block'].unique())
blocks1 = sw_blocks1.union(gw_blocks1)
ab1 = pd.DataFrame(list(itertools.product(blocks1, hf0.HydroGroupID.tolist())), columns=['AllocationBlock', 'HydroGroupID'])
ab1['ModifiedDate'] = run_time_start
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
ab_diff1 = ab1[~ab1[['AllocationBlock', 'HydroGroupID']].isin(ab0[['AllocationBlock', 'HydroGroupID']]).any(axis=1)]
if not ab_diff1.empty:
mssql.to_mssql(ab_diff1, param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
# Combine alloblock and hydro features
ab_types1 = pd.merge(ab0[['AlloBlockID', 'AllocationBlock', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID').drop('HydroGroupID', axis=1)
## Attributes
att1 = pd.DataFrame(param['misc']['Attributes'])
att1['ModifiedDate'] = run_time_start
att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
att_diff1 = att1[~att1.Attribute.isin(att0.Attribute)]
if not att_diff1.empty:
mssql.to_mssql(att_diff1, param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
##################################################
### Sites and streamdepletion
print('--Update sites tables')
## takes
wap_allo1['WAP'] = wap_allo1['WAP'].str.strip().str.upper()
    wap_allo1.loc[~wap_allo1.WAP.str.contains(r'[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
wap1 = wap_allo1['WAP'].unique()
wap1 = wap1[~pd.isnull(wap1)]
## Diverts
div1 = db.divert.copy()
div1['WAP'] = div1['WAP'].str.strip().str.upper()
    div1.loc[~div1.WAP.str.contains(r'[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
wap2 = div1['WAP'].unique()
wap2 = wap2[~pd.isnull(wap2)]
## Combo
waps = np.concatenate((wap1, wap2), axis=None)
## Check that all WAPs exist in the USM sites table
usm_waps1 = db.sites[db.sites.ExtSiteID.isin(waps)].copy()
usm_waps1[['NZTMX', 'NZTMY']] = usm_waps1[['NZTMX', 'NZTMY']].astype(int)
if len(wap1) != len(usm_waps1):
miss_waps = set(wap1).difference(set(usm_waps1.ExtSiteID))
print('Missing {} WAPs in USM'.format(len(miss_waps)))
wap_allo1 = wap_allo1[~wap_allo1.WAP.isin(miss_waps)].copy()
## Update ConsentsSites table
cs1 = usm_waps1[['ExtSiteID', 'SiteName']].copy()
# cs1['SiteType'] = 'WAP'
new_sites, _ = mssql.update_from_difference(cs1, param['output']['server'], param['output']['database'], 'ConsentsSites', on='ExtSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentsSites', 'pass', '{} sites updated'.format(len(new_sites)), username=param['output']['username'], password=param['output']['password'])
cs0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ConsentsSites', ['SiteID', 'ExtSiteID'], username=param['output']['username'], password=param['output']['password'])
cs_waps2 = pd.merge(cs0, usm_waps1.drop('SiteName', axis=1), on='ExtSiteID')
cs_waps3 = pd.merge(cs_waps2, db.wap_sd, on='ExtSiteID').drop('ExtSiteID', axis=1).round()
new_waps, _ = mssql.update_from_difference(cs_waps3, param['output']['server'], param['output']['database'], 'SiteStreamDepletion', on='SiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'WAP', 'pass', '{} sites updated'.format(len(new_waps)), username=param['output']['username'], password=param['output']['password'])
## Read db table
# wap0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'SiteStreamDepletion')
## Make linked WAP-SiteID table
wap_site = cs0.rename(columns={'ExtSiteID': 'WAP'})
##################################################
### Permit table
print('--Update Permit table')
## Clean data
permits1 = db.permit.copy()
permits1['RecordNumber'] = permits1['RecordNumber'].str.strip().str.upper()
permits1['ConsentStatus'] = permits1['ConsentStatus'].str.strip()
permits1['EcanID'] = permits1['EcanID'].str.strip().str.upper()
permits1['FromDate'] = pd.to_datetime(permits1['FromDate'], infer_datetime_format=True, errors='coerce')
permits1['ToDate'] = pd.to_datetime(permits1['ToDate'], infer_datetime_format=True, errors='coerce')
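    # Give 'Issued - s124 Continuance' consents a nominal 30-year term from their start date.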
permits1.loc[permits1['ConsentStatus'] == 'Issued - s124 Continuance', 'ToDate'] = permits1.loc[permits1['ConsentStatus'] == 'Issued - s124 Continuance', 'FromDate'] + pd.DateOffset(years=30)
permits1[['NZTMX', 'NZTMY']] = permits1[['NZTMX', 'NZTMY']].round()
permits1.loc[(permits1['FromDate'] < '1950-01-01'), 'FromDate'] = np.nan
permits1.loc[(permits1['ToDate'] < '1950-01-01'), 'ToDate'] = np.nan
## Filter data
permits2 = permits1.drop_duplicates('RecordNumber')
permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
# permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NZTMX.notnull() & permits2.NZTMY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
## Convert datetimes to date
permits2['FromDate'] = permits2['FromDate'].dt.date
permits2['ToDate'] = permits2['ToDate'].dt.date
permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = '1900-01-01'
permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = '1900-01-01'
## Save results
new_permits, _ = mssql.update_from_difference(permits2, param['output']['server'], param['output']['database'], 'Permit', on='RecordNumber', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'Permit', 'pass', '{} rows updated'.format(len(new_permits)), username=param['output']['username'], password=param['output']['password'])
## Read db table
permits0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Permit', username=param['output']['username'], password=param['output']['password'])
##################################################
### Parent-Child
print('--Update Parent-child table')
## Clean data
pc1 = db.parent_child.copy()
pc1['ParentRecordNumber'] = pc1['ParentRecordNumber'].str.strip().str.upper()
pc1['ChildRecordNumber'] = pc1['ChildRecordNumber'].str.strip().str.upper()
pc1['ParentCategory'] = pc1['ParentCategory'].str.strip()
pc1['ChildCategory'] = pc1['ChildCategory'].str.strip()
## Filter data
pc1 = pc1.drop_duplicates()
pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]
## Check foreign keys
crc1 = permits0.RecordNumber.unique()
pc2 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()
## Save results
new_pc, _ = mssql.update_from_difference(pc2, param['output']['server'], param['output']['database'], 'ParentChild', on=['ParentRecordNumber', 'ChildRecordNumber'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ParentChild', 'pass', '{} rows updated'.format(len(new_pc)), username=param['output']['username'], password=param['output']['password'])
## Read db table
pc0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ParentChild', username=param['output']['username'], password=param['output']['password'])
#################################################
### AllocatedRatesVolumes
print('--Update Allocation tables')
attr1 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', ['AttributeID', 'Attribute'], username=param['output']['username'], password=param['output']['password'])
## Rates
# Clean data
wa1 = wap_allo1.copy()
wa1['RecordNumber'] = wa1['RecordNumber'].str.strip().str.upper()
wa1['take_type'] = wa1['take_type'].str.strip().str.title()
wa1['FromMonth'] = wa1['FromMonth'].str.strip().str.title()
wa1['ToMonth'] = wa1['ToMonth'].str.strip().str.title()
wa1['IncludeInSwAllocation'] = wa1['IncludeInSwAllocation'].str.strip().str.title()
    wa1['AllocatedRate'] = pd.to_numeric(wa1['AllocatedRate'], errors='coerce')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import itertools
import datetime
import matplotlib.colors as colors
import matplotlib.cm as cm
import os
import statistics
import pysolar
#-----------------------------------------------------------------------------
# Code motivation ------------------------------------------------------------
'Code to relate and analyse the clear-sky index (Kt*) and the clearness index (Kt).'
'It also includes an analysis of their rate of change to evaluate their variability, together'
'with the cloud cover fraction. It is done on the historical data because the aim is to analyse the variability.'
Theoric_Model = 'GIS' ##---> 'GIS' to use the GIS model or 'Piranometro' to use the pyranometer one
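## Illustrative sketch (added for clarity, not part of the original analysis): one rough way
## to estimate a clear-sky index Kt* as measured irradiance over a theoretical clear-sky value.
## The coordinates, timestamp and measured value below are invented placeholders, pysolar's
## direct-radiation estimate is only a simple proxy for the clear-sky reference, and this
## helper is never called by the script (the real reference comes from Theoric_Model above).
def _example_clear_sky_index():
    from pysolar import solar, radiation
    when = datetime.datetime(2018, 6, 1, 17, 0, tzinfo=datetime.timezone.utc)  # ~12:00 local time
    lat, lon = 6.25, -75.56                       # assumed Medellin-area coordinates
    altitude_deg = solar.get_altitude(lat, lon, when)
    ghi_clear = radiation.get_radiation_direct(when, altitude_deg)  # W/m2, clear-sky proxy
    ghi_measured = 850.0                          # placeholder for a 'radiacion' reading
    return ghi_measured / ghi_clear if ghi_clear > 0 else np.nan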
##############################################################################
#-----------------------------------------------------------------------------
# Paths for the fonts --------------------------------------------------------
##############################################################################
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################################################
##----------------------------------READING THE PYRANOMETER DATA----------------------------------##
##########################################################################################################
df_pira_TS = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018_2019.txt', parse_dates=[2])
df_pira_TS = df_pira_TS.set_index(["fecha_hora"])
df_pira_TS.index = df_pira_TS.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_TS.index = df_pira_TS.index.tz_localize(None)
df_pira_TS = df_pira_TS[df_pira_TS['radiacion'] >=0]
df_pira_CI = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60022018_2019.txt', parse_dates=[2])
df_pira_CI = df_pira_CI.set_index(["fecha_hora"])
df_pira_CI.index = df_pira_CI.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_CI.index = df_pira_CI.index.tz_localize(None)
df_pira_CI = df_pira_CI[df_pira_CI['radiacion'] >=0]
df_pira_JV = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60032018_2019.txt', parse_dates=[2])
df_pira_JV = df_pira_JV.set_index(["fecha_hora"])
df_pira_JV.index = df_pira_JV.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_JV.index = df_pira_JV.index.tz_localize(None)
df_pira_JV = df_pira_JV[df_pira_JV['radiacion'] >=0]
## ------------------------------------HOURLY RADIATION DATA----------------------------- ##
df_pira_JV_h = df_pira_JV.groupby(pd.Grouper(freq="H")).mean()
df_pira_CI_h = df_pira_CI.groupby(pd.Grouper(freq="H")).mean()
df_pira_TS_h = df_pira_TS.groupby(pd.Grouper(freq="H")).mean()
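## Illustrative sketch (added): grouping on pd.Grouper(freq="H") averages a DatetimeIndexed
## frame into hourly bins, equivalent here to .resample("H").mean(). The toy values are
## invented and this helper is never called by the script.
def _example_hourly_mean():
    _idx = pd.date_range('2018-01-01 10:00', periods=4, freq='30T')
    _toy = pd.DataFrame({'radiacion': [100.0, 200.0, 300.0, 400.0]}, index=_idx)
    return _toy.groupby(pd.Grouper(freq='H')).mean()  # 10:00 -> 150.0, 11:00 -> 350.0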
import numpy as np
import pandas as pd
import scripts.data as d
def compute_similarity_matrix(df, method="cosine"):
    similarity_matrix = np.zeros([df.shape[1], df.shape[1]])
    for i, offer1 in enumerate(df.columns):
        for j, offer2 in enumerate(df.columns):
            mask = df[offer1].notna() & df[offer2].notna()
            if method == "cosine":
                numerator = sum(df.loc[mask, offer1] * df.loc[mask, offer2])
                denominator = np.sqrt(sum(df.loc[mask, offer1] ** 2)) * np.sqrt(
                    sum(df.loc[mask, offer2] ** 2)
                )
                similarity_matrix[i, j] = (
                    numerator / denominator if denominator != 0 else np.nan
                )
    similarity_matrix_df = pd.DataFrame(similarity_matrix, columns=df.columns)
    similarity_matrix_df.index = df.columns
    return similarity_matrix_df
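## Illustrative usage sketch (added, not part of the original module): compute_similarity_matrix
## expects a user-by-offer DataFrame (rows = users, columns = offers) with NaN for missing
## entries and returns an offer-by-offer cosine-similarity DataFrame. The toy values are
## invented and this helper is never called by the module.
def _example_similarity_usage():
    toy = pd.DataFrame(
        {"offer_a": [1.0, 0.5, np.nan], "offer_b": [1.0, np.nan, 0.0], "offer_c": [0.0, 0.5, 1.0]},
        index=["user_1", "user_2", "user_3"],
    )
    return compute_similarity_matrix(toy, method="cosine")  # 3x3 DataFrame with 1.0 on the diagonal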
def compute_similarities():
    portfolio_pp, profile_pp, transcript_pp = d.read_and_preprocess()
    events_binary = pd.get_dummies(transcript_pp["event"])
    transcript_comb = pd.concat([transcript_pp, events_binary], axis=1)
    # Create the user-offer matrix based on the proportion of times an offer is completed
    user_offer_actions = transcript_comb.groupby(["person", "offer_id"]).agg(
        offer_completed_sum=pd.NamedAgg(column="offer completed", aggfunc="sum"),
        offer_viewed_sum=pd.NamedAgg(column="offer viewed", aggfunc="sum"),
    )