prompt: string (lengths 19 to 1.03M)
completion: string (lengths 4 to 2.12k)
api: string (lengths 8 to 90)
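Each row appears to pair a code prefix ("prompt") with the text that finishes the next API call ("completion") and the fully qualified name of that API ("api"). Below is a minimal sketch of one row as a plain Python dict, built from the first example that follows; the dict layout and the "..." truncation are illustrative, and only the three field names come from the schema above.

# Hypothetical in-memory view of one row (field names from the schema above;
# the long prompt value is elided with "..." for brevity).
row = {
    "prompt": "# -*- coding: utf-8 -*- ... idx.fillna(",   # code prefix (19 to 1.03M chars)
    "completion": "pd.Timedelta('3 hour')",                # text that finishes the call (4 to 2.12k chars)
    "api": "pandas.Timedelta",                             # fully qualified API being called (8 to 90 chars)
}

# In this example the prompt stops right before the arguments of the API call
# that the completion supplies.
assert row["prompt"].endswith("idx.fillna(")
print({field: len(value) for field, value in row.items()})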
prompt:
# -*- coding: utf-8 -*-
from datetime import timedelta, time

import numpy as np

from pandas import (DatetimeIndex, Float64Index, Index, Int64Index, NaT,
                    Period, PeriodIndex, Series, Timedelta, TimedeltaIndex,
                    date_range, period_range, timedelta_range, notnull)

import pandas.util.testing as tm

import pandas as pd
from pandas.lib import Timestamp

from .common import Base


class DatetimeLike(Base):

    def test_shift_identity(self):

        idx = self.create_index()
        self.assert_index_equal(idx, idx.shift(0))

    def test_str(self):

        # test the string repr
        idx = self.create_index()
        idx.name = 'foo'
        self.assertFalse("length=%s" % len(idx) in str(idx))
        self.assertTrue("'foo'" in str(idx))
        self.assertTrue(idx.__class__.__name__ in str(idx))

        if hasattr(idx, 'tz'):
            if idx.tz is not None:
                self.assertTrue(idx.tz in str(idx))
        if hasattr(idx, 'freq'):
            self.assertTrue("freq='%s'" % idx.freqstr in str(idx))

    def test_view(self):
        super(DatetimeLike, self).test_view()
        i = self.create_index()

        i_view = i.view('i8')
        result = self._holder(i)
        tm.assert_index_equal(result, i)

        i_view = i.view(self._holder)
        result = self._holder(i)
        tm.assert_index_equal(result, i_view)


class TestDatetimeIndex(DatetimeLike, tm.TestCase):
    _holder = DatetimeIndex
    _multiprocess_can_split_ = True

    def setUp(self):
        self.indices = dict(index=tm.makeDateIndex(10))
        self.setup_indices()

    def create_index(self):
        return date_range('20130101', periods=5)

    def test_shift(self):

        # test shift for datetimeIndex and non datetimeIndex
        # GH8083

        drange = self.create_index()
        result = drange.shift(1)

        expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
                                  '2013-01-05', '2013-01-06'], freq='D')
        self.assert_index_equal(result, expected)

        result = drange.shift(-1)
        expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
                                  '2013-01-03', '2013-01-04'], freq='D')
        self.assert_index_equal(result, expected)

        result = drange.shift(3, freq='2D')
        expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
                                  '2013-01-10', '2013-01-11'], freq='D')
        self.assert_index_equal(result, expected)

    def test_construction_with_alt(self):

        i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
        i2 = DatetimeIndex(i, dtype=i.dtype)
        self.assert_index_equal(i, i2)

        i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
        self.assert_index_equal(i, i2)

        i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
        self.assert_index_equal(i, i2)

        i2 = DatetimeIndex(
            i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
        self.assert_index_equal(i, i2)

        # localize into the provided tz
        i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
        expected = i.tz_localize(None).tz_localize('UTC')
        self.assert_index_equal(i2, expected)

        # incompat tz/dtype
        self.assertRaises(ValueError, lambda: DatetimeIndex(
            i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))

    def test_pickle_compat_construction(self):
        pass

    def test_construction_index_with_mixed_timezones(self):
        # GH 11488
        # no tz results in DatetimeIndex
        result = Index(
            [Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
        exp = DatetimeIndex(
            [Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
        self.assert_index_equal(result, exp, exact=True)
        self.assertTrue(isinstance(result, DatetimeIndex))
        self.assertIsNone(result.tz)

        # same tz results in DatetimeIndex
        result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
                        Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
                       name='idx')
        exp = DatetimeIndex(
            [Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
             ], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNotNone(result.tz) self.assertEqual(result.tz, exp.tz) # same tz results in DatetimeIndex (DST) result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'), Timestamp('2011-08-01 10:00', tz='US/Eastern')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), Timestamp('2011-08-01 10:00')], tz='US/Eastern', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNotNone(result.tz) self.assertEqual(result.tz, exp.tz) # different tz results in Index(dtype=object) result = Index([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], name='idx') exp = Index([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], dtype='object', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertFalse(isinstance(result, DatetimeIndex)) result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], name='idx') exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], dtype='object', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertFalse(isinstance(result, DatetimeIndex)) # passing tz results in DatetimeIndex result = Index([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], tz='Asia/Tokyo', name='idx') exp = DatetimeIndex([Timestamp('2011-01-01 19:00'), Timestamp('2011-01-03 00:00')], tz='Asia/Tokyo', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) # length = 1 result = Index([Timestamp('2011-01-01')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNone(result.tz) # length = 1 with tz result = Index( [Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNotNone(result.tz) self.assertEqual(result.tz, exp.tz) def test_construction_index_with_mixed_timezones_with_NaT(self): # GH 11488 result = Index([pd.NaT, Timestamp('2011-01-01'), pd.NaT, Timestamp('2011-01-02')], name='idx') exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'), pd.NaT, Timestamp('2011-01-02')], name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNone(result.tz) # same tz results in DatetimeIndex result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), pd.NaT, Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')], name='idx') exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-01-02 10:00')], tz='Asia/Tokyo', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNotNone(result.tz) self.assertEqual(result.tz, exp.tz) # same tz results in DatetimeIndex (DST) result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'), pd.NaT, Timestamp('2011-08-01 10:00', tz='US/Eastern')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-08-01 10:00')], tz='US/Eastern', name='idx') self.assert_index_equal(result, exp, exact=True) 
self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNotNone(result.tz) self.assertEqual(result.tz, exp.tz) # different tz results in Index(dtype=object) result = Index([pd.NaT, Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')], name='idx') exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')], dtype='object', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertFalse(isinstance(result, DatetimeIndex)) result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')], name='idx') exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')], dtype='object', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertFalse(isinstance(result, DatetimeIndex)) # passing tz results in DatetimeIndex result = Index([pd.NaT, Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')], tz='Asia/Tokyo', name='idx') exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 19:00'), pd.NaT, Timestamp('2011-01-03 00:00')], tz='Asia/Tokyo', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) # all NaT result = Index([pd.NaT, pd.NaT], name='idx') exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNone(result.tz) # all NaT with tz result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx') exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) self.assertIsNotNone(result.tz) self.assertEqual(result.tz, exp.tz) def test_construction_dti_with_mixed_timezones(self): # GH 11488 (not changed, added explicit tests) # no tz results in DatetimeIndex result = DatetimeIndex( [Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx') exp = DatetimeIndex( [Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) # same tz results in DatetimeIndex result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')], name='idx') exp = DatetimeIndex( [Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00') ], tz='Asia/Tokyo', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) # same tz results in DatetimeIndex (DST) result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'), Timestamp('2011-08-01 10:00', tz='US/Eastern')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), Timestamp('2011-08-01 10:00')], tz='US/Eastern', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) # different tz coerces tz-naive to tz-awareIndex(dtype=object) result = DatetimeIndex([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01 05:00'), Timestamp('2011-01-02 10:00')], tz='US/Eastern', name='idx') self.assert_index_equal(result, exp, exact=True) self.assertTrue(isinstance(result, DatetimeIndex)) # tz mismatch affecting to tz-aware raises TypeError/ValueError with tm.assertRaises(ValueError): 
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], name='idx') with tm.assertRaises(TypeError): DatetimeIndex([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], tz='Asia/Tokyo', name='idx') with tm.assertRaises(ValueError): DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], tz='US/Eastern', name='idx') def test_astype(self): # GH 13149, GH 13209 idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) result = idx.astype(object) expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object) tm.assert_index_equal(result, expected) result = idx.astype(int) expected = Int64Index([1463356800000000000] + [-9223372036854775808] * 3, dtype=np.int64) tm.assert_index_equal(result, expected) rng = date_range('1/1/2000', periods=10) result = rng.astype('i8') self.assert_index_equal(result, Index(rng.asi8)) self.assert_numpy_array_equal(result.values, rng.asi8) def test_astype_with_tz(self): # with tz rng = date_range('1/1/2000', periods=10, tz='US/Eastern') result = rng.astype('datetime64[ns]') expected = (date_range('1/1/2000', periods=10, tz='US/Eastern') .tz_convert('UTC').tz_localize(None)) tm.assert_index_equal(result, expected) # BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str) expected = pd.Series( ['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object) tm.assert_series_equal(result, expected) result = Series(pd.date_range('2012-01-01', periods=3, tz='US/Eastern')).astype(str) expected = Series(['2012-01-01 00:00:00-05:00', '2012-01-02 00:00:00-05:00', '2012-01-03 00:00:00-05:00'], dtype=object) tm.assert_series_equal(result, expected) def test_astype_str_compat(self): # GH 13149, GH 13209 # verify that we are returing NaT as a string (and not unicode) idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) result = idx.astype(str) expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object) tm.assert_index_equal(result, expected) def test_astype_str(self): # test astype string - #10442 result = date_range('2012-01-01', periods=4, name='test_name').astype(str) expected = Index(['2012-01-01', '2012-01-02', '2012-01-03', '2012-01-04'], name='test_name', dtype=object) tm.assert_index_equal(result, expected) # test astype string with tz and name result = date_range('2012-01-01', periods=3, name='test_name', tz='US/Eastern').astype(str) expected = Index(['2012-01-01 00:00:00-05:00', '2012-01-02 00:00:00-05:00', '2012-01-03 00:00:00-05:00'], name='test_name', dtype=object) tm.assert_index_equal(result, expected) # test astype string with freqH and name result = date_range('1/1/2011', periods=3, freq='H', name='test_name').astype(str) expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00', '2011-01-01 02:00:00'], name='test_name', dtype=object) tm.assert_index_equal(result, expected) # test astype string with freqH and timezone result = date_range('3/6/2012 00:00', periods=2, freq='H', tz='Europe/London', name='test_name').astype(str) expected = Index(['2012-03-06 00:00:00+00:00', '2012-03-06 01:00:00+00:00'], dtype=object, name='test_name') tm.assert_index_equal(result, expected) def test_astype_datetime64(self): # GH 13149, GH 13209 idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) result = idx.astype('datetime64[ns]') tm.assert_index_equal(result, idx) self.assertFalse(result is idx) result = idx.astype('datetime64[ns]', 
copy=False) tm.assert_index_equal(result, idx) self.assertTrue(result is idx) idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST') result = idx_tz.astype('datetime64[ns]') expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]') tm.assert_index_equal(result, expected) def test_astype_raises(self): # GH 13149, GH 13209 idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) self.assertRaises(ValueError, idx.astype, float) self.assertRaises(ValueError, idx.astype, 'timedelta64') self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]') self.assertRaises(ValueError, idx.astype, 'datetime64') self.assertRaises(ValueError, idx.astype, 'datetime64[D]') def test_where_other(self): # other is ndarray or Index i = pd.date_range('20130101', periods=3, tz='US/Eastern') for arr in [np.nan, pd.NaT]: result = i.where(notnull(i), other=np.nan) expected = i tm.assert_index_equal(result, expected) i2 = i.copy() i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist()) result = i.where(notnull(i2), i2) tm.assert_index_equal(result, i2) i2 = i.copy() i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist()) result = i.where(notnull(i2), i2.values) tm.assert_index_equal(result, i2) def test_where_tz(self): i = pd.date_range('20130101', periods=3, tz='US/Eastern') result = i.where(notnull(i)) expected = i tm.assert_index_equal(result, expected) i2 = i.copy() i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist()) result = i.where(notnull(i2)) expected = i2 tm.assert_index_equal(result, expected) def test_get_loc(self): idx = pd.date_range('2000-01-01', periods=3) for method in [None, 'pad', 'backfill', 'nearest']: self.assertEqual(idx.get_loc(idx[1], method), 1) self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1) self.assertEqual(idx.get_loc(str(idx[1]), method), 1) if method is not None: self.assertEqual(idx.get_loc(idx[1], method, tolerance=pd.Timedelta('0 days')), 1) self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0) self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1) self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest', tolerance='1 day'), 1) self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest', tolerance=pd.Timedelta('1D')), 1) self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest', tolerance=np.timedelta64(1, 'D')), 1) self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest', tolerance=timedelta(1)), 1) with tm.assertRaisesRegexp(ValueError, 'must be convertible'): idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo') with tm.assertRaises(KeyError): idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours') self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3)) self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3)) self.assertEqual(idx.get_loc('1999', method='nearest'), 0) self.assertEqual(idx.get_loc('2001', method='nearest'), 2) with tm.assertRaises(KeyError): idx.get_loc('1999', method='pad') with tm.assertRaises(KeyError): idx.get_loc('2001', method='backfill') with tm.assertRaises(KeyError): idx.get_loc('foobar') with tm.assertRaises(TypeError): idx.get_loc(slice(2)) idx = pd.to_datetime(['2000-01-01', '2000-01-04']) self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0) self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1) self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2)) # time indexing idx = pd.date_range('2000-01-01', periods=24, freq='H') tm.assert_numpy_array_equal(idx.get_loc(time(12)), np.array([12], 
dtype=np.int64)) tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)), np.array([], dtype=np.int64)) with tm.assertRaises(NotImplementedError): idx.get_loc(time(12, 30), method='pad') def test_get_indexer(self): idx = pd.date_range('2000-01-01', periods=3) tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2])) target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour']) tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), np.array([-1, 0, 1])) tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), np.array([0, 1, 2])) tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), np.array([0, 1, 1])) tm.assert_numpy_array_equal( idx.get_indexer(target, 'nearest', tolerance=pd.Timedelta('1 hour')), np.array([0, -1, 1])) with tm.assertRaises(ValueError): idx.get_indexer(idx[[0]], method='nearest', tolerance='foo') def test_roundtrip_pickle_with_tz(self): # GH 8367 # round-trip of timezone index = date_range('20130101', periods=3, tz='US/Eastern', name='foo') unpickled = self.round_trip_pickle(index) self.assert_index_equal(index, unpickled) def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self): # GH7774 index = date_range('20130101', periods=3, tz='US/Eastern') self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern') self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern') def test_time_loc(self): # GH8667 from datetime import time from pandas.index import _SIZE_CUTOFF ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64) key = time(15, 11, 30) start = key.hour * 3600 + key.minute * 60 + key.second step = 24 * 3600 for n in ns: idx = pd.date_range('2014-11-26', periods=n, freq='S') ts = pd.Series(np.random.randn(n), index=idx) i = np.arange(start, n, step) tm.assert_numpy_array_equal(ts.index.get_loc(key), i) tm.assert_series_equal(ts[key], ts.iloc[i]) left, right = ts.copy(), ts.copy() left[key] *= -10 right.iloc[i] *= -10 tm.assert_series_equal(left, right) def test_time_overflow_for_32bit_machines(self): # GH8943. On some machines NumPy defaults to np.int32 (for example, # 32-bit Linux machines). In the function _generate_regular_range # found in tseries/index.py, `periods` gets multiplied by `strides` # (which has value 1e9) and since the max value for np.int32 is ~2e9, # and since those machines won't promote np.int32 to np.int64, we get # overflow. 
periods = np.int_(1000) idx1 = pd.date_range(start='2000', periods=periods, freq='S') self.assertEqual(len(idx1), periods) idx2 = pd.date_range(end='2000', periods=periods, freq='S') self.assertEqual(len(idx2), periods) def test_intersection(self): first = self.index second = self.index[5:] intersect = first.intersection(second) self.assertTrue(tm.equalContents(intersect, second)) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: result = first.intersection(case) self.assertTrue(tm.equalContents(result, second)) third = Index(['a', 'b', 'c']) result = first.intersection(third) expected = pd.Index([], dtype=object) self.assert_index_equal(result, expected) def test_union(self): first = self.index[:5] second = self.index[5:] everything = self.index union = first.union(second) self.assertTrue(tm.equalContents(union, everything)) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: result = first.union(case) self.assertTrue(tm.equalContents(result, everything)) def test_nat(self): self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT) def test_ufunc_coercions(self): idx = date_range('2011-01-01', periods=3, freq='2D', name='x') delta = np.timedelta64(1, 'D') for result in [idx + delta, np.add(idx, delta)]: tm.assertIsInstance(result, DatetimeIndex) exp = date_range('2011-01-02', periods=3, freq='2D', name='x') tm.assert_index_equal(result, exp) self.assertEqual(result.freq, '2D') for result in [idx - delta, np.subtract(idx, delta)]: tm.assertIsInstance(result, DatetimeIndex) exp = date_range('2010-12-31', periods=3, freq='2D', name='x') tm.assert_index_equal(result, exp) self.assertEqual(result.freq, '2D') delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'), np.timedelta64(3, 'D')]) for result in [idx + delta, np.add(idx, delta)]: tm.assertIsInstance(result, DatetimeIndex) exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'], freq='3D', name='x') tm.assert_index_equal(result, exp) self.assertEqual(result.freq, '3D') for result in [idx - delta, np.subtract(idx, delta)]: tm.assertIsInstance(result, DatetimeIndex) exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'], freq='D', name='x') tm.assert_index_equal(result, exp) self.assertEqual(result.freq, 'D') def test_fillna_datetime64(self): # GH 11343 for tz in ['US/Eastern', 'Asia/Tokyo']: idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00']) exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00']) self.assert_index_equal( idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp) # tz mismatch exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), pd.Timestamp('2011-01-01 10:00', tz=tz), pd.Timestamp('2011-01-01 11:00')], dtype=object) self.assert_index_equal( idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp) # object exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x', pd.Timestamp('2011-01-01 11:00')], dtype=object) self.assert_index_equal(idx.fillna('x'), exp) idx = pd.DatetimeIndex( ['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], tz=tz) exp = pd.DatetimeIndex( ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00' ], tz=tz) self.assert_index_equal( idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp) exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz), pd.Timestamp('2011-01-01 10:00'), pd.Timestamp('2011-01-01 11:00', tz=tz)], dtype=object) self.assert_index_equal( idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp) # object exp = 
pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz), 'x', pd.Timestamp('2011-01-01 11:00', tz=tz)], dtype=object) self.assert_index_equal(idx.fillna('x'), exp) class TestPeriodIndex(DatetimeLike, tm.TestCase): _holder = PeriodIndex _multiprocess_can_split_ = True def setUp(self): self.indices = dict(index=tm.makePeriodIndex(10)) self.setup_indices() def create_index(self): return period_range('20130101', periods=5, freq='D') def test_astype(self): # GH 13149, GH 13209 idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') result = idx.astype(object) expected = Index([Period('2016-05-16', freq='D')] + [Period(NaT, freq='D')] * 3, dtype='object') # Hack because of lack of support for Period null checking (GH12759) tm.assert_index_equal(result[:1], expected[:1]) result_arr = np.asarray([p.ordinal for p in result], dtype=np.int64) expected_arr = np.asarray([p.ordinal for p in expected], dtype=np.int64) tm.assert_numpy_array_equal(result_arr, expected_arr) # TODO: When GH12759 is resolved, change the above hack to: # tm.assert_index_equal(result, expected) # now, it raises. result = idx.astype(int) expected = Int64Index([16937] + [-9223372036854775808] * 3, dtype=np.int64) tm.assert_index_equal(result, expected) idx = period_range('1990', '2009', freq='A') result = idx.astype('i8') self.assert_index_equal(result, Index(idx.asi8)) self.assert_numpy_array_equal(result.values, idx.values) def test_astype_raises(self): # GH 13149, GH 13209 idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') self.assertRaises(ValueError, idx.astype, str) self.assertRaises(ValueError, idx.astype, float) self.assertRaises(ValueError, idx.astype, 'timedelta64') self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]') self.assertRaises(ValueError, idx.astype, 'datetime64') self.assertRaises(ValueError, idx.astype, 'datetime64[ns]') def test_shift(self): # test shift for PeriodIndex # GH8083 drange = self.create_index() result = drange.shift(1) expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04', '2013-01-05', '2013-01-06'], freq='D') self.assert_index_equal(result, expected) def test_pickle_compat_construction(self): pass def test_get_loc(self): idx = pd.period_range('2000-01-01', periods=3) for method in [None, 'pad', 'backfill', 'nearest']: self.assertEqual(idx.get_loc(idx[1], method), 1) self.assertEqual( idx.get_loc(idx[1].asfreq('H', how='start'), method), 1) self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1) self.assertEqual( idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1) self.assertEqual(idx.get_loc(str(idx[1]), method), 1) idx = pd.period_range('2000-01-01', periods=5)[::2] self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest', tolerance='1 day'), 1) self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest', tolerance=pd.Timedelta('1D')), 1) self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest', tolerance=np.timedelta64(1, 'D')), 1) self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest', tolerance=timedelta(1)), 1) with tm.assertRaisesRegexp(ValueError, 'must be convertible'): idx.get_loc('2000-01-10', method='nearest', tolerance='foo') msg = 'Input has different freq from PeriodIndex\\(freq=D\\)' with tm.assertRaisesRegexp(ValueError, msg): idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour') with tm.assertRaises(KeyError): idx.get_loc('2000-01-10', method='nearest', tolerance='1 day') def test_where(self): i = self.create_index() result = i.where(notnull(i)) expected = i tm.assert_index_equal(result, 
expected) i2 = i.copy() i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq='D') result = i.where(notnull(i2)) expected = i2 tm.assert_index_equal(result, expected) def test_where_other(self): i = self.create_index() for arr in [np.nan, pd.NaT]: result = i.where(notnull(i), other=np.nan) expected = i tm.assert_index_equal(result, expected) i2 = i.copy() i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq='D') result = i.where(notnull(i2), i2) tm.assert_index_equal(result, i2) i2 = i.copy() i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq='D') result = i.where(notnull(i2), i2.values) tm.assert_index_equal(result, i2) def test_get_indexer(self): idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start') tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.int_)) target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12', '2000-01-02T01'], freq='H') tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), np.array([-1, 0, 1], dtype=np.int_)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), np.array([0, 1, 2], dtype=np.int_)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), np.array([0, 1, 1], dtype=np.int_)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest', tolerance='1 hour'), np.array([0, -1, 1], dtype=np.int_)) msg = 'Input has different freq from PeriodIndex\\(freq=H\\)' with self.assertRaisesRegexp(ValueError, msg): idx.get_indexer(target, 'nearest', tolerance='1 minute') tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest', tolerance='1 day'), np.array([0, 1, 1], dtype=np.int_)) def test_repeat(self): # GH10183 idx = pd.period_range('2000-01-01', periods=3, freq='D') res = idx.repeat(3) exp = PeriodIndex(idx.values.repeat(3), freq='D') self.assert_index_equal(res, exp) self.assertEqual(res.freqstr, 'D') def test_period_index_indexer(self): # GH4125 idx = pd.period_range('2002-01', '2003-12', freq='M') df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx) self.assert_frame_equal(df, df.ix[idx]) self.assert_frame_equal(df, df.ix[list(idx)]) self.assert_frame_equal(df, df.loc[list(idx)]) self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]]) self.assert_frame_equal(df, df.loc[list(idx)]) def test_fillna_period(self): # GH 11343 idx = pd.PeriodIndex( ['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], freq='H') exp = pd.PeriodIndex( ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00' ], freq='H') self.assert_index_equal( idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp) exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x', pd.Period('2011-01-01 11:00', freq='H')], dtype=object) self.assert_index_equal(idx.fillna('x'), exp) with tm.assertRaisesRegexp( ValueError, 'Input has different freq=D from PeriodIndex\\(freq=H\\)'): idx.fillna(pd.Period('2011-01-01', freq='D')) def test_no_millisecond_field(self): with self.assertRaises(AttributeError): DatetimeIndex.millisecond with self.assertRaises(AttributeError): DatetimeIndex([]).millisecond class TestTimedeltaIndex(DatetimeLike, tm.TestCase): _holder = TimedeltaIndex _multiprocess_can_split_ = True def setUp(self): self.indices = dict(index=tm.makeTimedeltaIndex(10)) self.setup_indices() def create_index(self): return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1) def test_shift(self): # test shift for TimedeltaIndex # err8083 drange = self.create_index() result = drange.shift(1) expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00', '3 days 01:00:00', '4 days 01:00:00', '5 days 
01:00:00'], freq='D') self.assert_index_equal(result, expected) result = drange.shift(3, freq='2D 1s') expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03', '8 days 01:00:03', '9 days 01:00:03', '10 days 01:00:03'], freq='D') self.assert_index_equal(result, expected) def test_astype(self): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN]) result = idx.astype(object) expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3, dtype=object) tm.assert_index_equal(result, expected) result = idx.astype(int) expected = Int64Index([100000000000000] + [-9223372036854775808] * 3, dtype=np.int64) tm.assert_index_equal(result, expected) rng = timedelta_range('1 days', periods=10) result = rng.astype('i8') self.assert_index_equal(result, Index(rng.asi8)) self.assert_numpy_array_equal(rng.asi8, result.values) def test_astype_timedelta64(self): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN]) result = idx.astype('timedelta64') expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64') tm.assert_index_equal(result, expected) result = idx.astype('timedelta64[ns]') tm.assert_index_equal(result, idx) self.assertFalse(result is idx) result = idx.astype('timedelta64[ns]', copy=False) tm.assert_index_equal(result, idx) self.assertTrue(result is idx) def test_astype_raises(self): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN]) self.assertRaises(ValueError, idx.astype, float) self.assertRaises(ValueError, idx.astype, str) self.assertRaises(ValueError, idx.astype, 'datetime64') self.assertRaises(ValueError, idx.astype, 'datetime64[ns]') def test_get_loc(self): idx = pd.to_timedelta(['0 days', '1 days', '2 days']) for method in [None, 'pad', 'backfill', 'nearest']: self.assertEqual(idx.get_loc(idx[1], method), 1) self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1) self.assertEqual(idx.get_loc(str(idx[1]), method), 1) self.assertEqual( idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1) self.assertEqual( idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1) self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1) with tm.assertRaisesRegexp(ValueError, 'must be convertible'): idx.get_loc(idx[1], method='nearest', tolerance='foo') for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: self.assertEqual(idx.get_loc('1 day 1 hour', method), loc) def test_get_indexer(self): idx = pd.to_timedelta(['0 days', '1 days', '2 days']) tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.int_)) target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour']) tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), np.array([-1, 0, 1], dtype=np.int_)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), np.array([0, 1, 2], dtype=np.int_)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), np.array([0, 1, 1], dtype=np.int_)) res = idx.get_indexer(target, 'nearest', tolerance=pd.Timedelta('1 hour')) tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.int_)) def test_numeric_compat(self): idx = self._holder(np.arange(5, dtype='int64')) didx = self._holder(np.arange(5, dtype='int64') ** 2) result = idx * 1 tm.assert_index_equal(result, idx) result = 1 * idx tm.assert_index_equal(result, idx) result = idx / 1 tm.assert_index_equal(result, idx) result = idx // 1 tm.assert_index_equal(result, idx) result = idx * np.array(5, dtype='int64') tm.assert_index_equal(result, self._holder(np.arange(5, dtype='int64') * 5)) result = 
idx * np.arange(5, dtype='int64') tm.assert_index_equal(result, didx) result = idx * Series(np.arange(5, dtype='int64')) tm.assert_index_equal(result, didx) result = idx * Series(np.arange(5, dtype='float64') + 0.1) tm.assert_index_equal(result, self._holder(np.arange( 5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1))) # invalid self.assertRaises(TypeError, lambda: idx * idx) self.assertRaises(ValueError, lambda: idx * self._holder(np.arange(3))) self.assertRaises(ValueError, lambda: idx * np.array([1, 2])) def test_pickle_compat_construction(self): pass def test_ufunc_coercions(self): # normal ops are also tested in tseries/test_timedeltas.py idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'], freq='2H', name='x') for result in [idx * 2, np.multiply(idx, 2)]: tm.assertIsInstance(result, TimedeltaIndex) exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'], freq='4H', name='x') tm.assert_index_equal(result, exp) self.assertEqual(result.freq, '4H') for result in [idx / 2, np.divide(idx, 2)]: tm.assertIsInstance(result, TimedeltaIndex) exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'], freq='H', name='x') tm.assert_index_equal(result, exp) self.assertEqual(result.freq, 'H') idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'], freq='2H', name='x') for result in [-idx, np.negative(idx)]: tm.assertIsInstance(result, TimedeltaIndex) exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'], freq='-2H', name='x') tm.assert_index_equal(result, exp) self.assertEqual(result.freq, '-2H') idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'], freq='H', name='x') for result in [abs(idx), np.absolute(idx)]: tm.assertIsInstance(result, TimedeltaIndex) exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'], freq=None, name='x') tm.assert_index_equal(result, exp) self.assertEqual(result.freq, None) def test_fillna_timedelta(self): # GH 11343 idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day']) exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day']) self.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp) exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day']) idx.fillna(
completion: pd.Timedelta('3 hour')
api: pandas.Timedelta
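The prompt of this row breaks off at idx.fillna( and the completion supplies the argument of that call. A short runnable reconstruction under that reading follows; the closing parenthesis is added here for illustration and is not part of the stored completion.

import pandas as pd

# Splice the completion back into the truncated prompt: the prompt builds a
# TimedeltaIndex with a missing value and stops at "idx.fillna(", and the
# completion provides the pandas.Timedelta fill value.
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
result = idx.fillna(pd.Timedelta('3 hour'))        # completion: pd.Timedelta('3 hour')

# The prompt already constructed the expected index for this case.
expected = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
assert result.equals(expected)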
prompt:
# -*- coding: utf-8 -*-
"""Unit tests for cartoframes.data.Dataset"""
import json
import os
import sys
import unittest
import warnings

import numpy as np
import pandas as pd
import geopandas as gpd

from carto.exceptions import CartoException

from cartoframes.auth import Credentials
from cartoframes.data import Dataset
from cartoframes.data.clients import SQLClient
from cartoframes.data.dataset.registry.base_dataset import BaseDataset
from cartoframes.data.dataset.registry.strategies_registry import StrategiesRegistry
from cartoframes.data.dataset.registry.dataframe_dataset import (
    DataFrameDataset, _rows)
from cartoframes.data.dataset.registry.query_dataset import QueryDataset
from cartoframes.data.dataset.registry.table_dataset import TableDataset
from cartoframes.lib import context
from cartoframes.utils.columns import DataframeColumnsInfo, normalize_name
from cartoframes.utils.geom_utils import setting_value_exception
from cartoframes.utils.utils import load_geojson

from tests.e2e.helpers import _UserUrlLoader
from tests.unit.mocks.context_mock import ContextMock
from tests.unit.mocks.dataset_mock import DatasetMock, QueryDatasetMock

try:
    from unittest.mock import Mock
except ImportError:
    from mock import Mock

WILL_SKIP = False
warnings.filterwarnings('ignore')


class TestDataset(unittest.TestCase, _UserUrlLoader):
    """Tests for cartoframes.CARTOframes"""

    def setUp(self):
        if (os.environ.get('APIKEY') is None or
                os.environ.get('USERNAME') is None):
            try:
                creds = json.loads(open('tests/e2e/secret.json').read())
                self.apikey = creds['APIKEY']
                self.username = creds['USERNAME']
            except Exception:
                warnings.warn("Skipping Context tests. To test it, "
                              "create a `secret.json` file in test/ by "
                              "renaming `secret.json.sample` to `secret.json` "
                              "and updating the credentials to match your "
                              "environment.")
                self.apikey = None
                self.username = None
        else:
            self.apikey = os.environ['APIKEY']
            self.username = os.environ['USERNAME']

        # table naming info
        has_mpl = 'mpl' if os.environ.get('MPLBACKEND') else 'nonmpl'
        has_gpd = 'gpd' if os.environ.get('USE_GEOPANDAS') else 'nongpd'
        pyver = sys.version[0:3].replace('.', '_')
        buildnum = os.environ.get('TRAVIS_BUILD_NUMBER') or 'none'

        test_slug = '{ver}_{num}_{mpl}_{gpd}'.format(
            ver=pyver, num=buildnum, mpl=has_mpl, gpd=has_gpd
        )

        # for writing to carto
        self.test_write_table = normalize_name(
            'cf_test_table_{}'.format(test_slug)
        )

        self.base_url = self.user_url().format(username=self.username)
        self.credentials = Credentials(self.username, self.apikey, self.base_url)
        self.sql_client = SQLClient(self.credentials)

        self.test_geojson = {
            "type": "FeatureCollection",
            "features": [
                {
                    "type": "Feature",
                    "properties": {},
                    "geometry": {
                        "type": "Point",
                        "coordinates": [
                            -3.1640625,
                            42.032974332441405
                        ]
                    }
                }
            ]
        }

        self.tearDown()

    def tearDown(self):
        """restore to original state"""
        tables = (self.test_write_table, )
        sql_drop = 'DROP TABLE IF EXISTS {};'

        for table in tables:
            try:
                Dataset(table, credentials=self.credentials).delete()
                self.sql_client.query(sql_drop.format(table))
            except CartoException:
                warnings.warn('Error deleting tables')

        StrategiesRegistry.instance = None

    def test_dataset_upload_validation_fails_only_with_table_name(self):
        table_name = 'fake_table'
        dataset = Dataset(table_name, credentials=self.credentials)

        err_msg = 'Nothing to upload. We need data in a DataFrame or GeoDataFrame or a query to upload data to CARTO.'
with self.assertRaises(ValueError, msg=err_msg): dataset.upload() def test_dataset_upload_validation_query_fails_without_table_name(self): query = 'SELECT 1' dataset = Dataset(query, credentials=self.credentials) with self.assertRaises(ValueError, msg='You should provide a table_name and context to upload data.'): dataset.upload() def test_dataset_upload_validation_df_fails_without_table_name_and_context(self): df = load_geojson(self.test_geojson) dataset = Dataset(df) with self.assertRaises(ValueError, msg='You should provide a table_name and context to upload data.'): dataset.upload() def test_dataset_upload_validation_df_fails_without_context(self): df = load_geojson(self.test_geojson) dataset = Dataset(df) with self.assertRaises(ValueError, msg='You should provide a table_name and context to upload data.'): dataset.upload(table_name=self.test_write_table) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_upload_into_existing_table_fails_without_replace_property(self): query = 'SELECT 1' dataset = Dataset(query, credentials=self.credentials) dataset.upload(table_name=self.test_write_table) dataset = Dataset(query, credentials=self.credentials) err_msg = ('Table with name {t} and schema {s} already exists in CARTO. Please choose a different `table_name`' 'or use if_exists="replace" to overwrite it').format(t=self.test_write_table, s='public') with self.assertRaises(CartoException, msg=err_msg): dataset.upload(table_name=self.test_write_table) dataset.upload(table_name=self.test_write_table, if_exists=Dataset.IF_EXISTS_REPLACE) def test_dataset_upload_validation_fails_with_query_and_append(self): query = 'SELECT 1' dataset = Dataset(query, credentials=self.credentials) err_msg = 'Error using append with a query Dataset. It is not possible to append data to a query' with self.assertRaises(CartoException, msg=err_msg): dataset.upload(table_name=self.test_write_table, if_exists=Dataset.IF_EXISTS_APPEND) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_download_validations(self): self.assertNotExistsTable(self.test_write_table) df = load_geojson(self.test_geojson) dataset = Dataset(df) error_msg = 'You should provide a context and a table_name or query to download data.' 
with self.assertRaises(ValueError, msg=error_msg): dataset.download() query = 'SELECT 1 as fakec' dataset = Dataset(query, credentials=self.credentials) dataset.upload(table_name=self.test_write_table) dataset._table_name = 'non_used_table' df = dataset.download() self.assertEqual('fakec' in df.columns, True) dataset = Dataset(self.test_write_table, credentials=self.credentials) df = dataset.download() self.assertEqual('fakec' in df.columns, True) def test_dataset_download_and_upload(self): self.assertNotExistsTable(self.test_write_table) query = 'SELECT 1 as fakec' dataset = Dataset(query, credentials=self.credentials) df = dataset.download() dataset = Dataset(df) dataset.upload(table_name=self.test_write_table, credentials=self.credentials) self.assertExistsTable(self.test_write_table) dataset = Dataset(self.test_write_table, credentials=self.credentials) df = dataset.download() dataset = Dataset(df) dataset.upload(table_name=self.test_write_table, credentials=self.credentials, if_exists=Dataset.IF_EXISTS_REPLACE) def test_dataset_upload_and_download_special_values(self): self.assertNotExistsTable(self.test_write_table) orig_df = pd.DataFrame({ 'lat': [0, 1, 2], 'lng': [0, 1, 2], 'svals': [np.inf, -np.inf, np.nan] }) dataset = Dataset(orig_df) dataset.upload(table_name=self.test_write_table, with_lnglat=('lng', 'lat'), credentials=self.credentials) self.assertExistsTable(self.test_write_table) dataset = Dataset(self.test_write_table, credentials=self.credentials) df = dataset.download() assert df.lat.equals(orig_df.lat) assert df.lng.equals(orig_df.lng) assert df.svals.equals(orig_df.svals) assert df.the_geom.notnull().all() def test_dataset_download_bool_null(self): self.assertNotExistsTable(self.test_write_table) query = 'SELECT * FROM (values (true, true), (false, false), (false, null)) as x(fakec_bool, fakec_bool_null)' dataset = Dataset(query, credentials=self.credentials) dataset.upload(table_name=self.test_write_table) dataset = Dataset(self.test_write_table, credentials=self.credentials) df = dataset.download() self.assertEqual(df['fakec_bool'].dtype, 'bool') self.assertEqual(df['fakec_bool_null'].dtype, 'object') self.assertEqual(list(df['fakec_bool']), [True, False, False]) self.assertEqual(list(df['fakec_bool_null']), [True, False, None]) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_points_dataset(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_mcdonalds_nyc df = read_mcdonalds_nyc(limit=100) dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 100) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_lines_dataset(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_ne_50m_graticules_15 df = read_ne_50m_graticules_15() dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 35) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def 
test_dataset_write_polygons_dataset(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_brooklyn_poverty df = read_brooklyn_poverty() dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 2052) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_lnglat_dataset(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_taxi df = read_taxi(limit=50) lnglat = ('dropoff_longitude', 'dropoff_latitude') Dataset(df).upload(with_lnglat=lnglat, table_name=self.test_write_table, credentials=self.credentials) self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 50) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_null_geometry_column(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_taxi df = read_taxi(limit=10) dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom_webmercator IS NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 10) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_with_different_geometry_column(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_brooklyn_poverty df = read_brooklyn_poverty() df.rename(columns={'the_geom': 'geometry'}, inplace=True) dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 2052) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_with_different_geom_column(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_brooklyn_poverty df = read_brooklyn_poverty() df.rename(columns={'the_geom': 'geom'}, inplace=True) dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 2052) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_geopandas(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_taxi import shapely df = read_taxi(limit=50) df.drop(['the_geom'], axis=1, inplace=True) gdf = gpd.GeoDataFrame(df.drop(['dropoff_longitude', 'dropoff_latitude'], axis=1), crs={'init': 'epsg:4326'}, 
geometry=[shapely.geometry.Point(xy) for xy in zip(df.dropoff_longitude, df.dropoff_latitude)]) # TODO: use from_geodataframe dataset = Dataset(gdf).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 50) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_wkt(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_taxi df = read_taxi(limit=50) df['the_geom'] = df.apply(lambda x: 'POINT ({x} {y})' .format(x=x['dropoff_longitude'], y=x['dropoff_latitude']), axis=1) dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 50) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_if_exists_fail_by_default(self): self.assertNotExistsTable(self.test_write_table) from cartoframes.examples import read_brooklyn_poverty df = read_brooklyn_poverty() dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name err_msg = ('Table with name {t} and schema {s} already exists in CARTO. Please choose a different `table_name`' 'or use if_exists="replace" to overwrite it').format(t=self.test_write_table, s='public') with self.assertRaises(CartoException, msg=err_msg): dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 2052) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_if_exists_append(self): from cartoframes.examples import read_brooklyn_poverty df = read_brooklyn_poverty() Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) # avoid uploading the same index or cartodb_id df.index += df.index.max() + 1 df['cartodb_id'] += df['cartodb_id'].max() + 1 Dataset(df).upload(if_exists=Dataset.IF_EXISTS_APPEND, table_name=self.test_write_table, credentials=self.credentials) self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 2052 * 2) @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') def test_dataset_write_if_exists_replace(self): from cartoframes.examples import read_brooklyn_poverty df = read_brooklyn_poverty() dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials) self.test_write_table = dataset.table_name dataset = Dataset(df).upload( if_exists=Dataset.IF_EXISTS_REPLACE, table_name=self.test_write_table, credentials=self.credentials) self.assertExistsTable(self.test_write_table) query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT 
NULL'.format(self.test_write_table) result = self.sql_client.query(query, verbose=True) self.assertEqual(result['total_rows'], 2052) def test_dataset_schema_from_parameter(self): schema = 'fake_schema' dataset = Dataset('fake_table', schema=schema, credentials=self.credentials) self.assertEqual(dataset.schema, schema) def test_dataset_schema_from_non_org_context(self): dataset = Dataset('fake_table', credentials=self.credentials) self.assertEqual(dataset.schema, 'public') def test_dataset_schema_from_org_context(self): pass # dataset = DatasetMock('fake_table', credentials=self.credentials) # self.assertEqual(dataset.schema, 'fake_username') # FIXME does not work in python 2.7 (COPY stucks and blocks the table, fix after # https://github.com/CartoDB/CartoDB-SQL-API/issues/579 is fixed) # @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test') # def test_dataset_write_with_encoding(self): # df = pd.DataFrame({'vals': [1, 2], 'strings': ['a', 'ô']}) # dataset = self.con.write(df, self.test_write_table) # self.test_write_table = dataset.table_name # self.assertExistsTable(self.test_write_table) def assertExistsTable(self, table_name): resp = self.sql_client.query(''' SELECT * FROM {table} LIMIT 0 '''.format(table=table_name)) self.assertIsNotNone(resp) def assertNotExistsTable(self, table_name): try: self.sql_client.query(''' SELECT * FROM {table} LIMIT 0 '''.format(table=table_name)) except CartoException as e: self.assertTrue('relation "{}" does not exist'.format(table_name) in str(e)) class TestDatasetInfo(unittest.TestCase): def setUp(self): self.username = 'fake_username' self.api_key = 'fake_api_key' self.credentials = Credentials(username=self.username, api_key=self.api_key) self._context_mock = ContextMock() # Mock create_context method self.original_create_context = context.create_context context.create_context = lambda c: self._context_mock def tearDown(self): context.create_context = self.original_create_context def test_dataset_info_should_work_from_table(self): table_name = 'fake_table' dataset = DatasetMock(table_name, credentials=self.credentials) self.assertEqual(dataset.dataset_info.privacy, Dataset.PRIVACY_PRIVATE) def test_dataset_get_privacy_from_new_table(self): query = 'SELECT 1' dataset = DatasetMock(query, credentials=self.credentials) dataset.upload(table_name='fake_table') dataset = DatasetMock('fake_table', credentials=self.credentials) self.assertEqual(dataset.dataset_info.privacy, Dataset.PRIVACY_PRIVATE) def test_dataset_set_privacy_to_new_table(self): query = 'SELECT 1' dataset = DatasetMock(query, credentials=self.credentials) dataset.upload(table_name='fake_table') dataset = DatasetMock('fake_table', credentials=self.credentials) dataset.update_dataset_info(privacy=Dataset.PRIVACY_PUBLIC) self.assertEqual(dataset.dataset_info.privacy, Dataset.PRIVACY_PUBLIC) def test_dataset_set_privacy_with_wrong_parameter(self): query = 'SELECT 1' dataset = DatasetMock(query, credentials=self.credentials) dataset.upload(table_name='fake_table') wrong_privacy = 'wrong_privacy' error_msg = 'Wrong privacy. The privacy: {p} is not valid. 
You can use: {o1}, {o2}, {o3}'.format( p=wrong_privacy, o1=Dataset.PRIVACY_PRIVATE, o2=Dataset.PRIVACY_PUBLIC, o3=Dataset.PRIVACY_LINK) with self.assertRaises(ValueError, msg=error_msg): dataset.update_dataset_info(privacy=wrong_privacy) def test_dataset_info_props_are_private(self): table_name = 'fake_table' dataset = DatasetMock(table_name, credentials=self.credentials) dataset_info = dataset.dataset_info self.assertEqual(dataset_info.privacy, Dataset.PRIVACY_PRIVATE) privacy = Dataset.PRIVACY_PUBLIC error_msg = str(setting_value_exception('privacy', privacy)) with self.assertRaises(CartoException, msg=error_msg): dataset_info.privacy = privacy self.assertEqual(dataset_info.privacy, Dataset.PRIVACY_PRIVATE) def test_dataset_info_from_dataframe(self): df = pd.DataFrame.from_dict({'test': [True, [1, 2]]}) dataset = DatasetMock(df) error_msg = ('Your data is not synchronized with CARTO.' 'First of all, you should call upload method ' 'to save your data in CARTO.') with self.assertRaises(CartoException, msg=error_msg): self.assertIsNotNone(dataset.dataset_info) def test_dataset_info_from_dataframe_sync(self): df = pd.DataFrame.from_dict({'test': [True, [1, 2]]}) dataset = DatasetMock(df) dataset.upload(table_name='fake_table', credentials=self.credentials) dataset = DatasetMock('fake_table', credentials=self.credentials) self.assertEqual(dataset.dataset_info.privacy, Dataset.PRIVACY_PRIVATE) def test_dataset_info_from_query(self): query = 'SELECT 1' dataset = DatasetMock(query, credentials=self.credentials) error_msg = ('We can not extract Dataset info from a QueryDataset. Use a TableDataset ' '`Dataset(table_name)` to get or modify the info from a CARTO table.') with self.assertRaises(ValueError, msg=error_msg): self.assertIsNotNone(dataset.dataset_info) def test_dataset_info_from_query_update(self): query = 'SELECT 1' dataset = DatasetMock(query, credentials=self.credentials) error_msg = ('We can not extract Dataset info from a QueryDataset. 
Use a TableDataset ' '`Dataset(table_name)` to get or modify the info from a CARTO table.') with self.assertRaises(ValueError, msg=error_msg): dataset.update_dataset_info() class TestDatasetUnit(unittest.TestCase, _UserUrlLoader): """Unit tests for cartoframes.Dataset""" def setUp(self): self.username = 'fake_username' self.api_key = 'fake_api_key' self.credentials = Credentials(username=self.username, api_key=self.api_key) self.test_geojson = { "type": "FeatureCollection", "features": [ { "type": "Feature", "properties": {}, "geometry": { "type": "Point", "coordinates": [ -3.1640625, 42.032974332441405 ] } } ] } self._context_mock = ContextMock() # Mock create_context method self.original_create_context = context.create_context context.create_context = lambda c: self._context_mock def tearDown(self): StrategiesRegistry.instance = None context.create_context = self.original_create_context def assertIsTableDatasetInstance(self, table_name): ds = DatasetMock(table_name, credentials=self.credentials) error = "Dataset('{}')._strategy is not an instance of TableDataset".format(table_name) self.assertTrue(isinstance(ds._strategy, TableDataset), msg=error) def assertIsQueryDatasetInstance(self, query): ds = DatasetMock(query, credentials=self.credentials) error = "Dataset('{}')._strategy is not an instance of QueryDataset".format(query) self.assertTrue(isinstance(ds._strategy, QueryDataset), msg=error) def assertIsDataFrameDatasetInstance(self, data): ds = DatasetMock(data) error = "Dataset('{}')._strategy is not an instance of DataFrameDataset".format(data) self.assertTrue(isinstance(ds._strategy, DataFrameDataset), msg=error) def test_creation_from_valid_table_names(self): table_names = ['myt', 'my_t', 'tgeojson', 't_geojson', 'geojson', 'json', 'select_t'] for table_name in table_names: self.assertIsTableDatasetInstance(table_name) def test_creation_from_valid_queries(self): queries = ['SELECT * FROM', 'select * from', 'select c', 'with n as', 'WITH n AS', 'select * from json', 'select * from geojson'] for query in queries: self.assertIsQueryDatasetInstance(query) def test_creation_from_valid_dataframe(self): df = pd.DataFrame.from_dict({'test': [True, [1, 2]]}) self.assertIsDataFrameDatasetInstance(df) def test_creation_from_valid_geodataframe(self): df = pd.DataFrame.from_dict({'test': [True, [1, 2]]}) gdf = gpd.GeoDataFrame(df) self.assertIsDataFrameDatasetInstance(gdf) def test_creation_from_valid_localgeojson(self): self.assertIsDataFrameDatasetInstance(self.test_geojson) def test_creation_from_valid_geojson_file_path(self): paths = [os.path.abspath('tests/e2e/data/dataset/fixtures/valid.geojson'), os.path.abspath('tests/e2e/data/dataset/fixtures/validgeo.json')] for path in paths: self.assertIsDataFrameDatasetInstance(path) def test_creation_from_wrong_geojson_file_path(self): geojson_file_path = os.path.abspath('tests/e2e/data/dataset/fixtures/wrong.geojson') with self.assertRaises(Exception): self.assertIsDataFrameDatasetInstance(geojson_file_path) def test_creation_from_unexisting_geojson_file_path(self): geojson_file_path = os.path.abspath('unexisting.geojson') with self.assertRaises(ValueError, msg='We can not detect the Dataset type'): self.assertIsDataFrameDatasetInstance(geojson_file_path) def test_dataset_from_table(self): table_name = 'fake_table' dataset = DatasetMock(table_name, credentials=self.credentials) self.assertIsInstance(dataset, Dataset) self.assertEqual(dataset.table_name, table_name) self.assertEqual(dataset.schema, 'public') self.assertEqual(dataset.credentials, 
self.credentials) def test_dataset_from_query(self): query = 'SELECT * FROM fake_table' dataset = DatasetMock(query, credentials=self.credentials) self.assertIsInstance(dataset, Dataset) self.assertEqual(dataset.query, query) self.assertEqual(dataset.credentials, self.credentials) self.assertIsNone(dataset.table_name) def test_dataset_from_dataframe(self): df = load_geojson(self.test_geojson) dataset = Dataset(df) self.assertIsInstance(dataset, Dataset) self.assertIsNotNone(dataset.dataframe) self.assertIsNone(dataset.table_name) self.assertIsNone(dataset.credentials) def test_dataset_from_geodataframe(self): gdf = load_geojson(self.test_geojson) dataset = Dataset(gdf) self.assertIsInstance(dataset, Dataset) self.assertIsNotNone(dataset.dataframe) self.assertIsNone(dataset.table_name) self.assertIsNone(dataset.credentials) def test_dataset_from_geojson(self): geojson = self.test_geojson dataset = Dataset(geojson) self.assertIsInstance(dataset, Dataset) self.assertIsNotNone(dataset.dataframe) self.assertIsNone(dataset.table_name) self.assertIsNone(dataset.credentials) def test_dataset_from_table_without_credentials(self): table_name = 'fake_table' error_msg = ('Credentials attribute is required. ' 'Please pass a `Credentials` instance ' 'or use the `set_default_credentials` function.') with self.assertRaises(AttributeError, msg=error_msg): Dataset(table_name) def test_dataset_from_query_without_credentials(self): query = 'SELECT * FROM fake_table' error_msg = ('Credentials attribute is required. ' 'Please pass a `Credentials` instance ' 'or use the `set_default_credentials` function.') with self.assertRaises(AttributeError, msg=error_msg): Dataset(query) def test_dataset_get_table_names_from_table(self): table_name = 'fake_table' dataset = DatasetMock(table_name, credentials=self.credentials) self.assertEqual(dataset.get_table_names(), [table_name]) def test_dataset_get_table_names_from_query(self): table_name = 'fake_table' QueryDatasetMock.get_table_names = Mock(return_value=[table_name]) query = 'SELECT * FROM {}'.format(table_name) dataset = DatasetMock(query, credentials=self.credentials) self.assertEqual(dataset.get_table_names(), [table_name]) def test_dataset_get_table_names_from_dataframe(self): df = load_geojson(self.test_geojson) dataset = Dataset(df) error_msg = ('Your data is not synchronized with CARTO.' 
'First of all, you should call upload method ' 'to save your data in CARTO.') with self.assertRaises(CartoException, msg=error_msg): dataset.get_table_names() def test_create_table_query(self): df = pd.DataFrame.from_dict({'cartodb_id': [1], 'the_geom': ['POINT (1 1)']}) dataframe_columns_info = DataframeColumnsInfo(df, None) table_name = 'fake_table' expected_result = 'CREATE TABLE {} (cartodb_id bigint, the_geom geometry(Point, 4326))'.format(table_name) dataset = DataFrameDataset(df) dataset.table_name = table_name result = dataset._create_table_query(dataframe_columns_info.columns) self.assertEqual(result, expected_result) def test_create_table_query_without_geom(self): df = pd.DataFrame.from_dict({'cartodb_id': [1]}) dataframe_columns_info = DataframeColumnsInfo(df, None) table_name = 'fake_table' expected_result = 'CREATE TABLE {} (cartodb_id bigint)'.format(table_name) dataset = DataFrameDataset(df) dataset.table_name = table_name result = dataset._create_table_query(dataframe_columns_info.columns) self.assertEqual(result, expected_result) def test_create_table_query_with_several_geometry_columns_prioritize_the_geom(self): df = pd.DataFrame([['POINT (0 0)', 'POINT (1 1)', 'POINT (2 2)']], columns=['geom', 'the_geom', 'geometry']) dataframe_columns_info = DataframeColumnsInfo(df, None) table_name = 'fake_table' expected_result = 'CREATE TABLE {} (geom text, the_geom geometry(Point, 4326), geometry text)'.format( table_name) dataset = DataFrameDataset(df) dataset.table_name = table_name result = dataset._create_table_query(dataframe_columns_info.columns) self.assertEqual(result, expected_result) def test_create_table_query_with_several_geometry_columns_and_geodataframe_prioritize_geometry(self): df =
pd.DataFrame([['POINT (0 0)', 'POINT (1 1)', 'POINT (2 2)']], columns=['geom', 'the_geom', 'geometry'])
pandas.DataFrame
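The `_create_table_query` tests in the snippet above expect statements such as `CREATE TABLE fake_table (cartodb_id bigint, the_geom geometry(Point, 4326))`. The following is a minimal sketch of how such a statement can be assembled from (column name, SQL type) pairs; it is an illustration only, not cartoframes' actual implementation, and the column list is an example.

# Minimal sketch: build a CREATE TABLE statement from (name, SQL type) pairs,
# matching the expected_result strings in the tests above. Illustration only.
def create_table_query(table_name, columns):
    column_defs = ', '.join('{} {}'.format(name, sql_type) for name, sql_type in columns)
    return 'CREATE TABLE {} ({})'.format(table_name, column_defs)

columns = [('cartodb_id', 'bigint'), ('the_geom', 'geometry(Point, 4326)')]
print(create_table_query('fake_table', columns))
# -> CREATE TABLE fake_table (cartodb_id bigint, the_geom geometry(Point, 4326))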
""" Read in available site data Written by <NAME> 21st April 2020 """ import os import csv import configparser import pandas as pd import geopandas as gpd import xlrd import numpy as np from shapely.geometry import MultiPolygon from shapely.ops import transform, unary_union import rasterio # from rasterio.mask import mask from rasterstats import zonal_stats import pyproj import random from countries import COUNTRY_LIST CONFIG = configparser.ConfigParser() CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini')) BASE_PATH = CONFIG['file_locations']['base_path'] DATA_RAW = os.path.join(BASE_PATH, 'raw', 'real_site_data') DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate') DATA_PROCESSED = os.path.join(BASE_PATH, 'processed') def process_unconstrained_site_estimation(country): """ Allocate towers using an unconstrained site estimation process. """ iso3 = country['iso3'] filename = 'regional_data.csv' path = os.path.join(DATA_INTERMEDIATE, iso3, filename) regional_data =
pd.read_csv(path)
pandas.read_csv
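For orientation, a hypothetical sketch of what an "unconstrained" allocation step could look like once the regional CSV is loaded: a national site total is split across regions in proportion to a weight column. The 'population' column, the 'MWI' ISO3 code and the total of 10000 sites are placeholder assumptions, not values taken from the script above.

import os
import pandas as pd

# Hypothetical allocation sketch; column name, ISO3 code and national total
# are placeholders, not taken from the script above.
path = os.path.join('intermediate', 'MWI', 'regional_data.csv')
regional_data = pd.read_csv(path)

national_total_sites = 10000
regional_data['sites_estimated'] = (
    national_total_sites
    * regional_data['population']
    / regional_data['population'].sum()
)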
import unittest


class PandasTest(unittest.TestCase):
    def test_import(self):
        import pandas as pd
        pd.DataFrame()

    def test_third_party_import(self):
        from third_party.python import pandas as pd2
        import pandas as pd
        df =
pd.DataFrame()
pandas.DataFrame
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Dec 15 17:14:55 2021 @author: sergiomarconi """ import numpy as np import pandas as pd import pickle from sklearn.ensemble import RandomForestClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline from sklearn.ensemble import StackingClassifier from sklearn.ensemble import BaggingClassifier from mlxtend.classifier import StackingCVClassifier from sklearn.linear_model import LogisticRegression from sklearn.experimental import enable_hist_gradient_boosting from sklearn.ensemble import HistGradientBoostingClassifier from mlxtend.classifier import SoftmaxRegression from sklearn.ensemble import GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import normalize from imblearn.ensemble import BalancedRandomForestClassifier from imblearn.ensemble import RUSBoostClassifier from sklearn.calibration import CalibratedClassifierCV from sklearn.svm import SVC import category_encoders as ce species_to_genus = { '2PLANT':"NA", 'ABBA':"AB", 'ABLAL':"AB", "ABLO":'AB', "ABMA":"AB", "ABAM":"AB", 'ACNE2': "AC", 'ACNEN': "AC", 'ACPE':"AC", 'ACRU': "AC", 'ACSA3' : "AC", 'ACSA2' :"AC", 'ACFA':"AC2", 'ACKO': "AC2", 'ACGR':"AC2", 'AIAL' : "AI", 'ALRU2': "AL", 'ALVI5':'AL', 'AMLA' : "AM", 'AMEL':'AM', 'ARVIM':"AR", 'BEAL2': "BE", 'BEGL/BENA':"BE", 'BEPA': "BE", 'BELE': "BE", 'BEPO': "BE", 'BETUL':'BE', 'BENE4' : "BE", 'BUBU':"BU", 'BUSI':"BU", 'BOSU2':"BO", 'CACA18':"CA1", 'CADE27':"CA2", 'CAGL8':"CA3", 'CAOV2':"CA3", 'CAOV3':"CA3", 'CAAQ2':'CA', 'CACO15': "CA3", 'CATO6':"CA3", 'CAIL2':"CA3", 'CECA4':"CE1", 'CELA':"CE2", 'CEOC':"CE2", 'CODR':"CO", 'CODI8':"CO", 'COFL2':"CO2", 'DIVI5':"DI", 'ELAN':"EL", 'FAGR':"FA", 'FRAM2':"FR", 'FRAXI':'FR', 'FRNI':'FR', 'LARIX':'LA', 'ILAN':'IL', 'FRPE':"FR", 'GYDI':"GY", 'GUOF':"GU", 'GUSA':"GU", 'GLTR':"GL", 'HALES':"HA", 'JUNI':"JU1", 'JUNIP':"JU2", 'JUVI':"JU2", 'JUOS':"JU2", 'LIST2':"LI1", 'LITU':"LI2", 'MAPO':"MA", 'MAFR':'MA', 'MAGNO':'MA', 'MORU2':"MO", 'NYBI':"NY", 'NYSY':"NY", 'NYAQ2':'NY', 'OXYDE':"OX", 'OXAR':"OX", 'OSVI':'OS', 'PICEA':"PI1", 'PIAL3':"PI2", 'PIAC':"PI3", 'PICO':"PI2", 'PIEL':"PI2", 'PIEN':"PI2", 'PIEC2':"PI2", 'PIFL2':"PI2", 'PIGL':"PI2", 'PIMA':"PI2", 'PINUS':'PI2', 'PIPA2':"PI2", 'PIPO':"PI2", 'PIRU':"PI2", 'PIPOS':"PI2", 'PIPU5':"PI2", 'PIST':"PI2", 'PITA':"PI2", 'PIGL2':"PI2", 'PIED':"PI", 'PIJE':"PI", 'PIRI':'PI', 'PIVI2':'PI', 'PINUS':"PI2", 'PLOC':"PL", 'POTR5':"PO", 'POGR4':"PO", 'PODE3':"PO", 'PRVE':"PR", 'PRVI':"PR", 'PRAV':'PR', 'PRSE2': "PR", 'PRAN3':"PR", 'PSME':"PS", 'QUAL':"QU", 'QUCO2':"QU", 'QUCH':"QU", 'QUCH2':"QU", 'QUHE2':'QU', 'QUERC':"QU", 'QUGE2':"QU", 'QUSH':"QU", 'QULA2':'QU', "QUPH":"QU", 'QULA3':"QU", 'QUERCUS':"QU", 'QULY':"QU", 'QUMA3':"QU", 'QUMA13':"QU", 'THUJA':"TU", 'PISA2':"PI2", 'TABR2':"TA", 'QUDO':"QU", 'MEPO5':'ME', 'QUMI':"QU", 'QUFA':"QU", 'QUMO4':"QU", 'QUMU':"QU", 'QUNI':"QU", 'QUKE':"QU", 'QUVE':'QU', 'QUWI2':"QU", 'QUPA5':"QU", 'QURU':"QU", 'QUST':"QU", 'RHGL':"RH", "ROPS":"RO", 'SASSA':'SA', 'SALIX':'SA', 'SYOC':"SY", 'SILA20':"SI", 'SWMA2':"SW", 'TRSE6':"TR", 'TSCA':"TS", 'TSHE':"TS", 'TIAM':"TI", 'TAHE':"TA", 'ULAL':"UL", 'ULAM':"UL", 'ULMUS':"UL", 'ULCR':"UL", 'ULRU':"UL", } import os os.chdir("/blue/ewhite/s.marconi/NeonSpeciesClassification/") from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelEncoder import numpy as np import 
pandas as pd from sklearn.decomposition import PCA from imblearn.over_sampling import SMOTENC from imblearn.over_sampling import ADASYN from imblearn.under_sampling import TomekLinks from collections import Counter from src.hdr import * from sklearn.preprocessing import normalize import numpy as np import pandas as pd import pickle from sklearn.ensemble import RandomForestClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline from sklearn.ensemble import StackingClassifier from sklearn.ensemble import BaggingClassifier from mlxtend.classifier import StackingClassifier from sklearn.linear_model import LogisticRegressionCV from sklearn.experimental import enable_hist_gradient_boosting from sklearn.ensemble import HistGradientBoostingClassifier from mlxtend.classifier import SoftmaxRegression from sklearn.ensemble import GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from imblearn.ensemble import BalancedRandomForestClassifier from imblearn.ensemble import RUSBoostClassifier from sklearn.calibration import CalibratedClassifierCV from sklearn.svm import SVC from sklearn.neural_network import MLPClassifier domainid = { "GUAN": "D04", "BART": "D01", "HARV": "D01", "STEI": "D05", "TREE": "D05", "UNDE": "D05", "SERC": "D02", "SCBI": "D02", "OSBS": "D03", "MLBS": "D07", "DELA": "D08", "TALL": "D08", "BLAN": "D02", "UKFS": "D06", "RMNP": "D10", "BONA": "D19", "MOAB": "D13", "DEJU": "D19", "LENO": "D08", "DSNY": "D03", "JERC": "D03", "KONZ": "D06", "CLBJ": "D11", "YELL": "D12", "NIWO": "D13", "ABBY": "D16", "WREF": "D16", "SJER": "D17", "PUUM": "D20" } def categorical_encoder(cats,y): import category_encoders as ce le = LabelEncoder() le.fit(y) le = le.transform(y) enc = ce.LeaveOneOutEncoder(cols=['siteID']) # enc = enc.fit(cats).transform(cats) train_enc = enc.fit_transform(cats,le) return(train_enc) # prepare input data def prepare_inputs(X_train, X_test, cats = ['domainID', 'siteID']): X_train_enc, X_test_enc = list(), list() # label encode each column for i in cats: le = LabelEncoder() le.fit(X_train[i]) # encode train_enc = le.transform(X_train[i]) test_enc = le.transform(X_test[i]) # store X_train_enc.append(train_enc) X_test_enc.append(test_enc) return X_train_enc, X_test_enc min_class = Counter(y_train.taxonID) unsuited = pd.DataFrame(min_class.items()) only_one = unsuited.iloc[:,1]<2 unsuited = unsuited[only_one][0] #unsuited = pd.DataFrame(min_class.items()) doble_unsuited = X_train[y_train.taxonID.isin(unsuited)] X_train=X_train.append(doble_unsuited) doble_unsuited = y_train[y_train.taxonID.isin(unsuited)] y_train=y_train.append(doble_unsuited) min_class = Counter(y_train.taxonID) min_class = min_class[min(min_class, key=min_class.get)] min_class # dimensionality reduction def kld_reduction(brick, kld_out): from sklearn import preprocessing refl = brick.drop(['individualID'], axis=1) scaler = preprocessing.StandardScaler().fit(refl) refl = scaler.transform(refl) kld_groups = getClusters(refl, numBands = 15) np.savetxt(kld_out, kld_groups, delimiter=",") individualID=brick["individualID"] # brick = brick.drop(columns=['individualID']) brick = brick.values all_data = np.zeros([brick.shape[0],1]) for jj in np.unique(kld_groups): which_bands = kld_groups == jj #min new_col = np.apply_along_axis(min, 1, brick[:,which_bands])[...,None] all_data = np.append(all_data, new_col, 1) #mean new_col = np.apply_along_axis(np.mean, 1, 
brick[:,which_bands])[...,None] all_data = np.append(all_data, new_col, 1) #max new_col = np.apply_along_axis(max, 1, brick[:,which_bands])[...,None] all_data = np.append(all_data, new_col, 1) #reappend individualID on the all data dataframe all_data = pd.DataFrame(all_data) all_data["individualID"] = individualID all_data = all_data.drop([0], axis=1) # #shift back individualID to first row cols = list(all_data.columns) cols = [cols[-1]] + cols[:-1] all_data = all_data[cols] return all_data brick = pd.read_csv("./data/features_0411.csv") #"./data/brdf_spectra_2104b.csv") metadata = pd.read_csv("./data/metadata_0411.csv") #"./data/metadata_2104b.csv") metadata = metadata[["individualID", "groupID", "plotID","siteID","elevation","latitude", "longitude", "taxonID"]] kld_out="./data/tmp_grps.csv"#"./data/kld_grps_2104b.csv" nbands = brick.shape[0] brick.iloc[:,1:nbands] = normalize(brick.iloc[:,1:nbands]) brick = kld_reduction(brick, kld_out) foo = brick.drop(columns=[ 'individualID']) ele1 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_all_df_vst.csv") ele2 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_df_vst.csv") ele3 =
pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR//elevation_sp_3.csv")
pandas.read_csv
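The `kld_reduction` routine in the snippet above summarises each cluster of spectral bands by its min, mean and max across the cluster. Below is a compact sketch of that per-group summary; the data and the group labels are synthetic stand-ins for the real reflectance brick and KLD clusters.

import numpy as np
import pandas as pd

# Sketch of the per-group band summary performed by kld_reduction above:
# for each cluster of bands, keep the min, mean and max across that cluster.
rng = np.random.default_rng(0)
brick = rng.random((5, 6))                  # 5 pixels x 6 spectral bands (toy data)
kld_groups = np.array([0, 0, 1, 1, 2, 2])   # hypothetical band clusters

summaries = []
for group in np.unique(kld_groups):
    bands = brick[:, kld_groups == group]
    summaries.extend([bands.min(axis=1), bands.mean(axis=1), bands.max(axis=1)])

features = pd.DataFrame(np.column_stack(summaries))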
import math import numpy as np import scipy.signal import scipy.stats import pandas as pd import matplotlib.pyplot as plt from typing import List, Union, Optional from signalanalysis import tools plt.style.use('seaborn') class Signal: """Base class for general signal, either ECG or VCG Attributes ---------- data : pd.DataFrame Raw ECG data for the different leads filename : str Filename for the location of the data normalised : bool Whether or not the data for the leads have been normalised n_beats : int Number of beats recorded in the trace. Set to 0 if not calculated n_beats_threshold : float (0, 1.0) Threshold value used to determine number of beats in overall signal, expressed as a fraction of the maximum signal strength detected, default=0.5 t_peaks : list of float Time at which peaks are detected in the overall signal beats : list of pd.DataFrame Raw data separated into individual beats beat_index_reset : bool Whether or not the individual data recorded in `beats` has had its time index zeroed (true) or not (false) beat_start : list of float Start times of the individual beats (allows conversion between individual beat data and overall signal data regardless of value of `beat_index_reset`) rms : pd.Series Record of the RMS data for the signal qrs_start : list of float Times calculated for the start of the QRS complex qrs_end : end Times calculated for the end of the QRS complex twave_end : end Times calculated for the end of the T-wave data_source : str Source for the data, if known e.g. Staff III database, CARP simulation, etc. comments : str Any further details known about the data, e.g. sex, age, etc. Methods ------- get_rms(unipolar_only=True) Returns the RMS of the combined signal get_n_beats(threshold=0.5, separation=0.2, plot=False, **kwargs) Splits the full signal into individual beats plot(separate_beats=False) Plot the ECG data """ def __init__(self, **kwargs): """Creates parameters that will be common (or are expected to be common) across all signal types Parameters ---------- normalise : str Whether or not to normalise all ECG leads, default=False filter : {'butterworth', 'savitzky-golay'}, optional Whether to apply a filter to the signal data, and if so, which filter to apply. 
Keyword arguments for each filter can then be passed (see filters in signalanalysis.tools.maths for details) """ # Properties that can be derived subsequently to opening the file self.data = pd.DataFrame(dtype=float) self.filename = str() self.n_beats = 0 self.n_beats_threshold = 0.5 self.t_peaks = None self.beats = list() self.beat_index_reset = False self.beat_start = None self.rms = None self.qrs_start = list() self.qrs_end = list() self.qrs_duration = list() self.twave_end = list() self.data_source = None self.comments = list() # Keyword arguments (optional) if 'normalise' in kwargs: self.normalised = kwargs.get('normalise') else: self.normalised = bool() # NB: filter must be applied in individual class __init__ functions, as it must be applied after the data # have been read into self.data if 'filter' in kwargs: assert kwargs.get('filter') in ['butterworth', 'savitzky-golay'], "Unknown value for filter_signal passed" self.filter = kwargs.get('filter') else: self.filter = None def reset(self): """Reset all properties of the class Function called when reading in new data into an existing class (for some reason), which would make these properties and attributes clash with the other data """ self.data = pd.DataFrame(dtype=float) self.filename = str() self.n_beats = 0 self.n_beats_threshold = 0.5 self.t_peaks = None self.beats = list() self.beat_index_reset = False self.beat_start = None self.rms = None self.qrs_start = list() self.qrs_end = list() self.qrs_duration = pd.DataFrame(dtype=float) self.twave_end = list() self.data_source = None self.comments = list() self.normalised = bool() self.filter = None def apply_filter(self, **kwargs): """Apply a given filter to the data, using their respective arguments as required See Also -------- :py:meth:`signalanalysis.tools.maths.filter_butterworth` : Potential filter :py:meth:`signalanalysis.tools.maths.filter_savitzkygolay` : Potential filter """ if self.filter == 'butterworth': self.data = tools.maths.filter_butterworth(self.data, **kwargs) elif self.filter == 'savitzky-golay': self.data = tools.maths.filter_savitzkygolay(self.data, **kwargs) def get_rms(self, preprocess_data: pd.DataFrame = None, drop_columns: List[str] = None, **kwargs): """Returns the RMS of the combined signal Parameters ---------- preprocess_data : pd.DataFrame, optional Only passed if there is some extant data that is to be used for getting the RMS (for example, if the unipolar data only from ECG is being used, and the data is thus preprocessed in a manner specific for ECG data in the ECG routine) drop_columns : list of str, optional List of any columns to drop from the raw data before calculating the RMS. Can be used in conjunction with preprocess_data Returns ------- self.rms : pd.Series RMS signal of all combined leads Notes ----- The scalar RMS is calculated according to .. math:: \\sqrt{ \\frac{1}{n}\\sum_{i=1}^n (\\textnormal{ECG}_i^2(t)) } for all leads available from the signal (12 for ECG, 3 for VCG), unless some leads are excluded via the drop_columns parameter. 
""" if drop_columns is None: drop_columns = list() if preprocess_data is None: signal_rms = self.data.copy() else: signal_rms = preprocess_data if drop_columns is not None: assert all(drop_column in signal_rms for drop_column in drop_columns),\ "Values passed in drop_columns not valid" signal_rms.drop(drop_columns, axis=1, inplace=True) n_leads = len(signal_rms.columns) for key in signal_rms: signal_rms.loc[:, key] = signal_rms[key] ** 2 self.rms = np.sqrt(signal_rms.sum(axis=1) / n_leads) def get_peaks(self, threshold: float = 0.4, min_separation: float = 200, plot: bool = False, **kwargs): """ Get the peaks of the RMS signal. It is often the case that, to determine the number of beats, the peaks in the signal must be determined. This is usually done using the RMS of the signal, as demonstrated here. For alternative methods, this method is overwritten. Parameters ---------- threshold : float {0<1}, optional Minimum value to search for for a peak in RMS signal to determine when a beat has occurred, default=0.33 min_separation : float, optional Minimum separation (in ms) required between neighbouring peaks in a given signal (correlates to the minimum pacing rate expected), default=200ms plot : bool, optional Whether to plot results of beat detection, default=False Returns ------- self.n_beats : int Number of beats in signal self.n_beats_threshold : int A record of the threshold value used to determine when a peak in the signal could potentially be counted as a 'peak' self.t_peaks : list of float Time at which peaks are detected """ # Calculate locations of RMS peaks to determine number and locations of beats if self.rms is None: self.get_rms(**kwargs) min_separation = self.return_to_index(min_separation) i_separation = self.data.index.get_loc(min_separation) i_peaks, _ = scipy.signal.find_peaks(self.rms, height=threshold*max(self.rms), distance=i_separation) self.n_beats = len(i_peaks) self.n_beats_threshold = threshold self.t_peaks = self.rms.index[i_peaks] if plot: _ = self.plot_peaks() return def plot_peaks(self, **kwargs): if self.t_peaks is None: self.get_peaks(**kwargs) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(self.rms) ax.scatter(self.t_peaks, self.rms.loc[self.t_peaks], marker='o', edgecolor='tab:orange', facecolor='none', linewidths=2) return fig, ax def get_beats(self, reset_index: bool = True, offset_start: Optional[float] = None, offset_end: Optional[float] = None, plot: bool = False, **kwargs): """Calculate the number of beats in a given signal, and save the individual beats to the object for later use When given the raw data of a given signal (ECG or VCG), will estimate the number of beats recorded in the trace based on the RMS of the signal exceeding a threshold value. The estimated individual beats will then be saved in a list. Parameters ---------- reset_index : bool Whether to reset the time index for the separated beats so that they all start from zero (true), or whether to leave them with the original time index (false), default=True offset_start, offset_end : float, optional Time in ms to offset from the prior peak (``offset_start``) and from the following peak (``offset_end``) when extracting individual beats, default=None plot : bool Whether to plot the overall signal, showing where the demarcation between individual plots has been made Other Parameters ---------------- unipolar_only : bool, optional Only appropriate for ECG data. 
Whether to use only unipolar ECG leads to calculate RMS, default=True Returns ------- self.beats : list of pd.DataFrame Individual beats in signal self.beat_start : list of float Times at which the individual beats start in the overall signal data self.beat_index_reset : bool Whether or not the time index for each individual beat has been zeroed (true) or not (false) Notes ----- If ``offset_start`` and ``offset_end`` are set to 0ms, then the n-th beat will be recorded as starting from the point of the (n-1)th peak, and concluding at the point of the (n+1)th peak. This will mean that the beats will be saved in a lossless manner, i.e. saved as [ECG1, ECG2, ..., ECG(n)], where ECG1=[0:peak2], ECG2=[peak1:peak3], ..., ECGn=[peak(n-1):end]; this will lead to excessive duplication of the data between the various recorded peaks, but will ensure that the start of the QRS and T-wave are not accidentally excluded. See Also -------- :py:meth:`signalanalysis.signalplot.general.Signal.get_peaks` : Method to find peaks in the RMS signal :py:meth:`signalanalysis.signalplot.general.Signal.get_rms` : RMS signal calculation required for getting n_beats """ if self.t_peaks is None: self.get_peaks(**kwargs) # If only one beat is detected, can end here self.beat_index_reset = reset_index if self.n_beats == 1: self.beats = [self.data] self.beat_start = [self.data.index[0]] else: # Calculate series of cycle length values, before then using this to estimate the start and end times of # each beat. The offset from the previous peak will be assumed at 0.4*BCL, while the offset from the # following peak will be 0.1*BCL (both with a minimum value of 30ms) if offset_start is None: bcls = np.diff(self.t_peaks) offset_start_list = [max(0.6*bcl, 30) for bcl in bcls] else: offset_start_list = [offset_start]*self.n_beats if offset_end is None: bcls = np.diff(self.t_peaks) offset_end_list = [max(0.1*bcl, 30) for bcl in bcls] else: offset_end_list = [offset_end]*self.n_beats self.beat_start = [self.data.index[0]]+list(self.t_peaks[:-1]+offset_start_list) beat_end = [t_p-offset for t_p, offset in zip(self.t_peaks[1:], offset_end_list)]+[self.data.index[-1]] for t_s, t_e in zip(self.beat_start, beat_end): self.beats.append(self.data.loc[t_s:t_e, :]) if reset_index: for i_beat in range(self.n_beats): zeroed_index = self.beats[i_beat].index-self.beats[i_beat].index[0] self.beats[i_beat].set_index(zeroed_index, inplace=True) if plot: _ = self.plot_beats(offset_end=offset_end, **kwargs) def plot_beats(self, offset_end: Optional[float] = None, **kwargs): if self.beats is None: self.get_beats(offset_end=offset_end, plot=False, **kwargs) if offset_end is None: bcls = np.diff(self.t_peaks) offset_end_list = [max(0.1*bcl, 30) for bcl in bcls] else: offset_end_list = [offset_end]*self.n_beats beat_end = [t_p-offset for t_p, offset in zip(self.t_peaks[1:], offset_end_list)]+[self.data.index[-1]] fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(self.rms, color='C0', label='RMS') # Plot RMS data ax.scatter(self.t_peaks, self.rms.loc[self.t_peaks], marker='o', edgecolor='tab:orange', facecolor='none', linewidths=2) colours = tools.plotting.get_plot_colours(self.n_beats) i_beat = 1 max_height = np.max(self.rms) height_shift = (np.max(self.rms)-np.min(self.rms))*0.1 height_val = [max_height, max_height-height_shift]*math.ceil(self.n_beats/2) for t_s, t_e, col, h in zip(self.beat_start, beat_end, colours, height_val): ax.axvline(t_s, color=col) ax.axvline(t_e, color=col) ax.annotate(text='{}'.format(i_beat), xy=(t_s, h), xytext=(t_e, 
h), arrowprops=dict(arrowstyle='<->', linewidth=3)) i_beat = i_beat+1 return fig, ax def get_twave_end(self): print("Not coded yet! Don't use!") pass def return_to_index(self, original_sequence: Union[pd.DataFrame, list, float, int]): """Aligns a given sequence of time values to those in the index When manipulating time data, it can often fall out of sync with the time values for the data, e.g. the data are recorded every 2ms, then searching for a value at 5ms will be difficult. This function realigns a sequence of time data to the possible values. Parameters ---------- original_sequence : pd.DataFrame or list or float or int The sequence of numbers to be returned to the correct time index Returns ------- new_sequence : pd.DataFrame or list or float or int The sequence of numbers now corrected to match the time index for the data, returned in the same format as the original sequence was provided, i.e. pd.DataFrame will return pd.DataFrame, int will return int, and so on. """ if isinstance(original_sequence, pd.DataFrame): # Reset all values that are less than 0 or greater than the index to be compared against original_sequence[original_sequence < 0] = 0 original_sequence[original_sequence > self.data.index[-1]] = self.data.index[-1] new_sequence = original_sequence.applymap(lambda y: min(self.data.index, key=lambda x: abs(x-y))) new_sequence[pd.isna(original_sequence)] = float("nan") elif isinstance(original_sequence, list): # This will technically always find the index greater than the requested value, which may be one step # higher than the *actual* closest value, but the difference is judged to be marginal for an approx. five # times faster find! new_sequence = list() for element in original_sequence: if element > self.data.index[-1]: element = self.data.index[-1] # Below expression is recommended instead of np.where(self.data.index >= original_sequence)[0][0] # https://numpy.org/doc/stable/reference/generated/numpy.where.html new_sequence.append(self.data.index[np.asarray(self.data.index >= original_sequence).nonzero()[0][0]]) else: if original_sequence > self.data.index[-1]: original_sequence = self.data.index[-1] try: new_sequence = self.data.index[np.asarray(self.data.index >= original_sequence).nonzero()[0][0]] except IndexError: pass return new_sequence def get_signal_rms(signal: pd.DataFrame, unipolar_only: bool = True) -> List[float]: """Calculate the ECG(RMS) of the ECG as a scalar .. deprecated:: The use of this module is deprecated, and the internal class method should be used in preference ( signalanalysis.signalplot.general.Signal.get_rms()) Parameters ---------- signal: pd.DataFrame ECG or VCG data to process unipolar_only : bool, optional Whether to use only unipolar ECG leads to calculate RMS, default=True Returns ------- signal_rms : list of float Scalar RMS ECG or VCG data Notes ----- The scalar RMS is calculated according to .. math:: \sqrt{\frac{1}{n}\sum_{i=1}^n (\textnormal{ECG}_i^2(t))} for all leads available from the signal (12 for ECG, 3 for VCG). If unipolar_only is set to true, then ECG RMS is calculated using only 'unipolar' leads. 
This uses V1-6, and the non-augmented limb leads (VF, VL and VR) ..math:: VF = LL-V_{WCT} = \frac{2}{3}aVF ..math:: VL = LA-V_{WCT} = \frac{2}{3}aVL ..math:: VR = RA-V_{WCT} = \frac{2}{3}aVR References ---------- The development and validation of an easy to use automatic QT-interval algorithm <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> PLoS ONE, 12(9), 1–14 (2017) https://doi.org/10.1371/journal.pone.0184352 """ assert isinstance(signal, pd.DataFrame) signal_rms = signal.copy() if unipolar_only and ('V1' in signal_rms.columns): signal_rms['VF'] = (2/3)*signal_rms['aVF'] signal_rms['VL'] = (2/3)*signal_rms['aVL'] signal_rms['VR'] = (2/3)*signal_rms['aVR'] signal_rms.drop(['aVF', 'aVL', 'aVR', 'LI', 'LII', 'LIII'], axis=1, inplace=True) n_leads = len(signal_rms.columns) for key in signal_rms: signal_rms.loc[:, key] = signal_rms[key]**2 signal_rms = np.sqrt(signal_rms.sum(axis=1)/n_leads) return signal_rms def get_bcl(signal_markers: pd.DataFrame): """Function to return estimates of the cycle length for a recording For a given train of signal markers (either QRS start, AT, or other), will estimate the cycle length as the difference between successive markers, with an estimate made that the last cycle length is identical to the preceding beat Parameters ---------- signal_markers : pd.DataFrame Data for the markers to use (usually QRS start for ECG/VCG, or AT for EGM) Returns ------- bcl : pd.DataFrame Data for estimated BCL for each signal """ bcl = signal_markers.diff(axis=0) bcl.drop(0, inplace=True) bcl = bcl.append(pd.DataFrame(bcl[-1:].values, columns=bcl.columns)) bcl.fillna(axis=0, method='ffill', limit=1, inplace=True) bcl.reset_index(drop=True, inplace=True) return bcl def get_twave_end(ecgs: Union[List[pd.DataFrame], pd.DataFrame], leads: Union[str, List[str]] = 'LII', i_distance: int = 200, filter_signal: Optional[str] = None, baseline_adjust: Union[float, List[float], None] = None, return_median: bool = True, remove_outliers: bool = True, plot_result: bool = False) -> List[pd.DataFrame]: """ Return the time point at which it is estimated that the T-wave has been completed Parameters ---------- ecgs : pd.DataFrame or list of pd.DataFrame Signal data, either ECG or VCG leads : str, optional Which lead to check for the T-wave - usually this is either 'LII' or 'V5', but can be set to a list of various leads. If set to 'global', then all T-wave values will be calculated. Will return all values unless return_median flag is set. Default 'LII' i_distance : int, optional Distance between peaks in the gradient, i.e. will direct that the function will only find the points of maximum gradient (representing T-wave, etc.) with a minimum distance given here (in terms of indices, rather than time). Helps prevent being overly sensitive to 'wobbles' in the ecg. Default=200 filter_signal : {'butterworth', 'savitzky-golay'}, optional Whether or not to apply a filter to the data prior to trying to find the actual T-wave gradient. Can pass either a Butterworth filter or a Savitzky-Golay filter, in which case the required kwargs for each can be provided. Default=None (no filter applied) baseline_adjust : float or list of float, optional Point from which to calculate the adjusted baseline for calculating the T-wave, rather than using the zeroline. In line with Hermans et al., this is usually the start of the QRS complex, with the baseline calculated as the median amplitude of the 30ms before this point. 
return_median : bool, optional Whether or not to return an average of the leads requested, default=True remove_outliers : bool, optional Whether to remove T-wave end values that are greater than 1 standard deviation from the mean from the data. Only has an effect if more than one lead is provided, and return_average is True. Default=True plot_result : bool, optional Whether to plot the results or not, default=False Returns ------- twave_ends : list of pd.DataFrame Time value for when T-wave is estimated to have ended. Notes ----- Calculates the end of the T-wave as the time at which the T-wave's maximum gradient tangent returns to the baseline. The baseline is either set to zero, or set to the median value of 30ms prior to the start of the QRS complex (the value of which has to be passed in the `baseline_adjust` variable). References ---------- .. [1] Postema PG, Wilde AA, "The measurement of the QT interval," Curr Cardiol Rev. 2014 Aug;10(3):287-94. doi:10.2174/1573403x10666140514103612. PMID: 24827793; PMCID: PMC4040880. .. [2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "The development and validation of an easy to use automatic QT-interval algorithm," PLoS ONE, 12(9), 1–14 (2017), https://doi.org/10.1371/journal.pone.0184352 """ # Process the input arguments to ensure they are of the correct form if isinstance(ecgs, pd.DataFrame): ecgs = [ecgs] if leads == 'global': leads = ecgs[0].columns elif not isinstance(leads, list): leads = [leads] for ecg in ecgs: for lead in leads: assert lead in ecg, "Lead not present in ECG" if filter_signal is not None: assert filter_signal in ['butterworth', 'savitzky-golay'], "Unknown value for filter_signal passed" # Extract ECG data for the required leads, then calculate the gradient and the normalised gradient ecgs_leads = [ecg[leads] for ecg in ecgs] if filter_signal == 'butterworth': ecgs_leads = [tools.maths.filter_butterworth(ecg) for ecg in ecgs_leads] elif filter_signal == 'savitzky-golay': ecgs_leads = [tools.maths.filter_savitzkygolay(ecg) for ecg in ecgs_leads] ecgs_grad = [pd.DataFrame(index=ecg.index, columns=ecg.columns) for ecg in ecgs_leads] ecgs_grad_normalised = [pd.DataFrame(index=ecg.index, columns=ecg.columns) for ecg in ecgs_leads] for i_ecg, ecg in enumerate(ecgs_leads): for lead in ecg: ecg_grad_temp = np.gradient(ecg[lead], ecg.index) ecgs_grad[i_ecg].loc[:, lead] = ecg_grad_temp ecgs_grad_normalised[i_ecg].loc[:, lead] = tools.maths.normalise_signal(ecg_grad_temp) # Calculate the baseline required for T-wave end interpolation baseline_adjust = tools.python.convert_input_to_list(baseline_adjust, n_list=len(ecgs), default_entry=0) baseline_vals = [pd.DataFrame(columns=ecg.columns, index=[0]) for ecg in ecgs_leads] for i_ecg, ecg_leads in enumerate(ecgs_leads): if baseline_adjust[i_ecg] == 0: for lead in ecg_leads: baseline_vals[i_ecg].loc[0, lead] = 0 else: baseline_start = max(0, baseline_adjust[i_ecg]-30) for lead in ecg_leads: baseline_vals[i_ecg].loc[0, lead] = np.median(ecg_leads[lead][baseline_start:baseline_adjust[i_ecg]]) # Find last peak in gradient (with the limitations imposed by only looking for a single peak within the range # defined by i_distance, to minimise the effect of 'wibbles' in the ecg), then by basic trig find the # x-intercept (which is used as the T-wave end point) # noinspection PyUnresolvedReferences i_peaks = [pd.DataFrame(columns=ecg.columns, index=[0]) for ecg in ecgs_leads] twave_ends = [
pd.DataFrame(columns=ecg.columns, index=[0])
pandas.DataFrame
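The RMS described by `get_rms` above is the square root of the mean of the squared leads. A minimal sketch of that calculation on a lead-by-time DataFrame; the two leads here are synthetic toy data.

import numpy as np
import pandas as pd

# Sketch of the RMS definition used by get_rms above:
# sqrt( (1/n) * sum over leads of lead(t)^2 ), on two synthetic leads.
t = np.linspace(0, 1, 500)
data = pd.DataFrame({'LI': np.sin(2 * np.pi * t),
                     'LII': np.cos(2 * np.pi * t)}, index=t)

rms = np.sqrt((data ** 2).sum(axis=1) / len(data.columns))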
# -*- coding: utf-8 -*- import numpy as np import times import load import pandas import scipy.stats EARTH_OMEGA = 365.242 def diag_pow(M, p): return np.diag(np.diag(M) ** p) def alternative_splines_producer(X, t, factory, use_splines,freq): if (use_splines): # Figure how many years there are t_year = times.days_to_date(t).astype('datetime64[Y]') # The -1 exists so the no spines are created for the last year, # this is because there aren't enogth observations to fit those # splines correctly. years = np.max(t_year) - np.min(t_year) - 1 t_min = np.min(t) for s in range(0, years + 1): t_shift = t - EARTH_OMEGA * s t_transform = np.asarray(factory(t_shift)).T t_transform[t_shift < t_min, :] = 0 #print t_transform.shape, t_shift.shape # print t_shift,t_min n_point_intermezzo=round((freq*60)**2)+5 if s>0: #index=np.logical_and(t_shift >= t_min,t_shift <= t_min) iLag=t_shift < t_min location_points=[i for i,x in enumerate(iLag) if x==1] p1=t_transform[location_points[-1]+1,:] p2=t_transform[location_points[-1]+1+n_point_intermezzo,:] for i in range(int(n_point_intermezzo)): t_transform[location_points[-1]-i,:]=p2/2.0*(n_point_intermezzo-i)/n_point_intermezzo t_transform[location_points[-1]+1+i,:]=p2/2.0*(i)/n_point_intermezzo+p2/2.0 X = np.column_stack( (X , t_transform) ) #print min(X.ravel()),max(X.ravel()) else: X = np.column_stack( (X, np.asarray(factory(t)).T ) ) return X def splines_producer(X, t, factory, use_splines): if (use_splines): # Figure how many years there are t_year = times.days_to_date(t).astype('datetime64[Y]') # The -1 exists so the no spines are created for the last year, # this is because there aren't enogth observations to fit those # splines correctly. years = np.max(t_year) - np.min(t_year) - 1 t_min = np.min(t) for s in range(0, years + 1): t_shift = t - EARTH_OMEGA * s t_transform = np.asarray(factory(t_shift)).T t_transform[t_shift < t_min, :] = 0 X = np.column_stack( (X , t_transform) ) else: X = np.column_stack( (X, np.asarray(factory(t)).T ) ) return X def splines_description(names, factory, use_splines): if (use_splines): # Figure how many years there are t_year = times.days_to_date(time_vector()).astype('datetime64[Y]') # The -1 exists so the no spines are created for the last year, # this is because there aren't enogth observations to fit those # splines correctly. years = np.max(t_year) - np.min(t_year) - 1 for s in range(0, years + 1): names += factory('t - 365.2 * ' + str(s)) else: names += factory('t') return names def time_vector(): """ Construct a time vector containg all the start dates as relative days since year 2000 """ return times.date_to_days(load.dates[:,0]) def design_matrix(t=None, frequencies=18, splines=False): """ Construct the desgin matrix X from the time (t) as: [ 1, t, 0.5 * t^2, cos(2π/265.2 * t), sin(2π/265.2 * t), cos(2π/182.6 * t), sin(2π/182.6 * t), ..., cos(2π/20.3 * t) , sin(2π/20.3 * t) ] """ if (t is None): t = time_vector() # X = [1] X = np.ones((t.size, 1)) # X = [t, 0.5 * t^2] X = splines_producer(X, t, lambda t: [t, 0.5 * (t**2)], use_splines=False) # X = [X, cos, sin, ...] # Max frequency is omega = 365.242 days per year. Samples come # every 10 days; 365/10 ~ 36. nyquist frequency 36/2 = 18 for i in range(1, frequencies + 1): freq = (2 * np.pi)/(EARTH_OMEGA/i) X = splines_producer(X, t, lambda t: [ np.cos(freq * t), np.sin(freq * t) ], use_splines=splines) # Now that X is build, convert it to a matrix. 
# The matrix will symbolize a 37 dimentional space with 341 values return np.asmatrix(X) def theta_description(frequencies=18, splines=False): """ Construct a simple array containg a description for all the theta values as strings. """ names = ["intercept (1)"] names = splines_description(names, lambda t: [ "vel. (" + t + ")", "acc. (0.5 * (" + t + ")^2)" ], use_splines = False) omega = 365.242 for i in range(1,frequencies + 1): names = splines_description(names, lambda t: [ u"cos(%d * ω * (%s))" % (i, t), u"sin(%d * ω * (%s))" % (i, t) ], use_splines = splines) return names def theta_matrix(frequencies=18, splines=False): """ Construct a theta matrix (180, 360, p) containg all the theta vector for each world position """ X = design_matrix(frequencies=frequencies, splines=splines) # SVD factorize the X matrix, allowing for numerical stable calculation of # the hat matrix (H) U,S,V = np.linalg.svd(X, full_matrices=False) U,S,V = (U, np.diag(S), V.T) # Get the ewh values for all positions as a matrix. This matrix # will have the diffrent positons as collums and days on the rows shape = load.grids.shape Y = np.asmatrix(load.grids.reshape(shape[0] * shape[1], shape[2])).T # This will be a matrix of thetas as collums for each position. # The rows will be the theta parameters Theta = (V * diag_pow(S, -1) * U.T) * Y # Now reshape theta intro a (lat, lon, param) ndarray Theta = np.asarray(Theta).reshape(X.shape[1], shape[0], shape[1]).transpose([1,2,0]) # All done return Theta def pvalue_matrix(frequencies=18, splines=False): Theta = theta_matrix(frequencies=frequencies, splines=splines) H = hat_matrix(frequencies=frequencies, splines=splines) X = design_matrix(frequencies=frequencies, splines=splines) (n, p) = X.shape # SVD factorize the X matrix, allowing for numerical stable calculation of # the hat matrix (H) U,S,V = np.linalg.svd(X, full_matrices=False) U,S,V = (U, np.diag(S), V.T) # Get the ewh values for all positions as a matrix. This matrix # will have the diffrent positons as collums and days on the rows shape = load.grids.shape Y = np.asmatrix(load.grids.reshape(shape[0] * shape[1], shape[2])).T # Compute RMSR (sigma.hat) sigma = np.sqrt(np.sum(np.power(Y - H * Y, 2), 0) / (n - p)) # Compute cov matrix without sigma^2 (makes it meaning full for all positions) S2inv = diag_pow(S, -2) cov_diag = np.asmatrix(np.diag(V * S2inv * V.T)) # Compute a matrix of size(lat * lon, p) with sd(beta) values sd = np.sqrt(cov_diag.T) * sigma # Transform to (lat, lon, p) sd = np.asarray(sd).reshape(X.shape[1], shape[0], shape[1]).transpose([1,2,0]) # Compute t-values, ndarray(lat, lon, p) t_values = Theta / sd # Compute p values prop = scipy.stats.t.cdf(t_values, n - p) return 2 * np.minimum(prop, 1 - prop) def theta_vector(y, frequencies=18, splines=False): """ Contstruct a theta vector (p,) containg all the theta values for a a single given y vector """ X = design_matrix(frequencies=frequencies, splines=splines) # SVD factorize the X matrix, allowing for numerical stable calculation of # the hat matrix (H) U,S,V = np.linalg.svd(X, full_matrices=False) U,S,V = (U, np.diag(S), V.T) # This will be a matrix of thetas as collums for each position. 
# The rows will be the theta parameters Theta = (V * diag_pow(S, -1) * U.T) * np.asmatrix(y.ravel()).T return Theta def pvalue_vector(y, frequencies=18, splines=False): y = np.asmatrix(y.ravel()).T Theta = theta_vector(y, frequencies=frequencies, splines=splines) H = hat_matrix(frequencies=frequencies, splines=splines) X = design_matrix(frequencies=frequencies, splines=splines) (n, p) = X.shape # SVD factorize the X matrix, allowing for numerical stable calculation of # the hat matrix (H) U,S,V = np.linalg.svd(X, full_matrices=False) U,S,V = (U, np.diag(S), V.T) # Compute RMSR (sigma.hat) sigma = float(np.sqrt(np.sum(np.power(y - H * y, 2), 0) / (n - p))) # Compute cov matrix without sigma^2 (makes it meaning full for all positions) S2inv = diag_pow(S, -2) cov_diag = np.diag(V * S2inv * V.T) # Compute a matrix of size(lat * lon, p) with sd(beta) values sd = np.sqrt(cov_diag) * sigma # Compute t-values, ndarray(lat, lon, p) t_values = Theta.A.ravel() / sd # Compute p values prop = scipy.stats.t.cdf(t_values, n - p) p = 2 * np.minimum(prop, 1 - prop) return p.ravel() def hat_matrix(X=None, interpolate=False, frequencies=18, splines=False): """ Construct the hat matrix, there transforms y intro \hat{y} """ X_source = design_matrix(frequencies=frequencies, splines=splines) if (X is None): X = X_source if (interpolate is True): X_source = X # SVD factorize the X matrix, allowing for numerical stable calculation of # the hat matrix (H) U,S,V = np.linalg.svd(X_source, full_matrices=False) U,S,V = (U, np.diag(S), V.T) # Calculate hat matrix H = X * V * diag_pow(S, -1) * U.T # All done return H def interpolate(latIndex, lonIndex): """ Interpolate the ewh and time values for some latIndex and lonIndex """ x = load.dates[:,0].ravel() y = load.grids[latIndex, lonIndex, :].ravel() # Bind the EWH data to the raw time values df =
pandas.Series(y, index=x)
pandas.Series
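The design matrix above stacks an intercept, velocity/acceleration terms and sin/cos pairs at multiples of the annual frequency, and the fit is obtained through an SVD pseudo-inverse. A condensed sketch of the same idea, with two harmonics instead of eighteen and a synthetic signal:

import numpy as np

# Condensed sketch of the harmonic design matrix + SVD least-squares fit above.
EARTH_OMEGA = 365.242
t = np.arange(0, 3 * EARTH_OMEGA, 10.0)

X = np.column_stack([np.ones_like(t), t, 0.5 * t ** 2])
for i in (1, 2):
    freq = (2 * np.pi) / (EARTH_OMEGA / i)
    X = np.column_stack([X, np.cos(freq * t), np.sin(freq * t)])

y = 1.0 + 0.01 * t + 2.0 * np.sin(2 * np.pi * t / EARTH_OMEGA)  # toy signal
U, S, Vt = np.linalg.svd(X, full_matrices=False)
theta = Vt.T @ np.diag(1.0 / S) @ U.T @ y   # same role as the diag_pow(S, -1) step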
# -*- coding: utf-8 -*- """ @author: <NAME> - https://www.linkedin.com/in/adamrvfisher/ """ #This is a time series analysis and strategy testing tool #Kth fold optimization using RSI indicator as a signal #Import modules from KthFoldRSIParamGenerator import KthFoldRSIParamGenerator from KthFoldRSIParamTester import KthFoldRSIParamTester from KthFoldRSIFinalParamTester import KthFoldRSIFinalParamTester from KthFoldRSIDecisionOptimizer import KthFoldRSIDecisionOptimizer from YahooGrabberII import YahooGrabberII import numpy as np import pandas as pd from pandas.parser import CParserError import itertools as it import math #Variable assignment DataSetNames = [] ParameterSets = [] TestSetNames = [] TestParameterSets = [] TestParamsLength = [] FinalParameterSet =
pd.DataFrame()
pandas.DataFrame
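The strategy above uses the RSI indicator as its signal. For reference, a textbook Wilder-style RSI in pandas; the price series is synthetic and the 14-bar period is the conventional default, so this is not taken from the KthFoldRSI modules imported above.

import numpy as np
import pandas as pd

# Textbook Wilder-style RSI on a synthetic price series; not the repository's
# KthFoldRSI implementation.
prices = pd.Series(100 + np.random.default_rng(1).standard_normal(200).cumsum())

delta = prices.diff()
gain = delta.clip(lower=0).ewm(alpha=1 / 14, adjust=False).mean()
loss = (-delta.clip(upper=0)).ewm(alpha=1 / 14, adjust=False).mean()
rsi = 100 - 100 / (1 + gain / loss)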
import asyncio from collections import defaultdict from datetime import datetime, timedelta, timezone import logging from typing import Dict, KeysView, List, Optional, Set, Tuple, Type, Union import aiomcache import numpy as np import pandas as pd from athenian.api import metadata from athenian.api.async_utils import gather, read_sql_query from athenian.api.controllers.logical_repos import coerce_logical_repos from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter from athenian.api.controllers.miners.github.logical import split_logical_repositories from athenian.api.controllers.miners.github.precomputed_prs import \ discover_inactive_merged_unreleased_prs, MergedPRFactsLoader, \ OpenPRFactsLoader, remove_ambiguous_prs from athenian.api.controllers.miners.github.pull_request import PullRequestMiner from athenian.api.controllers.miners.github.release_load import ReleaseLoader from athenian.api.controllers.miners.jira.issue import generate_jira_prs_query, \ PullRequestJiraMapper from athenian.api.controllers.miners.types import DeploymentConclusion, PRParticipants, \ PullRequestFactsMap from athenian.api.controllers.prefixer import Prefixer from athenian.api.controllers.settings import LogicalRepositorySettings, ReleaseSettings from athenian.api.db import add_pdb_hits, add_pdb_misses, Database from athenian.api.models.metadata.github import PullRequest from athenian.api.models.persistentdata.models import DeploymentNotification from athenian.api.tracing import sentry_span class UnfreshPullRequestFactsFetcher: """Fetcher for unfreshed pull requests facts.""" release_loader = ReleaseLoader open_prs_facts_loader = OpenPRFactsLoader merged_prs_facts_loader = MergedPRFactsLoader _log = logging.getLogger(f"{metadata.__package__}.UnfreshPullRequestFactsFetcher") @classmethod @sentry_span async def fetch_pull_request_facts_unfresh(cls, miner: Type[PullRequestMiner], done_facts: PullRequestFactsMap, ambiguous: Dict[str, List[int]], time_from: datetime, time_to: datetime, repositories: Set[str], participants: PRParticipants, labels: LabelFilter, jira: JIRAFilter, pr_jira_mapper: Optional[PullRequestJiraMapper], exclude_inactive: bool, branches: pd.DataFrame, default_branches: Dict[str, str], release_settings: ReleaseSettings, logical_settings: LogicalRepositorySettings, prefixer: Prefixer, account: int, meta_ids: Tuple[int, ...], mdb: Database, pdb: Database, rdb: Database, cache: Optional[aiomcache.Client], ) -> PullRequestFactsMap: """ Load the missing facts about merged unreleased and open PRs from pdb instead of querying \ the most up to date information from mdb. The major complexity here is to comply to all the filters. :return: Map from PR node IDs to their facts. 
""" assert isinstance(repositories, set) add_pdb_hits(pdb, "fresh", 1) if pr_jira_mapper is not None: done_jira_map_task = asyncio.create_task( pr_jira_mapper.append_pr_jira_mapping(done_facts, meta_ids, mdb), name="append_pr_jira_mapping/done") done_node_ids = {node_id for node_id, _ in done_facts} done_deployments_task = asyncio.create_task( miner.fetch_pr_deployments(done_node_ids, prefixer, account, pdb, rdb), name="fetch_pr_deployments/done", ) blacklist = PullRequest.node_id.notin_any_values(done_node_ids) physical_repos = coerce_logical_repos(repositories) has_logical_repos = physical_repos != repositories tasks = [ # map_releases_to_prs is not required because such PRs are already released, # by definition cls.release_loader.load_releases( repositories, branches, default_branches, time_from, time_to, release_settings, logical_settings, prefixer, account, meta_ids, mdb, pdb, rdb, cache), miner.fetch_prs( time_from, time_to, physical_repos.keys(), participants, labels, jira, exclude_inactive, blacklist, None, branches, None, account, meta_ids, mdb, pdb, cache, columns=[ PullRequest.node_id, PullRequest.repository_full_name, PullRequest.merged_at, PullRequest.user_login, PullRequest.title, ], with_labels=logical_settings.has_prs_by_label(physical_repos)), ] if jira and done_facts: tasks.append(cls._filter_done_facts_jira( miner, done_facts, jira, meta_ids, mdb, cache)) else: async def identity(): return tasks.append(identity()) if not exclude_inactive: tasks.append(cls._fetch_inactive_merged_unreleased_prs( time_from, time_to, repositories, has_logical_repos, participants, labels, jira, default_branches, release_settings, prefixer, account, meta_ids, mdb, pdb, cache)) else: async def dummy_inactive_prs(): return
pd.DataFrame()
pandas.DataFrame
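The fetcher above keeps its `tasks` list positionally aligned by appending no-op coroutines (`identity`, `dummy_inactive_prs`) on branches that have nothing to load, so that the later gather unpacking keeps a fixed meaning per position. A stripped-down sketch of that pattern with plain asyncio; the fetch functions are stand-ins, not the real loaders.

import asyncio

# Placeholder-coroutine pattern: every branch appends a coroutine so the
# gather() results keep a fixed positional meaning.
async def fetch_releases():
    return ['release-a']

async def fetch_prs():
    return ['pr-1', 'pr-2']

async def main(exclude_inactive: bool):
    tasks = [fetch_releases(), fetch_prs()]
    if not exclude_inactive:
        tasks.append(fetch_prs())          # stand-in for the inactive-PR fetch
    else:
        async def dummy_inactive_prs():
            return []
        tasks.append(dummy_inactive_prs())
    releases, prs, inactive_prs = await asyncio.gather(*tasks)
    return releases, prs, inactive_prs

asyncio.run(main(exclude_inactive=True))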
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ File: transforms.py Author: naughton101 Email: <EMAIL> Github: https://github.com/naught101/empirical_lsm Description: Transformations for empirical_lsm """ import re import pandas as pd import numpy as np import xarray as xr from collections import OrderedDict from sklearn.utils.validation import check_is_fitted from sklearn.base import BaseEstimator, TransformerMixin class Mean(BaseEstimator): """Base class for Linear Models""" def fit(self, X, y): """Fit model.""" self.mean = y.mean(axis=0) def predict(self, X): """return the mean""" return np.tile(self.mean, (X.shape[0], 1)) class LagWrapper(BaseEstimator, TransformerMixin): """Wraps a scikit-learn model, lags the data, and deals with NAs.""" partial_data_ok = True def __init__(self, model, periods=1, freq='30min'): """Lags a dataset. Lags all features. Missing data is dropped for fitting, and replaced with the mean for predict. :periods: Number of timesteps to lag by """ assert isinstance(model, BaseEstimator), "`model` isn't a scikit-learn model" BaseEstimator.__init__(self) TransformerMixin.__init__(self) self.periods = periods self.freq = freq self.model = model def fit(self, X, y=None): """Fit the model with X compute number of output features :X: pandas dataframe :y: Pandas dataframe or series """ if 'site' in X.columns: raise ValueError("site should be an index, not a column") self.n_features = X.shape[1] self.n_outputs = y.shape[1] self.X_mean = X.mean() self.X_cols = X.columns X_lag = self.transform(X, nans='drop') print("LW: Data lagged, fitting with {nk} samples out of {nx}".format(nk=X_lag.shape[0], nx=X.shape[0])) self.model.fit(X_lag, y.ix[X_lag.index]) return self def lag_dataframe(self, df, grouping=None, lagged_only=False, variables=None): """Helper for lagging a dataframe :df: Pandas dataframe with a time index :returns: Dataframe with all columns copied and lagged """ df = pd.DataFrame(df) if not all(df.dtypes == 'float64'): raise ValueError('One or more columns are non-numeric.') if variables is None: shifted = df else: shifted = df[variables] if grouping is not None: shifted = (shifted.reset_index(grouping) .groupby(grouping) .shift(self.periods, self.freq) .reset_index(grouping, drop=True) .set_index(grouping, append=True)) # it would be nice to do this, but https://github.com/pydata/pandas/issues/114524 # shifted = df.groupby(level=grouping).shift(self.periods, self.freq) else: shifted = shifted.shift(self.periods, self.freq) shifted.columns = [c + '_lag' for c in shifted.columns] if lagged_only: return shifted.ix[df.index] else: return df.join(shifted) def fix_nans(self, lagged_df, nans=None): """Remove NAs, replace with mean, or do nothing :df: Pandas dataframe :nans: 'drop' to drop NAs, 'fill' to fill with mean values, None to leave NAs in place. :returns: Dataframe with NANs modified """ if nans == 'drop': return lagged_df.dropna() elif nans == 'fill': # Replace NAs in lagged columns with mean values from fitting step replace = {c + '_lag': {np.nan: self.X_mean[c]} for c in self.X_cols} if hasattr(self, 'y_cols'): replace.update({c + '_lag': {np.nan: self.y_mean[c]} for c in self.y_cols}) lagged_df.replace(replace, inplace=True) return lagged_df else: # return with NAs return lagged_df def transform(self, X, nans=None): """Add lagged features to X :X: Dataframe matching the fit frame. :nans: 'drop' to drop NAs, 'fill' to fill with mean values, None to leave NAs in place. 
:returns: Dataframe with lagged duplicate columns """ check_is_fitted(self, ['n_features', 'n_outputs']) n_samples, n_features = X.shape if n_features != self.n_features: raise ValueError("X shape does not match training shape") if 'site' in X.index.names: X_lag = self.lag_dataframe(X, grouping='site') else: X_lag = self.lag_dataframe(X) return self.fix_nans(X_lag, nans) def predict(self, X): """Predicts with a model using lagged X :X: Dataframe matching the fit dataframe :returns: prediction based on X """ X_lag = self.transform(X, nans='fill') print("Data lagged, now predicting.") return self.model.predict(X_lag) class MarkovWrapper(LagWrapper): """Wraps a scikit-learn model, Markov-lags the data (includes y values), and deals with NAs.""" partial_data_ok = True def __init__(self, model, periods=1, freq='30min', lag_X=True): """Markov lagged dataset :periods: Number of timesteps to lag by """ super(self.__class__, self).__init__(model, periods, freq) self.lag_X = lag_X def fit(self, X, y): """Fit the model with X compute number of output features :X: pandas dataframe :y: Pandas dataframe or series """ if 'site' in X.columns: raise ValueError("site should be an index, not a column") self.n_features = X.shape[1] self.n_outputs = y.shape[1] self.X_mean = X.mean() self.X_cols = X.columns self.y_mean = y.mean() self.y_cols = y.columns X_lag = self.transform(X, y, nans='drop') print("MW: Data lagged, fitting with {nk} samples out of {nx}".format(nk=X_lag.shape[0], nx=X.shape[0])) self.model.fit(X_lag, y.ix[X_lag.index]) return self def transform(self, X, y=None, nans=None): """Add lagged features of X and y to X :X: features dataframe :y: outputs dataframe :nans: 'drop' to drop NAs, 'fill' to fill with mean values, None to leave NAs in place. :returns: X dataframe with X and y lagged columns """ check_is_fitted(self, ['n_features', 'n_outputs']) n_samples, n_features = X.shape if n_features != self.n_features: raise ValueError("X shape does not match training shape") if 'site' in X.index.names: grouping = 'site' else: grouping = None if self.lag_X: X_lag = self.lag_dataframe(X, grouping=grouping) else: X_lag = X if y is not None: y_lag = self.lag_dataframe(y, grouping=grouping, lagged_only=True) X_lag = pd.merge(X_lag, y_lag, how='left', left_index=True, right_index=True) return self.fix_nans(X_lag, nans) def predict(self, X): """Predicts with a model using lagged X :X: Dataframe matching the fit frame :returns: Dataframe of predictions """ X_lag = self.transform(X, nans='fill') print("Data lagged, now predicting, step by step.") # initialise with mean y values init = pd.concat([X_lag.iloc[0], self.y_mean]).reshape(1, -1) results = [] results.append(self.model.predict(init).ravel()) n_steps = X_lag.shape[0] print('Predicting, step 0 of {n}'.format(n=n_steps), end='\r') for i in range(1, n_steps): if i % 100 == 0: print('Predicting, step {i} of {n}'.format(i=i, n=n_steps), end="\r") x = np.concatenate([X_lag.iloc[i], results[i - 1]]).reshape(1, -1) results.append(self.model.predict(x).ravel()) print('Predicting, step {i} of {n}'.format(i=n_steps, n=n_steps)) # results = pd.DataFrame.from_records(results, index=X_lag.index, columns=self.y_cols) # Scikit-learn models produce numpy arrays, not pandas dataframes results = np.concatenate(results) return results class LagAverageWrapper(BaseEstimator): """Modelwrapper that lags takes Tair, SWdown, RelHum, Wind, and Rainf, and lags them to estimate Qle fluxes.""" partial_data_ok = True def __init__(self, var_lags, model, datafreq=0.5): """Model wrapper 
:var_lags: OrderedDict like {'Tair': ['cur', '2d'], 'Rainf': ['cur', '2h', '7d', '30d', ... :model: model to use lagged variables with :datafreq: data frequency in hours """ self.var_lags = var_lags self.model = model self.datafreq = datafreq def _lag_array(self, X, var_lags, datafreq): """Lags the input array according to the lags specified in var_lags :X: array with columns matching var_lags :returns: array with original and lagged averaged variables """ lagged_data = [] for i, v in enumerate(var_lags): for l in var_lags[v]: if l == 'cur': lagged_data.append(X[:, [i]]) else: if l.endswith('M'): # Lagged variable minus original variable lag_avg = rolling_mean(X[:, [i]], l[:-1], datafreq=datafreq, shift=1) lagged_data.append(lag_avg - X[:, [i]]) else: lag_avg = rolling_mean(X[:, [i]], l, datafreq=datafreq, shift=1) lagged_data.append(lag_avg) return np.concatenate(lagged_data, axis=1) def _lag_data(self, X, var_lags=None, datafreq=None): """lag an array. Assumes that each column corresponds to variables listed in lags :X: dataframe or ndarray :datafreq: array data rate in hours :returns: array of lagged averaged variables if var_lags is None: var_lags = self.var_lags if datafreq is None: self.datafreq """ if var_lags is None: var_lags = self.var_lags if datafreq is None: datafreq = self.datafreq if isinstance(X, pd.DataFrame): assert all([v in X.columns for v in var_lags]), "Variables in X do not match initialised var_lags" columns = ["%s_%s" % (k, v) for k, l in var_lags.items() for v in l] if 'site' in X.index.names: # split-apply-combine by site results = {} for site in X.index.get_level_values('site').unique(): results[site] = self._lag_array(X.ix[X.index.get_level_values('site') == site, list(var_lags)].values, var_lags, datafreq) result = np.concatenate([d for d in results.values()]) else: result = self._lag_array(X[list(var_lags)].values, var_lags, datafreq) result = pd.DataFrame(result, index=X.index, columns=columns) elif isinstance(X, np.ndarray) or isinstance(X, xr.DataArray): # we have to assume that the variables are given in the right order assert (X.shape[1] == len(var_lags)) if isinstance(X, xr.DataArray): result = self._lag_array(np.array(X), var_lags, datafreq) else: result = self._lag_array(X, var_lags, datafreq) return result def fit(self, X, y, datafreq=None): """fit model using X :X: Dataframe, or ndarray with len(var_lags) columns :y: frame/array with columns to predict """ if datafreq is None: datafreq = self.datafreq lagged_data = self._lag_data(X, datafreq=datafreq) # store mean for filling empty values on predict self._means = np.nanmean(lagged_data, axis=0) fit_idx = np.isfinite(lagged_data).all(axis=1) print("LAW: Data lagged, fitting with {nk} samples out of {nx}".format(nk=sum(fit_idx), nx=X.shape[0])) self.model.fit(lagged_data[fit_idx], y[fit_idx]) def predict(self, X, datafreq=None): """predict model using X :X: Dataframe or ndarray of similar shape :returns: array like y """ if datafreq is None: datafreq = self.datafreq lagged_data = self._lag_data(X, datafreq=datafreq) # fill initial NaN values with mean values for i in range(lagged_data.shape[1]): lagged_data.ix[np.isnan(lagged_data.iloc[:, i]), i] = self._means[i] return self.model.predict(lagged_data) class MarkovLagAverageWrapper(LagAverageWrapper): """Lags variables, and uses markov fitting when fluxes are included""" partial_data_ok = True def __init__(self, var_lags, model, datafreq=0.5): super().__init__(var_lags, model, datafreq) self._x_vars = [] self._x_lags = OrderedDict() self._y_vars = [] 
self._y_lags = OrderedDict() for k, v in var_lags.items(): if k in ['Qle', 'Qh', 'NEE', 'Rnet', 'Qg']: self._y_vars.append(k) self._y_lags[k] = v else: self._x_vars.append(k) self._x_lags[k] = v self._n_y_lags = np.sum([len(lags) for lags in self._y_lags.values()]) self._n_x_lags = np.sum([len(lags) for lags in self._x_lags.values()]) def fit(self, X, y, datafreq=None): if isinstance(X, pd.DataFrame): assert all(X.columns == list(self._x_vars)) else: # Assume we're being passed stuff innthe right order assert X.shape[1] == len(self._x_vars) assert isinstance(y, pd.DataFrame), "Require dataframe input for y" assert all([v in y.columns for v in list(self._y_vars)]), "Lagged flux is missing from input" self._y_cols = list(y.columns) X_fit =
pd.concat([X, y], axis=1)
pandas.concat
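The row's completion is pd.concat([X, y], axis=1) (API pandas.concat): MarkovLagAverageWrapper glues features and flux targets column-wise before lagging and fitting. A small sketch with made-up variables on a toy half-hourly index:

import pandas as pd

idx = pd.date_range("2020-01-01", periods=4, freq="30min")
X = pd.DataFrame({"Tair": [280.0, 281.0, 282.0, 283.0],
                  "SWdown": [0.0, 50.0, 120.0, 200.0]}, index=idx)
y = pd.DataFrame({"Qle": [5.0, 12.0, 30.0, 55.0]}, index=idx)

# rows are aligned on the shared index; columns are simply stacked side by side
X_fit = pd.concat([X, y], axis=1)
print(X_fit.columns.tolist())   # ['Tair', 'SWdown', 'Qle']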
import pandas as pd import matplotlib.pyplot as plt import numpy as np import os breakout_index = 0 prev_breakout_index = 0 num_samples = 90 month = "july" num_samples_split = 10 path = str(num_samples_split) + "_normalized_refined_lfc/" #filename_breakout = "normalized datasets 2/normalized_breakout_" + month + ".xls" filename_goodcast = "normalized datasets 2/normalized_goodcasts_" + month + ".xls" #filename_aug = "normalized_breakout_aug.xls" sample = 0 num_intervals_gc = 0 num_intervals_bo = 0 def split_goodcast(filename, num_intervals_gc): data = pd.read_excel(filename) data = pd.DataFrame(data) data = data.values.tolist() nan_index = [-1] sizes = [] data_required = [] for i in range(0,len(data)): if(np.isnan(data[i][0])): nan_index += [i] for i in range(0, len(nan_index)-1): sizes += [nan_index[i+1] - nan_index[i] - 2] for i in range(1,len(nan_index)): print(i) for index in range(nan_index[i-1]+1, nan_index[i],num_samples_split): print(index) if(nan_index[i] == index + 1): continue data_required = data[index:index+num_samples_split] num_intervals_gc += 1 pd.DataFrame(data_required).to_csv(path + month + "/goodcasts/" + str(num_intervals_gc) + ".csv", header=False, index=False) for index in range(nan_index[-1] + 1, nan_index[-1] + 1 + 1200, num_samples_split): data_required = data[index:index+num_samples_split] num_intervals_gc += 1 pd.DataFrame(data_required).to_csv(path + month + "/goodcasts/" + str(num_intervals_gc) + ".csv", header=False, index=False) def split_breakout_to_set_samples(breakout_index, num_samples, filename, num_intervals_bo): data = pd.read_excel(filename) data = pd.DataFrame(data) data_refined = data.values.tolist() for i in range(0,len(data)): try: data_val = [round(data_refined[i][0]), round(data_refined[i+1][0]), data_refined[i+2][0], data_refined[i+3][0]] if((data_val[0] - data_val[1] >= 1)): prev_breakout_index = breakout_index breakout_index = i if((breakout_index - num_samples_split>0) and (breakout_index - prev_breakout_index >500)): #Check for breakout index being too close and getting non repeating values num_intervals_bo += 1 new_samples = data_refined[(breakout_index - num_samples_split - num_samples):(breakout_index - num_samples_split)] pd.DataFrame(new_samples).to_csv(path + month + "/breakouts_groups/" + str(num_intervals_bo) + ".csv", header=False, index=False) print("yo", len(new_samples), str(breakout_index - num_samples_split - num_samples), str(breakout_index-num_samples_split)) except ValueError: continue except IndexError: continue def split_breakout_to_30_samples(sample): for i in range(1,len(os.listdir(path + month + "/breakouts_groups/"))): data = pd.read_csv(path + month + "/breakouts_groups/" + str(i) + ".csv", header=None) data = pd.DataFrame(data) data_refined = data.values.tolist() for row in range(0,len(data_refined),10): if(len(data_refined[row:row+30])!=30): continue else: sample += 1 row_sample = data_refined[row:row+30] pd.DataFrame(row_sample).to_csv(path + month + "/breakouts/" + str(sample) + ".csv", header=False, index=False) def split_breakout_to_10_samples(sample): for i in range(1,len(os.listdir(path + month + "/breakouts_groups/"))): data = pd.read_csv(path + month + "/breakouts_groups/" + str(i) + ".csv", header=None) data =
pd.DataFrame(data)
pandas.DataFrame
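This row's completion wraps the parsed spreadsheet in pandas.DataFrame; the rest of the prompt slices such frames into fixed-size blocks and writes each block with to_csv(header=False, index=False). A short sketch of that chunk-and-dump pattern, with an invented column and output names:

import pandas as pd

num_samples_split = 10
data = pd.DataFrame({"lfc": range(35)})   # stand-in for the Excel contents

for n, start in enumerate(range(0, len(data), num_samples_split), start=1):
    chunk = data.iloc[start:start + num_samples_split]
    chunk.to_csv(f"chunk_{n}.csv", header=False, index=False)   # 4 files: 10+10+10+5 rows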
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import dataclasses import json import logging import re from collections import defaultdict, OrderedDict from contextlib import closing from dataclasses import dataclass, field # pylint: disable=wrong-import-order from datetime import datetime, timedelta from typing import ( Any, cast, Dict, Hashable, List, NamedTuple, Optional, Tuple, Type, Union, ) import pandas as pd import sqlalchemy as sa import sqlparse from flask import escape, Markup from flask_appbuilder import Model from flask_babel import lazy_gettext as _ from jinja2.exceptions import TemplateError from sqlalchemy import ( and_, asc, Boolean, Column, DateTime, desc, Enum, ForeignKey, Integer, or_, select, String, Table, Text, update, ) from sqlalchemy.engine.base import Connection from sqlalchemy.orm import backref, Query, relationship, RelationshipProperty, Session from sqlalchemy.orm.mapper import Mapper from sqlalchemy.schema import UniqueConstraint from sqlalchemy.sql import column, ColumnElement, literal_column, table, text from sqlalchemy.sql.elements import ColumnClause from sqlalchemy.sql.expression import Label, Select, TextAsFrom, TextClause from sqlalchemy.sql.selectable import Alias, TableClause from sqlalchemy.types import TypeEngine from superset import app, db, is_feature_enabled, security_manager from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric from superset.db_engine_specs.base import BaseEngineSpec, TimestampExpression from superset.errors import ErrorLevel, SupersetError, SupersetErrorType from superset.exceptions import ( QueryObjectValidationError, SupersetGenericDBErrorException, SupersetSecurityException, ) from superset.jinja_context import ( BaseTemplateProcessor, ExtraCache, get_template_processor, ) from superset.models.annotations import Annotation from superset.models.core import Database from superset.models.helpers import AuditMixinNullable, QueryResult from superset.result_set import SupersetResultSet from superset.sql_parse import ParsedQuery from superset.typing import AdhocMetric, Metric, OrderBy, QueryObjectDict from superset.utils import core as utils from superset.utils.core import GenericDataType, remove_duplicates config = app.config metadata = Model.metadata # pylint: disable=no-member logger = logging.getLogger(__name__) VIRTUAL_TABLE_ALIAS = "virtual_table" class SqlaQuery(NamedTuple): extra_cache_keys: List[Any] labels_expected: List[str] prequeries: List[str] sqla_query: Select class QueryStringExtended(NamedTuple): labels_expected: List[str] prequeries: List[str] sql: str @dataclass class MetadataResult: added: List[str] = field(default_factory=list) removed: List[str] = field(default_factory=list) modified: List[str] = field(default_factory=list) class 
AnnotationDatasource(BaseDatasource): """Dummy object so we can query annotations using 'Viz' objects just like regular datasources. """ cache_timeout = 0 changed_on = None type = "annotation" column_names = [ "created_on", "changed_on", "id", "start_dttm", "end_dttm", "layer_id", "short_descr", "long_descr", "json_metadata", "created_by_fk", "changed_by_fk", ] def query(self, query_obj: QueryObjectDict) -> QueryResult: error_message = None qry = db.session.query(Annotation) qry = qry.filter(Annotation.layer_id == query_obj["filter"][0]["val"]) if query_obj["from_dttm"]: qry = qry.filter(Annotation.start_dttm >= query_obj["from_dttm"]) if query_obj["to_dttm"]: qry = qry.filter(Annotation.end_dttm <= query_obj["to_dttm"]) status = utils.QueryStatus.SUCCESS try: df = pd.read_sql_query(qry.statement, db.engine) except Exception as ex: # pylint: disable=broad-except df = pd.DataFrame() status = utils.QueryStatus.FAILED logger.exception(ex) error_message = utils.error_msg_from_exception(ex) return QueryResult( status=status, df=df, duration=timedelta(0), query="", error_message=error_message, ) def get_query_str(self, query_obj: QueryObjectDict) -> str: raise NotImplementedError() def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]: raise NotImplementedError() class TableColumn(Model, BaseColumn): """ORM object for table columns, each table can have multiple columns""" __tablename__ = "table_columns" __table_args__ = (UniqueConstraint("table_id", "column_name"),) table_id = Column(Integer, ForeignKey("tables.id")) table = relationship( "SqlaTable", backref=backref("columns", cascade="all, delete-orphan"), foreign_keys=[table_id], ) is_dttm = Column(Boolean, default=False) expression = Column(Text) python_date_format = Column(String(255)) export_fields = [ "table_id", "column_name", "verbose_name", "is_dttm", "is_active", "type", "groupby", "filterable", "expression", "description", "python_date_format", ] update_from_object_fields = [s for s in export_fields if s not in ("table_id",)] export_parent = "table" @property def is_boolean(self) -> bool: """ Check if the column has a boolean datatype. """ return self.type_generic == GenericDataType.BOOLEAN @property def is_numeric(self) -> bool: """ Check if the column has a numeric datatype. """ return self.type_generic == GenericDataType.NUMERIC @property def is_string(self) -> bool: """ Check if the column has a string datatype. """ return self.type_generic == GenericDataType.STRING @property def is_temporal(self) -> bool: """ Check if the column has a temporal datatype. If column has been set as temporal/non-temporal (`is_dttm` is True or False respectively), return that value. This usually happens during initial metadata fetching or when a column is manually set as temporal (for this `python_date_format` needs to be set). 
""" if self.is_dttm is not None: return self.is_dttm return self.type_generic == GenericDataType.TEMPORAL @property def db_engine_spec(self) -> Type[BaseEngineSpec]: return self.table.db_engine_spec @property def type_generic(self) -> Optional[utils.GenericDataType]: if self.is_dttm: return GenericDataType.TEMPORAL column_spec = self.db_engine_spec.get_column_spec(self.type) return column_spec.generic_type if column_spec else None def get_sqla_col(self, label: Optional[str] = None) -> Column: label = label or self.column_name db_engine_spec = self.db_engine_spec column_spec = db_engine_spec.get_column_spec(self.type) type_ = column_spec.sqla_type if column_spec else None if self.expression: tp = self.table.get_template_processor() expression = tp.process_template(self.expression) col = literal_column(expression, type_=type_) else: col = column(self.column_name, type_=type_) col = self.table.make_sqla_column_compatible(col, label) return col @property def datasource(self) -> RelationshipProperty: return self.table def get_time_filter( self, start_dttm: DateTime, end_dttm: DateTime, time_range_endpoints: Optional[ Tuple[utils.TimeRangeEndpoint, utils.TimeRangeEndpoint] ], ) -> ColumnElement: col = self.get_sqla_col(label="__time") l = [] if start_dttm: l.append( col >= text(self.dttm_sql_literal(start_dttm, time_range_endpoints)) ) if end_dttm: if ( time_range_endpoints and time_range_endpoints[1] == utils.TimeRangeEndpoint.EXCLUSIVE ): l.append( col < text(self.dttm_sql_literal(end_dttm, time_range_endpoints)) ) else: l.append(col <= text(self.dttm_sql_literal(end_dttm, None))) return and_(*l) def get_timestamp_expression( self, time_grain: Optional[str], label: Optional[str] = None ) -> Union[TimestampExpression, Label]: """ Return a SQLAlchemy Core element representation of self to be used in a query. :param time_grain: Optional time grain, e.g. P1Y :param label: alias/label that column is expected to have :return: A TimeExpression object wrapped in a Label if supported by db """ label = label or utils.DTTM_ALIAS pdf = self.python_date_format is_epoch = pdf in ("epoch_s", "epoch_ms") if not self.expression and not time_grain and not is_epoch: sqla_col = column(self.column_name, type_=DateTime) return self.table.make_sqla_column_compatible(sqla_col, label) if self.expression: col = literal_column(self.expression) else: col = column(self.column_name) time_expr = self.db_engine_spec.get_timestamp_expr( col, pdf, time_grain, self.type ) return self.table.make_sqla_column_compatible(time_expr, label) def dttm_sql_literal( self, dttm: DateTime, time_range_endpoints: Optional[ Tuple[utils.TimeRangeEndpoint, utils.TimeRangeEndpoint] ], ) -> str: """Convert datetime object to a SQL expression string""" dttm_type = self.type or ("DATETIME" if self.is_dttm else None) sql = self.db_engine_spec.convert_dttm(dttm_type, dttm) if dttm_type else None if sql: return sql tf = self.python_date_format # Fallback to the default format (if defined) only if the SIP-15 time range # endpoints, i.e., [start, end) are enabled. 
if not tf and time_range_endpoints == ( utils.TimeRangeEndpoint.INCLUSIVE, utils.TimeRangeEndpoint.EXCLUSIVE, ): tf = ( self.table.database.get_extra() .get("python_date_format_by_column_name", {}) .get(self.column_name) ) if tf: if tf in ["epoch_ms", "epoch_s"]: seconds_since_epoch = int(dttm.timestamp()) if tf == "epoch_s": return str(seconds_since_epoch) return str(seconds_since_epoch * 1000) return f"'{dttm.strftime(tf)}'" # TODO(john-bodley): SIP-15 will explicitly require a type conversion. return f"""'{dttm.strftime("%Y-%m-%d %H:%M:%S.%f")}'""" @property def data(self) -> Dict[str, Any]: attrs = ( "id", "column_name", "verbose_name", "description", "expression", "filterable", "groupby", "is_dttm", "type", "type_generic", "python_date_format", ) return {s: getattr(self, s) for s in attrs if hasattr(self, s)} class SqlMetric(Model, BaseMetric): """ORM object for metrics, each table can have multiple metrics""" __tablename__ = "sql_metrics" __table_args__ = (UniqueConstraint("table_id", "metric_name"),) table_id = Column(Integer, ForeignKey("tables.id")) table = relationship( "SqlaTable", backref=backref("metrics", cascade="all, delete-orphan"), foreign_keys=[table_id], ) expression = Column(Text, nullable=False) extra = Column(Text) export_fields = [ "metric_name", "verbose_name", "metric_type", "table_id", "expression", "description", "d3format", "extra", "warning_text", ] update_from_object_fields = list( [s for s in export_fields if s not in ("table_id",)] ) export_parent = "table" def get_sqla_col(self, label: Optional[str] = None) -> Column: label = label or self.metric_name tp = self.table.get_template_processor() sqla_col: ColumnClause = literal_column(tp.process_template(self.expression)) return self.table.make_sqla_column_compatible(sqla_col, label) @property def perm(self) -> Optional[str]: return ( ("{parent_name}.[{obj.metric_name}](id:{obj.id})").format( obj=self, parent_name=self.table.full_name ) if self.table else None ) def get_perm(self) -> Optional[str]: return self.perm def get_extra_dict(self) -> Dict[str, Any]: try: return json.loads(self.extra) except (TypeError, json.JSONDecodeError): return {} @property def is_certified(self) -> bool: return bool(self.get_extra_dict().get("certification")) @property def certified_by(self) -> Optional[str]: return self.get_extra_dict().get("certification", {}).get("certified_by") @property def certification_details(self) -> Optional[str]: return self.get_extra_dict().get("certification", {}).get("details") @property def warning_markdown(self) -> Optional[str]: return self.get_extra_dict().get("warning_markdown") @property def data(self) -> Dict[str, Any]: attrs = ( "is_certified", "certified_by", "certification_details", "warning_markdown", ) attr_dict = {s: getattr(self, s) for s in attrs} attr_dict.update(super().data) return attr_dict sqlatable_user = Table( "sqlatable_user", metadata, Column("id", Integer, primary_key=True), Column("user_id", Integer, ForeignKey("ab_user.id")), Column("table_id", Integer, ForeignKey("tables.id")), ) class SqlaTable( # pylint: disable=too-many-public-methods,too-many-instance-attributes Model, BaseDatasource ): """An ORM object for SqlAlchemy table references""" type = "table" query_language = "sql" is_rls_supported = True columns: List[TableColumn] = [] metrics: List[SqlMetric] = [] metric_class = SqlMetric column_class = TableColumn owner_class = security_manager.user_model __tablename__ = "tables" __table_args__ = (UniqueConstraint("database_id", "table_name"),) table_name = 
Column(String(250), nullable=False) main_dttm_col = Column(String(250)) database_id = Column(Integer, ForeignKey("dbs.id"), nullable=False) fetch_values_predicate = Column(String(1000)) owners = relationship(owner_class, secondary=sqlatable_user, backref="tables") database: Database = relationship( "Database", backref=backref("tables", cascade="all, delete-orphan"), foreign_keys=[database_id], ) schema = Column(String(255)) sql = Column(Text) is_sqllab_view = Column(Boolean, default=False) template_params = Column(Text) extra = Column(Text) baselink = "tablemodelview" export_fields = [ "table_name", "main_dttm_col", "description", "default_endpoint", "database_id", "offset", "cache_timeout", "schema", "sql", "params", "template_params", "filter_select_enabled", "fetch_values_predicate", "extra", ] update_from_object_fields = [f for f in export_fields if f != "database_id"] export_parent = "database" export_children = ["metrics", "columns"] sqla_aggregations = { "COUNT_DISTINCT": lambda column_name: sa.func.COUNT(sa.distinct(column_name)), "COUNT": sa.func.COUNT, "SUM": sa.func.SUM, "AVG": sa.func.AVG, "MIN": sa.func.MIN, "MAX": sa.func.MAX, } def __repr__(self) -> str: return self.name @property def db_engine_spec(self) -> Type[BaseEngineSpec]: return self.database.db_engine_spec @property def changed_by_name(self) -> str: if not self.changed_by: return "" return str(self.changed_by) @property def changed_by_url(self) -> str: if not self.changed_by: return "" return f"/superset/profile/{self.changed_by.username}" @property def connection(self) -> str: return str(self.database) @property def description_markeddown(self) -> str: return utils.markdown(self.description) @property def datasource_name(self) -> str: return self.table_name @property def datasource_type(self) -> str: return self.type @property def database_name(self) -> str: return self.database.name @classmethod def get_datasource_by_name( cls, session: Session, datasource_name: str, schema: Optional[str], database_name: str, ) -> Optional["SqlaTable"]: schema = schema or None query = ( session.query(cls) .join(Database) .filter(cls.table_name == datasource_name) .filter(Database.database_name == database_name) ) # Handling schema being '' or None, which is easier to handle # in python than in the SQLA query in a multi-dialect way for tbl in query.all(): if schema == (tbl.schema or None): return tbl return None @property def link(self) -> Markup: name = escape(self.name) anchor = f'<a target="_blank" href="{self.explore_url}">{name}</a>' return Markup(anchor) def get_schema_perm(self) -> Optional[str]: """Returns schema permission if present, database one otherwise.""" return security_manager.get_schema_perm(self.database, self.schema) def get_perm(self) -> str: return f"[{self.database}].[{self.table_name}](id:{self.id})" @property def name(self) -> str: if not self.schema: return self.table_name return "{}.{}".format(self.schema, self.table_name) @property def full_name(self) -> str: return utils.get_datasource_full_name( self.database, self.table_name, schema=self.schema ) @property def dttm_cols(self) -> List[str]: l = [c.column_name for c in self.columns if c.is_dttm] if self.main_dttm_col and self.main_dttm_col not in l: l.append(self.main_dttm_col) return l @property def num_cols(self) -> List[str]: return [c.column_name for c in self.columns if c.is_numeric] @property def any_dttm_col(self) -> Optional[str]: cols = self.dttm_cols return cols[0] if cols else None @property def html(self) -> str: df =
pd.DataFrame((c.column_name, c.type) for c in self.columns)
pandas.DataFrame
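The completion builds a two-column frame from a generator of (column_name, type) pairs (API pandas.DataFrame) inside the html property; rendering it with to_html is an assumption here, added only to round the sketch off. The column metadata is invented:

import pandas as pd

columns_meta = [("id", "INTEGER"), ("created_on", "DATETIME"), ("name", "VARCHAR(255)")]
df = pd.DataFrame(columns_meta, columns=["column_name", "type"])
print(df.to_html(index=False))   # simple HTML table of the column metadata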
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Nov 26 18:20:37 2018 @author: kazuki.onodera separate gal & exgal oversampling """ import numpy as np import pandas as pd import os, gc from glob import glob from tqdm import tqdm import sys sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary') import lgbextension as ex import lightgbm as lgb from multiprocessing import cpu_count import utils, utils_metric #utils.start(__file__) #============================================================================== SUBMIT_FILE_PATH = '../output/1126-1.csv.gz' COMMENT = 'gal: 90 exgal: 112 / oversampling' EXE_SUBMIT = True SEED = np.random.randint(9999) np.random.seed(SEED) print('SEED:', SEED) NFOLD = 5 LOOP = 3 param = { 'objective': 'multiclass', # 'num_class': 14, 'metric': 'multi_logloss', 'learning_rate': 0.5, 'max_depth': 3, 'num_leaves': 63, 'max_bin': 255, 'min_child_weight': 10, 'min_data_in_leaf': 150, 'reg_lambda': 0.5, # L2 regularization term on weights. 'reg_alpha': 0.5, # L1 regularization term on weights. 'colsample_bytree': 0.5, 'subsample': 0.7, # 'nthread': 32, 'nthread': cpu_count(), 'bagging_freq': 1, 'verbose':-1, } #USE_FEATURES = 100 # ============================================================================= # load # ============================================================================= #COL_gal = pd.read_csv('LOG/imp_802_cv_separate.py_gal.csv').head(USE_FEATURES ).feature.tolist() #COL_exgal = pd.read_csv('LOG/imp_802_cv_separate.py_exgal.csv').head(USE_FEATURES ).feature.tolist() COL_gal = pd.read_pickle('../data/807_gal.pkl').columns.tolist() COL_exgal = pd.read_pickle('../data/807_exgal.pkl').columns.tolist() COL = list(set(COL_gal + COL_exgal)) PREFS = sorted(set([c.split('_')[0] for c in COL])) files_tr = [] for pref in PREFS: files_tr += glob(f'../data/train_{pref}*.pkl') files_te = [f'../feature/test_{c}.pkl' for c in COL] files_te = sorted(files_te) sw = False for i in files_te: if os.path.exists(i)==False: print(i) sw = True if sw: raise Exception() else: print('all test file exist!') X = pd.concat([ pd.read_pickle(f) for f in tqdm(files_tr, mininterval=60) ], axis=1)[COL] y = utils.load_target().target target_dict = {} target_dict_r = {} for i,e in enumerate(y.sort_values().unique()): target_dict[e] = i target_dict_r[i] = e y = y.replace(target_dict) if X.columns.duplicated().sum()>0: raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }') print('no dup :) ') gc.collect() # ============================================================================= # separate # ============================================================================= is_gal = pd.read_pickle('../data/tr_is_gal.pkl') X_gal = X[is_gal][COL_gal] y_gal = y[is_gal] X_exgal = X[~is_gal][COL_exgal] y_exgal = y[~is_gal] def target_replace(y): target_dict = {} target_dict_r = {} for i,e in enumerate(y.sort_values().unique()): target_dict[e] = i target_dict_r[i] = e return y.replace(target_dict), target_dict_r y_gal, di_gal = target_replace(y_gal) y_exgal, di_exgal = target_replace(y_exgal) del X, y gc.collect() print(f'X_gal.shape: {X_gal.shape}') print(f'X_exgal.shape: {X_exgal.shape}') # ============================================================================= # oversampling # ============================================================================= """ train: is_gal False True ddf 0 3947 1785 1 1576 540 test: is_gal False True ddf 0 3069681 390283 1 32699 227 (390283 / 227) / (1785 / 540) == 520 (3069681 / 32699) / (3947 / 1576) == 37 
""" print('oversampling') from sklearn.model_selection import GroupKFold is_ddf = pd.read_pickle('../data/train.pkl').ddf==1 # ======== for gal ======== X_gal['g'] = np.arange(X_gal.shape[0]) % NFOLD X_gal_d0 = X_gal[~is_ddf] X_gal_d1 = X_gal[is_ddf] li = [X_gal_d0.copy() for i in range(520)] X_gal = pd.concat([X_gal_d1]+li, ignore_index=True) group_gal = X_gal.g y_gal_d0 = y_gal[~is_ddf] y_gal_d1 = y_gal[is_ddf] li = [y_gal_d0.copy() for i in range(520)] y_gal = pd.concat([y_gal_d1]+li, ignore_index=True) del li, X_gal_d0, X_gal_d1, X_gal['g'], y_gal_d0, y_gal_d1 # ======== for exgal ======== X_exgal['g'] = np.arange(X_exgal.shape[0]) % NFOLD X_exgal_d0 = X_exgal[~is_ddf] X_exgal_d1 = X_exgal[is_ddf] li = [X_exgal_d0.copy() for i in range(37)] X_exgal = pd.concat([X_exgal_d1]+li, ignore_index=True) group_exgal = X_exgal.g y_exgal_d0 = y_exgal[~is_ddf] y_exgal_d1 = y_exgal[is_ddf] li = [y_exgal_d0.copy() for i in range(37)] y_exgal = pd.concat([y_exgal_d1]+li, ignore_index=True) del li, X_exgal_d0, X_exgal_d1, X_exgal['g'], y_exgal_d0, y_exgal_d1 group_kfold = GroupKFold(n_splits=NFOLD) print(f'X_gal.shape: {X_gal.shape}') print(f'X_exgal.shape: {X_exgal.shape}') gc.collect() # ============================================================================= # cv(gal) # ============================================================================= print('==== GAL ====') param['num_class'] = 5 dtrain = lgb.Dataset(X_gal, y_gal.values, #categorical_feature=CAT, free_raw_data=False) gc.collect() #model_all = [] nround_mean = 0 wloss_list = [] oofs_gal = [] for i in range(2): gc.collect() param['seed'] = np.random.randint(9999) ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD, fobj=utils_metric.wloss_objective_gal, feval=utils_metric.wloss_metric_gal, early_stopping_rounds=100, verbose_eval=50, folds=group_kfold.split(X_gal, y_gal, group_gal), seed=SEED) oof = ex.eval_oob(X_gal, y_gal.values, models, SEED, stratified=True, shuffle=True, n_class=True) oofs_gal.append(oof) # model_all += models nround_mean += len(ret['wloss-mean']) wloss_list.append( ret['wloss-mean'][-1] ) nround_mean = int((nround_mean/2) * 1.3) utils.send_line(f'nround_mean: {nround_mean}') result = f"CV GAL wloss: {np.mean(wloss_list)} + {np.std(wloss_list)}" utils.send_line(result) # ============================================================================= # model(gal) # ============================================================================= gc.collect() np.random.seed(SEED) model_all_gal = [] for i in range(LOOP): gc.collect() print(f'LOOP:{i}') gc.collect() param['seed'] = np.random.randint(9999) model = lgb.train(param, dtrain, num_boost_round=nround_mean, fobj=utils_metric.wloss_objective_gal, feval=utils_metric.wloss_metric_gal, valid_names=None, init_model=None, feature_name='auto', categorical_feature='auto', early_stopping_rounds=None, evals_result=None, verbose_eval=True, learning_rates=None, keep_training_booster=False, callbacks=None) model_all_gal.append(model) del X_gal, y_gal # ============================================================================= # cv(exgal) # ============================================================================= print('==== EXGAL ====') param['num_class'] = 9 dtrain = lgb.Dataset(X_exgal, y_exgal.values, #categorical_feature=CAT, free_raw_data=False) gc.collect() #model_all = [] nround_mean = 0 wloss_list = [] oofs_exgal = [] for i in range(2): gc.collect() param['seed'] = np.random.randint(9999) ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD, 
fobj=utils_metric.wloss_objective_exgal, feval=utils_metric.wloss_metric_exgal, early_stopping_rounds=100, verbose_eval=50, folds=group_kfold.split(X_exgal, y_exgal, group_exgal), seed=SEED) oof = ex.eval_oob(X_exgal, y_exgal.values, models, SEED, stratified=True, shuffle=True, n_class=True) oofs_exgal.append(oof) # model_all += models nround_mean += len(ret['wloss-mean']) wloss_list.append( ret['wloss-mean'][-1] ) nround_mean = int((nround_mean/2) * 1.3) utils.send_line(f'nround_mean: {nround_mean}') result = f"CV EXGAL wloss: {np.mean(wloss_list)} + {np.std(wloss_list)}" print(result) utils.send_line(result) # ============================================================================= # model(exgal) # ============================================================================= gc.collect() np.random.seed(SEED) model_all_exgal = [] for i in range(LOOP): gc.collect() print(f'LOOP:{i}') gc.collect() param['seed'] = np.random.randint(9999) model = lgb.train(param, dtrain, num_boost_round=nround_mean, fobj=utils_metric.wloss_objective_exgal, feval=utils_metric.wloss_metric_exgal, valid_names=None, init_model=None, feature_name='auto', categorical_feature='auto', early_stopping_rounds=None, evals_result=None, verbose_eval=True, learning_rates=None, keep_training_booster=False, callbacks=None) model_all_exgal.append(model) # ============================================================================= # test TODO: edit # ============================================================================= X_test = pd.concat([
pd.read_pickle(f)
pandas.read_pickle
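The completion is pandas.read_pickle: per-feature pickles are loaded and concatenated column-wise into X_test. A toy round-trip with invented file names standing in for the elided ../feature/test_*.pkl paths:

import pandas as pd

pd.DataFrame({"f0": [1.0, 2.0]}).to_pickle("test_f0.pkl")
pd.DataFrame({"f1": [3.0, 4.0]}).to_pickle("test_f1.pkl")

files_te = ["test_f0.pkl", "test_f1.pkl"]
X_test = pd.concat([pd.read_pickle(f) for f in files_te], axis=1)
print(X_test.columns.tolist())   # ['f0', 'f1']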
import matplotlib.pyplot as plt import pandas as pd import json import numpy as np import seaborn as sns import matplotlib.pyplot as plt import itertools import dataframe_image as dfi from matplotlib.patches import Patch sns.set() df =
pd.read_json('result/lxmert_experiment.json')
pandas.read_json
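The completion loads the experiment results with pandas.read_json. A minimal sketch that feeds a records-style payload through StringIO instead of the result file on disk; the field names are invented:

import io
import pandas as pd

payload = '[{"model": "lxmert", "accuracy": 0.71}, {"model": "baseline", "accuracy": 0.64}]'
df = pd.read_json(io.StringIO(payload))
print(df.shape)   # (2, 2)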
import GCRCatalogs from GCRCatalogs import GCRQuery import pandas as pd import numpy as np # We load the catalog with addons cat = GCRCatalogs.load_catalog('dc2_object_run2.2i_dr6_with_addons') columns_to_get0 = ["objectId", "Ixx_pixel", "Iyy_pixel", "Ixy_pixel", "IxxPSF_pixel", "IyyPSF_pixel", 'IxyPSF_pixel'] #columns_to_get0 = ["objectId"] columns_to_get2 = ["match_objectId", "cosmodc2_id_truth"] DF0 = cat.catalogs[0].get_quantities(columns_to_get0) DF0 = pd.DataFrame(DF0) print(DF0.head()) DF2 = cat.catalogs[2].get_quantities(columns_to_get2) DF2 =
pd.DataFrame(DF2)
pandas.DataFrame
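The completion wraps the columnar dict returned by get_quantities in a DataFrame (API pandas.DataFrame). A tiny sketch reusing a few of the prompt's column names with fabricated values:

import numpy as np
import pandas as pd

quantities = {
    "objectId": np.array([101, 102, 103]),
    "Ixx_pixel": np.array([2.1, 1.9, 2.4]),
    "IxxPSF_pixel": np.array([1.0, 1.1, 0.9]),
}
DF0 = pd.DataFrame(quantities)   # one column per catalog quantity
print(DF0.head())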
# calculation of time (in seconds) that elapsed between the stimulation is applied and the VAS # score is register import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # set path path = '../data/data_sub.xlsx' dataFrame = pd.read_excel(path, header=2, sheet_name='trials_ex') headers = dataFrame.columns vasSubjectNeck = [] vasSubjectForearm = [] vasSubjectTactor = [] trials = 5 fields = 6 subjects = 5 speedNeck = [] speedForearm = [] speedTactor = [] for t in range(0, subjects): for u in range(0, len(dataFrame)): # stores the VAS score from each subject in the neck area if dataFrame[headers[(t * fields + 1)]][u] == 3: vasSubjectNeck.append(dataFrame[headers[t * fields]][u]) speedNeck.append(dataFrame[headers[(t * fields) + 1]][u]) if dataFrame[headers[(t * fields) + 1]][u] == 10: vasSubjectNeck.append(dataFrame[headers[t * fields]][u]) speedNeck.append(dataFrame[headers[(t * fields) + 1]][u]) if dataFrame[headers[(t * fields) + 1]][u] == 30: vasSubjectNeck.append(dataFrame[headers[t * fields]][u]) speedNeck.append(dataFrame[headers[(t * fields) + 1]][u]) if dataFrame[headers[(t * fields) + 1]][u] == 50: vasSubjectNeck.append(dataFrame[headers[t * fields]][u]) speedNeck.append(dataFrame[headers[(t * fields) + 1]][u]) if dataFrame[headers[(t * fields) + 1]][u] == 100: vasSubjectNeck.append(dataFrame[headers[t * fields]][u]) speedNeck.append(dataFrame[headers[(t * fields) + 1]][u]) if dataFrame[headers[(t * fields) + 1]][u] == 200: vasSubjectNeck.append(dataFrame[headers[t * fields]][u]) speedNeck.append(dataFrame[headers[(t * fields) + 1]][u]) # stores the VAS score from each subject in the foreanr area using axidraw if dataFrame[headers[(t * fields) + 3]][u] == 3: vasSubjectForearm.append(dataFrame[headers[(t * fields) + 2]][u]) speedForearm.append(dataFrame[headers[(t * fields) + 3]][u]) if dataFrame[headers[(t * fields) + 3]][u] == 10: vasSubjectForearm.append(dataFrame[headers[(t * fields) + 2]][u]) speedForearm.append(dataFrame[headers[(t * fields) + 3]][u]) if dataFrame[headers[(t * fields) + 3]][u] == 30: vasSubjectForearm.append(dataFrame[headers[(t * fields + 2)]][u]) speedForearm.append(dataFrame[headers[(t * fields) + 3]][u]) if dataFrame[headers[(t * fields) + 3]][u] == 50: vasSubjectForearm.append(dataFrame[headers[(t * fields + 2)]][u]) speedForearm.append(dataFrame[headers[(t * fields) + 3]][u]) if dataFrame[headers[(t * fields) + 3]][u] == 100: vasSubjectForearm.append(dataFrame[headers[(t * fields + 2)]][u]) speedForearm.append(dataFrame[headers[(t * fields) + 3]][u]) if dataFrame[headers[(t * fields) + 3]][u] == 200: vasSubjectForearm.append(dataFrame[headers[(t * fields + 2)]][u]) speedForearm.append(dataFrame[headers[(t * fields) + 3]][u]) # stores the VAS score from each subject in the foreanr area using tactors if dataFrame[headers[(t * fields) + 5]][u] == 3: vasSubjectTactor.append(dataFrame[headers[(t * fields) + 4]][u]) speedTactor.append(dataFrame[headers[(t * fields) + 5]][u]) if dataFrame[headers[(t * fields) + 5]][u] == 10: vasSubjectTactor.append(dataFrame[headers[(t * fields) + 4]][u]) speedTactor.append(dataFrame[headers[(t * fields) + 5]][u]) if dataFrame[headers[(t * fields) + 5]][u] == 30: vasSubjectTactor.append(dataFrame[headers[(t * fields) + 4]][u]) speedTactor.append(dataFrame[headers[(t * fields) + 5]][u]) if dataFrame[headers[(t * fields) + 5]][u] == 50: vasSubjectTactor.append(dataFrame[headers[(t * fields) + 4]][u]) speedTactor.append(dataFrame[headers[(t * fields) + 5]][u]) if dataFrame[headers[(t * fields) + 
5]][u] == 100: vasSubjectTactor.append(dataFrame[headers[(t * fields) + 4]][u]) speedTactor.append(dataFrame[headers[(t * fields) + 5]][u]) if dataFrame[headers[(t * fields) + 5]][u] == 200: vasSubjectTactor.append(dataFrame[headers[(t * fields) + 4]][u]) speedTactor.append(dataFrame[headers[(t * fields) + 5]][u]) fig1 = plt.figure(1) plot1 = pd.DataFrame({'stimulation velocity': speedNeck, 'VAS score': vasSubjectNeck}) sns.swarmplot(x='stimulation velocity', y='VAS score', data=plot1, size=6, color='k') plt.title('VAS score vs AxiDraw Neck Stimulation') plt.yticks((-10, -5, 0, 5, 10)) plt.ylabel("VAS score", labelpad=-5) fig1.show() fig2 = plt.figure(2) plot2 = pd.DataFrame({'stimulation velocity': speedForearm, 'VAS score': vasSubjectForearm}) sns.swarmplot(x='stimulation velocity', y='VAS score', data=plot2, size=6, color='k') plt.title('VAS score vs AxiDraw Forearm Stimulation') plt.yticks((-10, -5, 0, 5, 10)) plt.ylabel("VAS score", labelpad=-5) fig2.show() fig3 = plt.figure(3) plot3 =
pd.DataFrame({'stimulation velocity': speedTactor, 'VAS score': vasSubjectTactor})
pandas.DataFrame
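The completion builds the third plotting frame from two parallel lists (API pandas.DataFrame), the long-form layout that seaborn's swarmplot groups by the categorical x column. A sketch with toy velocities and scores:

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

speeds = [3, 3, 10, 10, 30, 30]
scores = [-2, 1, 4, 5, 7, 6]
plot_df = pd.DataFrame({"stimulation velocity": speeds, "VAS score": scores})
sns.swarmplot(x="stimulation velocity", y="VAS score", data=plot_df, size=6, color="k")
plt.title("VAS score vs stimulation velocity (toy data)")
plt.show()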
import pandas as pd def preprocess_flat(flat): # make naming consistent with the other tables flat.rename(columns={'patientunitstayid': 'patient'}, inplace=True) flat.set_index('patient', inplace=True) # admission diagnosis is dealt with in diagnoses.py not flat features flat.drop(columns=['apacheadmissiondx'], inplace=True) flat['gender'].replace({'Male': 1, 'Female': 0}, inplace=True) # convert the categorical features to one-hot flat = pd.get_dummies(flat, columns=['ethnicity', 'unittype', 'unitadmitsource', 'unitstaytype', 'physicianspeciality']) # 2 out of 89123 patients have NaN for age; we fill this with the mean value which is 64 flat['age'].fillna('64', inplace=True) # some of the ages are like '> 89' rather than numbers, this needs removing and converting to numbers # but we make an extra variable to keep this information flat['> 89'] = flat['age'].str.contains('> 89').astype(int) flat['age'] = flat['age'].replace('> ', '', regex=True) flat['age'] = [float(value) for value in flat.age.values] # note that the features imported from the time series have already been normalised # standardisation is for features that are probably normally distributed features_for_standardisation = 'admissionheight' means = flat[features_for_standardisation].mean(axis=0) stds = flat[features_for_standardisation].std(axis=0) # standardise flat[features_for_standardisation] = (flat[features_for_standardisation] - means) / stds # probably not normally distributed features_for_min_max = ['admissionweight', 'age', 'eyes', 'motor', 'verbal', 'hour'] # minus the minimum value and then divide by the maximum value flat[features_for_min_max] -= flat[features_for_min_max].min() flat[features_for_min_max] /= flat[features_for_min_max].max() # preen the features by removing any really uncommon ones maybe - or coalesce some return flat def preprocess_labels(labels): # make naming consistent with the other tables labels.rename(columns={'patientunitstayid': 'patient'}, inplace=True) labels.set_index('patient', inplace=True) labels['actualhospitalmortality'].replace({'EXPIRED': 1, 'ALIVE': 0}, inplace=True) return labels def flat_and_labels_main(eICU_path): print('==> Loading data from labels and flat features files...') flat =
pd.read_csv(eICU_path + 'flat_features.csv')
pandas.read_csv
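The completion is pandas.read_csv, loading the flat-features table that preprocess_flat then scales. A sketch of the two scalings the function applies, z-score for admissionheight and shift-then-divide min-max for the other columns, on fabricated values:

import pandas as pd

flat = pd.DataFrame({"admissionheight": [150.0, 165.0, 180.0],
                     "admissionweight": [50.0, 75.0, 100.0],
                     "age": [30.0, 64.0, 89.0]})

col = "admissionheight"
flat[col] = (flat[col] - flat[col].mean()) / flat[col].std()   # standardise

mm = ["admissionweight", "age"]
flat[mm] -= flat[mm].min()    # same two-step min-max as the prompt:
flat[mm] /= flat[mm].max()    # subtract the min, then divide by the new max
print(flat.round(3))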
#!/usr/bin/python3 import json from datetime import date, timedelta from operator import itemgetter from pprint import pprint from typing import Dict, Tuple import jinja2 import pandas as pd from matplotlib import pyplot as plt from rpm import labelCompare # start statistics at this date (used as starting point for graphs) STATS_START_DATE = date.fromisoformat("2019-02-26") VALID_EVENTS = [ "released", "updated", "adopted", "removed", ] with open("../data/packages.json") as FILE: PACKAGES = json.loads(FILE.read()) class Package: def __init__(self, name: str): self.name = name self.last_version = None self.last_updated = None self.sig_package = False self.update_backlog: Dict[Version, int] = dict() def fluffify(version) -> Tuple[str, str, str]: return "0", version, "0" class Version(str): def __hash__(self): return super().__hash__() def __lt__(self, other): return labelCompare(fluffify(self), fluffify(other)) == -1 def __gt__(self, other): return labelCompare(fluffify(self), fluffify(other)) == 1 def __eq__(self, other): return labelCompare(fluffify(self), fluffify(other)) == 0 def __ne__(self, other): return labelCompare(fluffify(self), fluffify(other)) != 0 def __le__(self, other): return labelCompare(fluffify(self), fluffify(other)) != 1 def __ge__(self, other): return labelCompare(fluffify(self), fluffify(other)) != -1 def validate(datum): event = datum["event"] if event not in VALID_EVENTS: print(f"Invalid event: {event}:") pprint(datum) raise Exception("Invalid event") name = datum["package"] if name not in PACKAGES: print(f"Invalid package: {name}:") pprint(datum) raise Exception("Invalid package") datestr = datum["date"] try: date.fromisoformat(datestr) except: print(f"Invalid date: {datestr}:") pprint(datum) raise Exception("Invalid date") version = datum["version"] if version is not None and "-" in version: print(f"Invalid version: {version}:") pprint(datum) raise Exception("Invalid version") def main(): with open("../data/events.json") as file: data = json.loads(file.read()) data.sort(key=itemgetter("date")) for datum in data: validate(datum) start_date = date.fromisoformat(data[0]["date"]) stop_date = date.today() packages: Dict[str, Package] = {name: Package(name) for name in PACKAGES} statistics = dict() # FIXME optimize # iterate over all days current_date = start_date while current_date <= stop_date: stats = dict() statistics[current_date] = stats events = list( filter(lambda x: x["date"] == current_date.isoformat(), data) ) # increment package update backlog lengths for package in packages.values(): for version in package.update_backlog.keys(): package.update_backlog[version] += 1 # update package data according to this day's events for event in events: name: str = event["package"] package: Package = packages[name] etype: str = event["event"] if etype == "adopted": package.sig_package = True continue if etype == "removed": package.sig_package = False continue # for "released" and "updated", there's a version version = Version(event["version"]) if etype == "released": # only update latest version if it's actually greater if package.last_version is None or package.last_version < version: package.last_version = version # initialize backlog duration for new versions with 0 if package.last_updated is None or version > package.last_updated: package.update_backlog[version] = 0 if etype == "updated": # always update latest update, assume it never decreases package.last_updated = version # if the package is updated to the latest version: if package.last_version == 
package.last_updated: # drop all backlog information package.update_backlog.clear() # if it was updated to a version that's not the latest version: else: # drop all versions that are older than the updated one versions = list(package.update_backlog.keys()) for v in versions: if version >= v: package.update_backlog.pop(v) # filter by packages that are maintained by the SIG sig_packages = { package.name: package for package in filter( lambda p: p.sig_package, packages.values() ) } # calculate update backlogs sum_backlog_len = sum( len(package.update_backlog.keys()) for package in sig_packages.values() ) average_backlog_len = ( sum_backlog_len / len(sig_packages.keys()) if len(sig_packages.keys()) != 0 else 0 ) # calculate maximum update delays sum_backlog_dur = sum( max(package.update_backlog.values(), default=0) for package in sig_packages.values() ) average_backlog_dur = ( sum_backlog_dur / len(sig_packages.keys()) if len(sig_packages.keys()) != 0 else 0 ) # gather number of outdated packages abs_outdated_pkgs = len(list( filter(lambda x: ( len(x.update_backlog.keys()) != 0 and x.sig_package ), packages.values()) )) rel_outdated_pkgs = (abs_outdated_pkgs / len(sig_packages)) if len(sig_packages) != 0 else 0 stats["sig_pkgs"] = len(sig_packages) stats["abs_outdated_pkgs"] = abs_outdated_pkgs stats["rel_outdated_pkgs"] = rel_outdated_pkgs stats["sum_bl_len"] = sum_backlog_len stats["sum_bl_dur"] = sum_backlog_dur stats["avg_bl_len"] = average_backlog_len stats["avg_bl_dur"] = average_backlog_dur # on towards the next day current_date += timedelta(days=1) # print today's package statistics pprint({name: package.__dict__ for name, package in packages.items()}) for name, package in packages.items(): if package.last_version is None: print("No release information for:", name) if package.last_updated is None: print("No update information for:", name) # print package statistics with open("sig_backlog_template.jinja2") as file: template = jinja2.Template(file.read()) stats_document = template.render( packages=packages, package_ods=str(statistics[stop_date]["abs_outdated_pkgs"]), package_num=str(statistics[stop_date]["sig_pkgs"]), package_od_percent=(str(round(statistics[stop_date]["rel_outdated_pkgs"]*100)) + "%"), ) with open("../_pages/sig-backlog.md", "w") as file: file.write(stats_document) # create markdown document of package overview table markdown = list() markdown.append("| package | last updated | last release | status |") markdown.append("| ------- | ------------ | ------------ | ------ |") for name, package in packages.items(): if not package.sig_package: continue if package.last_version == package.last_updated: status = "current" else: days = max([*package.update_backlog.values()] + [0]) status = f"{days} days behind" markdown.append( f"| {package.name} " f"| {package.last_updated} " f"| {package.last_version} " f"| {status} |" ) overview_doc = "\n".join([ "---", "title: Overview", "layout: page", "permalink: /overview/", "---", "", "![SIG Packages](/assets/sig_pkgs.png)", "![Total backlog duration](/assets/sum_bl_dur.png)", "![Total backlog length](/assets/sum_bl_len.png)", "![Average backlog duration](/assets/avg_bl_dur.png)", "![Average backlog length](/assets/avg_bl_len.png)", "![Number of outdated packages](/assets/od_pkgs_abs.png)", "![Ratio of outdated packages](/assets/od_pkgs_rel.png)", "", ] + markdown + [""]) with open("../_pages/sig-overview.md", "w") as file: file.write(overview_doc) # start statistics at 2019-02-26 (one day before packages were added) stat_start_date = 
STATS_START_DATE stat_stop_date = date.today() # restrict data to interesting time period curr_date = stat_start_date dates = list() # linearize statistics sig_pkgs = list() od_pkgs_abs = list() od_pkgs_rel = list() sum_bl_lens = list() sum_bl_durs = list() avg_bl_lens = list() avg_bl_durs = list() while curr_date <= stat_stop_date: dates.append(curr_date.isoformat()) sig_pkgs.append(statistics[curr_date]["sig_pkgs"]) od_pkgs_abs.append(statistics[curr_date]["abs_outdated_pkgs"]) od_pkgs_rel.append(statistics[curr_date]["rel_outdated_pkgs"]) sum_bl_lens.append(statistics[curr_date]["sum_bl_len"]) sum_bl_durs.append(statistics[curr_date]["sum_bl_dur"]) avg_bl_lens.append(statistics[curr_date]["avg_bl_len"]) avg_bl_durs.append(statistics[curr_date]["avg_bl_dur"]) curr_date += timedelta(days=1) # plot statistics # get first of the month for plot ticks firsts = list() labels = list() for i, d in enumerate(dates): iso = date.fromisoformat(d) if iso.day == 1: firsts.append(i) labels.append(d) df0 =
pd.DataFrame(sig_pkgs, index=dates)
pandas.DataFrame
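The completion turns one linearized daily statistic into a date-indexed frame (API pandas.DataFrame) ready for plotting with monthly ticks. A toy version with invented dates and counts:

import pandas as pd
import matplotlib.pyplot as plt

dates = ["2019-02-26", "2019-02-27", "2019-02-28", "2019-03-01"]
sig_pkgs = [0, 3, 3, 4]
df0 = pd.DataFrame(sig_pkgs, index=dates, columns=["sig_pkgs"])
df0.plot(legend=False, title="SIG packages over time (toy data)")
plt.show()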
import numpy as np # The core functionality of NumPy is the ndarray class, a multidimensional (n-dimensional) array. # All elements of the array must be of the same type. x = np.array([[1, 2, 3], [4, 5, 6]]) print("x:\n{}".format(x)) from scipy import sparse # The most important part of SciPy is scipy.sparse: this provides sparse matrices, which are another representation that is used for data in scikit-learn. # Sparse matrices are used whenever we want to store a 2D array that contains mostly zeros. eye = np.eye(4) print("NumPy array:\n{}".format(eye)) # Convert the NumPy array to a SciPy sparse matrix in CSR format. # Only the nonzero entries are stored. sparse_matrix = sparse.csr_matrix(eye) print("SciPy sparse CSR matrix:\n{}".format(sparse_matrix)) # Usually it is not possible to create dense representations of sparse data as they would not fit into memory, so we need to create sparse representations directl.y # Create sparse matrix using COO representation format: data = np.ones(5) row_indices = np.arange(5) col_indices = np.arange(5) eye_coo = sparse.coo_matrix( (data, (row_indices, col_indices)) ) print("COO representation:\n{0}\nNumpy representation:\n{1}".format(eye_coo, data)) # matplotlib is the primary scientific library in Python. It provides funcs for making publication-qualilty visualizations such as line charts, histograms, scatter plots, and so on. # When working in JupyterNotebook, you can show figures by using the [%matplotlib notebook] and [%matplotlib inline] commands. import matplotlib.pyplot as plt x = np.linspace(-10, 10, 1000) # sequence of numbers [-10;10] with 100 steps in between y = np.sin(x) plt.plot(x, y, marker='x') plt.show() # pandas is a lib for data wrangling and analysis. # pandas DataFrame is a table, similar to an Excel spreadsheet. # In contrast to NumPy, which requires that all entries in an array be of the same type, pandas allow each column to have a separate type. # pandas also provides ability to ingest from a great variety of file formats/databases: sql, excel files, csv files. import pandas as pd data = { "Name": ["max", "alex", "mikhail", "kostya"], "Location": ["NY", "Paris", "Berlin", "Kyiv"], "Age": [10, 20, 30, 40] } data_pandas =
pd.DataFrame(data)
pandas.DataFrame
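The completion constructs data_pandas with pandas.DataFrame; the surrounding comments stress that, unlike a NumPy array, each column keeps its own dtype. A two-line check of that claim:

import pandas as pd

data_pandas = pd.DataFrame({"Name": ["max", "alex"], "Location": ["NY", "Paris"], "Age": [10, 20]})
print(data_pandas.dtypes)   # Name/Location are object, Age is int64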
import importlib from experiment_config import experiment_path, chosen_experiment spec = importlib.util.spec_from_file_location(chosen_experiment, experiment_path) config = importlib.util.module_from_spec(spec) spec.loader.exec_module(config) import pandas as pd import numpy as np import random import os def add_noise(df): if config.add_noise: biggest_error_value = (config.smartmeter_voltage_range[1] / config.smartmeter_ratedvoltage_range[ 0]) * config.accuracy times_std = 3 # means biggest error value (i.e maximum value of smartmeter scale) is bigger than 98.8% of distribution mean = 0 num_samples = len(df) calibrated = False while calibrated == False: std = biggest_error_value / times_std # biggest error should be 1% of expected value (or of maximum of scale of measuring device) samples = np.random.normal(mean, std, size=num_samples) if samples.max() > biggest_error_value: times_std = times_std + 0.1 else: calibrated = True if config.just_voltages: df_noised = df[('ElmTerm', 'm:u')] + samples else: df_noised = df.drop([('ElmTerm', 'm:u')], axis = 1) df_noised[('ElmTerm', 'm:u')] = (df[('ElmTerm', 'm:u')] + samples) return df_noised else: if config.just_voltages: try: df_noised = df[('ElmTerm', 'm:u')] except KeyError: df_noised = df else: df_noised = df return df_noised def add_samples(train_samples, test_samples, num_features, sample_dict, samples_per_term, samples_before, label, number_of_samples_per_file, positive_test_samples_so_far, test_samples_so_far, dummy=False): for key in random.sample(list(sample_dict), samples_per_term): sample = sample_dict[key] sample_number = int(len(train_samples.columns) + len(test_samples.columns) / num_features) noised_data = add_noise(sample) if config.train_test_split == int: share_of_test_samples = config.train_test_split / config.number_of_samples else: share_of_test_samples = config.train_test_split if int(1/share_of_test_samples) <= number_of_samples_per_file: criterion1 = (int(number_of_samples_per_file * samples_before * share_of_test_samples) < share_of_test_samples*config.number_of_samples) #should always be true criterion2 = True else: criterion1 = (test_samples_so_far < share_of_test_samples*config.number_of_samples) if (label == 1 and (positive_test_samples_so_far < share_of_test_samples*config.number_of_samples*config.share_of_positive_samples)): criterion2 = True elif (label == 0 and ((test_samples_so_far - positive_test_samples_so_far) < (share_of_test_samples*config.number_of_samples*(1-config.share_of_positive_samples)))): criterion2 = True else: criterion2 = False if sample_number % int(1/share_of_test_samples) == 0 and criterion1 and criterion2: if num_features > 1: for i in noised_data.columns: if dummy: test_samples[(str(sample_number + samples_before), i[1])] = \ [noised_data[i].values.tolist()[0]] * len(noised_data[i]) + [label] else: test_samples[(str(sample_number + samples_before), i[1])] = noised_data[i].values.tolist() + [label] else: if dummy: test_samples[str(sample_number + samples_before)] = \ [noised_data.values.tolist()[0]] * len(noised_data) + [label] else: test_samples[str(sample_number + samples_before)] = noised_data.values.tolist() + [ label] else: if num_features > 1: for i in noised_data.columns: if dummy: train_samples[(str(sample_number + samples_before), i[1])] = \ [noised_data[i].values.tolist()[0]] * len(noised_data[i]) + [label] else: train_samples[(str(sample_number + samples_before), i[1])] = noised_data[i].values.tolist() + [label] else: if dummy: train_samples[str(sample_number + samples_before)] = \ 
[noised_data.values.tolist()[0]] * len(noised_data) + [label] else: train_samples[str(sample_number + samples_before)] = noised_data.values.tolist() + [ label] return train_samples, test_samples def extract_malfunction_data(df, combinations_already_in_dataset, number_of_samples_before, positive_test_samples_so_far, test_samples_so_far): ''' :param df: :param combinations_already_in_dataset: :return: extracts data of interest (no duplicates) and labels it with 1 for malfunction present, and 0 with no malfunction present also adds noise to data ''' try: metainfo = df[('metainfo', 'in the first', 'few indices')] except KeyError: metainfo = df[str(('metainfo', 'in the first', 'few indices'))] terminals_with_malfunctions = [i for i in metainfo.iloc[5].split("'") if 'Bus' in i] if config.type == 'PV': line = 6 elif config.type == 'EV': line = 7 terminals_with_devices = [i for i in metainfo.iloc[line].split("'") if 'Bus' in i] start_time = metainfo[3].split(': ')[1].split('+')[0] sample_length = config.sample_length samples_to_go = config.number_of_samples - number_of_samples_before share_from_df = 1 / (config.simruns * config.number_of_grids) # share of samples taken from current df if samples_to_go < int(config.number_of_samples * share_from_df): samples_from_df = samples_to_go else: samples_from_df = int(config.number_of_samples * share_from_df) num_positive_samples = samples_from_df * config.share_of_positive_samples num_neg_samples = samples_from_df * (1 - config.share_of_positive_samples) pos_samples_per_term = num_positive_samples / len(terminals_with_malfunctions) neg_samples_per_term = num_neg_samples / (len(terminals_with_devices) - len(terminals_with_malfunctions)) difference_by_flooring = int(pos_samples_per_term) * len(terminals_with_malfunctions) - int(neg_samples_per_term) * ( len(terminals_with_devices) - len(terminals_with_malfunctions)) train_samples = pd.DataFrame(index=df.index[:sample_length].append(
pd.Index(['label'])
pandas.Index
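A minimal, self-contained sketch of the pd.Index completion above: appending pd.Index(['label']) to a truncated time index yields one extra row that can hold the class label. All names and values here are illustrative stand-ins, not taken from the experiment code.

import pandas as pd

# stand-in for df.index[:sample_length]; the real index comes from the simulation output
time_index = pd.RangeIndex(5, name="step")
extended = time_index.append(pd.Index(["label"]))   # 5 measurement rows + 1 'label' row

frame = pd.DataFrame(index=extended)
frame["sample_0"] = [0.98, 0.99, 1.01, 1.00, 0.97, 1]   # last entry is the label (1 = malfunction)
print(frame)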
"""dynamic user-input-responsive part of mood, and mood graphs""" from datetime import datetime import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdates from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() from scipy.signal import lsim, lti from scipy.signal.ltisys import StateSpaceContinuous from tqdm.autonotebook import tqdm from IPython.display import display from persistence.response_cache import ( ResponseCache, UserInputIdentifier, ) from feels.mood import ( random_mood_at_pst_datetime, logit_diff_to_pos_sent, pos_sent_to_logit_diff, ) from util.past import MILESTONE_TIMES from util.times import now_pst, fromtimestamp_pst MOOD_IMAGE_DIR = "data/mood_images/" STEP_SEC = 30 * 1 TAU_SEC = 3600 * 12 TAU_SEC_2ND = 60 * 60 WEIGHTED_AVG_START_TIME = pd.Timestamp("2021-01-04 09:10:00") WEIGHTED_AVG_P75_WEIGHT = 0.5 RESPONSE_SCALE_BASE = 0.15 # 0.1 # 0.2 #0.5 DETERMINER_CENTER = -3.1 # -2.4 # -1.5 #-2 DETERMINER_CENTER_UPDATES = { pd.Timestamp("2020-08-20 01:00:00"): -2.4, pd.Timestamp("2020-08-25 14:00:00"): -2.0, pd.Timestamp("2020-08-31 09:15:00"): -2.4, pd.Timestamp("2020-09-16 06:00:00"): -2.1, pd.Timestamp("2020-10-28 17:00:00"): -2.4, pd.Timestamp("2020-11-04 11:00:00"): -2.78, pd.Timestamp("2020-11-13 19:00:00"): -2.7, pd.Timestamp("2020-11-15 07:30:00"): -2.6, pd.Timestamp("2020-12-04 07:00:00"): -2.5, pd.Timestamp("2020-12-10 08:35:00"): -2.35, pd.Timestamp("2020-12-10 23:45:00"): -2.0, pd.Timestamp("2020-12-18 15:35:00"): -2.2, pd.Timestamp("2020-12-21 15:25:00"): -2.3, WEIGHTED_AVG_START_TIME: 0.0, pd.Timestamp("2021-02-08 09:25:00"): -0.25, pd.Timestamp("2021-02-14 17:55:00"): -0.125, pd.Timestamp("2021-02-15 17:25:00"): 0, pd.Timestamp("2021-02-16 17:45:00"): 0.5, pd.Timestamp("2021-02-17 12:45:00"): 0, pd.Timestamp("2021-02-26 17:30:00"): 0.5, pd.Timestamp("2021-02-27 16:05:00"): 0., pd.Timestamp("2021-03-15 09:55:00"): -0.2, pd.Timestamp("2021-03-15 19:50:00"): -0.4, pd.Timestamp("2021-03-20 06:55:00"): 0., pd.Timestamp("2021-03-24 22:40:00"): -0.3, pd.Timestamp("2021-03-31 12:25:00"): -0.5, pd.Timestamp("2021-04-09 07:10:00"): -0.25, pd.Timestamp("2021-05-05 17:00:00"): 0., pd.Timestamp("2021-05-07 18:15:00"): -0.25, pd.Timestamp("2021-05-12 07:50:00"): 0., pd.Timestamp("2021-05-22 09:50:00"): -0.125,
pd.Timestamp("2021-05-23 07:15:00")
pandas.Timestamp
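The DETERMINER_CENTER_UPDATES table above keys tuning values by pd.Timestamp, with the completion adding one more entry. A hedged sketch of how such a table might be resolved to "latest value at or before time t"; the lookup helper below is an assumption for illustration, not code from the module.

import pandas as pd

updates = {
    pd.Timestamp("2021-02-08 09:25:00"): -0.25,
    pd.Timestamp("2021-03-15 09:55:00"): -0.2,
    pd.Timestamp("2021-05-22 09:50:00"): -0.125,
}

def center_at(t: pd.Timestamp, default: float = 0.0) -> float:
    """Return the most recent update at or before t, else the default (hypothetical helper)."""
    past = [ts for ts in updates if ts <= t]
    return updates[max(past)] if past else default

print(center_at(pd.Timestamp("2021-04-01")))  # -> -0.2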
from __future__ import annotations import xml.etree.ElementTree as ET from io import StringIO import fire import numpy as np import pandas as pd from pdfminer.high_level import extract_text_to_fp from tabula import read_pdf def parse_frac_pdf(filepath: str) -> pd.DataFrame: """Parse an Alberta FracFocus.ca PDF into a Pandas Dataframe. Extracts the top table first for metadata about well, then joins on all components of the frac job listed in lower tables - stitches tables together over multiple pages. Parameters ---------- filepath : str Path to PDF file Returns ------- pd.DataFrame Parsed data from FracFocus.ca PDF. """ # Extract full PDF into XML by character. xml_out = StringIO() with open(filepath, "rb") as filepath_in: extract_text_to_fp( filepath_in, xml_out, max_pages=20, output_type="xml", codec=None, ) # Extract header table with metadata first. df_header = read_pdf( filepath, area=[70, 50, 280, 430], guess=False, pandas_options={"header": None}, silent=True, pages=1, ) df_header = df_header[0] df_header = df_header.transpose() df_header.columns = df_header.iloc[0, :] df_header = df_header.reindex(df_header.index.drop(0)) df_header = df_header.astype("str", copy=True) # Start parsing XML of the entire report. root = ET.fromstring(xml_out.getvalue()) dfcols = ["attributes", "text_value", "page"] df_xml = pd.DataFrame(columns=dfcols) page = 1 xml_detection_list = [] while page <= len(root.findall("page")): for text in root[page - 1].iter("text"): attributes = text.attrib text_value = text.text xml_detection_list.append( { "attributes": attributes, "text_value": text_value, "page": page, }, ) page += 1 df_xml =
pd.DataFrame(xml_detection_list)
pandas.DataFrame
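The completion builds df_xml from the accumulated xml_detection_list. A small illustrative version with fabricated rows; collecting dicts in a list and constructing the frame once, as the parser does, avoids slow row-by-row appends.

import pandas as pd

rows = [
    {"attributes": {"size": "9.9"}, "text_value": "Well Name", "page": 1},
    {"attributes": {"size": "9.9"}, "text_value": "Frac Date", "page": 1},
    {"attributes": {"size": "8.0"}, "text_value": "100 Mesh Sand", "page": 2},
]

df_xml = pd.DataFrame(rows, columns=["attributes", "text_value", "page"])
print(df_xml)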
# -*- coding: UTF-8 -*-
# ********************************************************
# * Author : <NAME>
# * Email : <EMAIL>
# * Create time : 2021-07-26 17:03
# * Last modified : 2021-07-27 13:17
# * Filename : quant.py
# * Description :
# *********************************************************

import pandas as pd
import os
import json
from datetime import timedelta, datetime

from dplearn.tools import tick_start, tick_end


# =============================================================================
# ##### K Line Wrapping #####
# =============================================================================

def wrapKLine(data, open_c, close_c, high_c, low_c, vol_c, ts_c, ts_format, wrap):
    """
    Wrap a K-Line dataframe into a longer-duration one.

    Input:
        data:      [pandas dataframe] K-Line dataframe
        open_c:    [string] Column name of open price
        close_c:   [string] Column name of close price
        high_c:    [string] Column name of highest price
        low_c:     [string] Column name of lowest price
        vol_c:     [string] Column name of volume
        ts_c:      [string] Column name of timestamp
        ts_format: [string] Format of timestamp in input data (eg: "%Y-%m-%d %H:%M:%S")
        wrap:      [string] Time range that you want to wrap
    Output:
        Pandas dataframe
    """
    tick_start("Wrapping K Line data")
    col_list = [open_c, close_c, high_c, low_c, vol_c, ts_c]
    df = data[col_list].copy()  # copy so the timestamp conversion does not modify the caller's frame
    df[ts_c] = pd.to_datetime(df[ts_c], format=ts_format)
    # df["time_group"] = df[ts_c].dt.strftime("%Y-%m-%d@%H")
    vol_new = df.groupby(pd.Grouper(key=ts_c, freq=wrap))[vol_c].agg('sum')
    open_new = df.groupby(pd.Grouper(key=ts_c, freq=wrap))[open_c].first()
    close_new = df.groupby(pd.Grouper(key=ts_c, freq=wrap))[close_c].last()
    high_new = df.groupby(pd.Grouper(key=ts_c, freq=wrap))[high_c].max()
    low_new = df.groupby(pd.Grouper(key=ts_c, freq=wrap))[low_c].min()
    df_new =
pd.DataFrame()
pandas.DataFrame
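Presumably df_new is then filled from the per-column aggregates computed above. The sketch below shows the same wrapping idea on synthetic 1-minute bars using named aggregation; the column names and the 5-minute target are assumptions for illustration, not taken from quant.py.

import pandas as pd

bars = pd.DataFrame({
    "ts": pd.date_range("2021-07-26 09:30", periods=6, freq="1min"),
    "open":  [10.0, 10.1, 10.2, 10.1, 10.3, 10.4],
    "high":  [10.2, 10.3, 10.3, 10.4, 10.5, 10.6],
    "low":   [ 9.9, 10.0, 10.1, 10.0, 10.2, 10.3],
    "close": [10.1, 10.2, 10.1, 10.3, 10.4, 10.5],
    "vol":   [100, 120, 90, 110, 130, 80],
})

# one groupby pass instead of five separate ones (equivalent result for these columns)
wrapped = bars.groupby(pd.Grouper(key="ts", freq="5min")).agg(
    open=("open", "first"), close=("close", "last"),
    high=("high", "max"), low=("low", "min"), vol=("vol", "sum"),
)
print(wrapped)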
from typing import Tuple, Optional, Union from requests import Response from requests.exceptions import HTTPError import pandas as pd import numpy as np from logging import Logger from redata.commons.logger import log_stdout from redata.commons.issue_request import redata_request class FigshareInstituteAdmin: """ A Python interface for administration and data curation with institutional Figshare instances Most methods take an ``article_id`` or ``curation_id`` input :param token: Figshare OAuth2 authentication token :param stage: Flag to either use Figshare stage or production API. Default: production :param admin_filter: List of filters to remove admin accounts from user list :param log: Logger object for stdout and file logging. Default: stdout :ivar token: Figshare OAuth2 authentication token :ivar stage: Flag to either use Figshare stage or prod API :ivar baseurl: Base URL of Figshare API :ivar baseurl_institute: Base URL of Figshare API for institutions :ivar headers: HTTP header information :ivar admin_filter: List of filters to remove admin accounts from user list :ivar ignore_admin: Flags whether to remove admin accounts from user list """ def __init__(self, token: str, stage: bool = False, admin_filter: list = None, log: Logger = log_stdout()): self.token = token self.stage = stage if not self.stage: self.baseurl = "https://api.figshare.com/v2/account/" else: self.baseurl = "https://api.figsh.com/v2/account/" self.baseurl_institute = self.baseurl + "institution/" self.headers = {'Content-Type': 'application/json'} if self.token: self.headers['Authorization'] = f'token {self.token}' self.admin_filter = admin_filter if admin_filter is not None: self.ignore_admin = True else: self.ignore_admin = False self.log = log def endpoint(self, link: str, institute: bool = True) -> str: """Concatenate the endpoint to the baseurl for ``requests`` :param link: API endpoint to append to baseurl :param institute: Flag to use regular of institute baseurl :return: URL for HTTPS API """ if institute: return self.baseurl_institute + link else: return self.baseurl + link def get_articles(self, process: bool = True) -> \ Union[pd.DataFrame, Response]: """ Retrieve information about all articles within institutional instance See: https://docs.figshare.com/#private_institution_articles :param process: Returns JSON content from ``redata_request``, otherwise the full request is provided. Default: True :return: Relational database of all articles for an institution or the full ``requests.Response`` """ url = self.endpoint("articles") # Figshare API is limited to a maximum of 1000 per page # Full pagination still needed params = {'page': 1, 'page_size': 1000} articles = redata_request('GET', url, self.headers, params=params, process=process) if process: articles_df =
pd.DataFrame(articles)
pandas.DataFrame
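The articles payload returned by the endpoint is a JSON list of records, which pd.DataFrame turns into a relational table. The two records below are fabricated placeholders, not the exact Figshare schema.

import pandas as pd

articles = [
    {"id": 101, "title": "Dataset A", "published_date": "2021-01-05"},
    {"id": 102, "title": "Dataset B", "published_date": "2021-02-11"},
]

articles_df = pd.DataFrame(articles)
articles_df["published_date"] = pd.to_datetime(articles_df["published_date"])
print(articles_df)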
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import pandas as pd
import shutil
import os
import dash_table

from app import app

df =
pd.read_csv('available_datasets.csv',sep='|')
pandas.read_csv
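A self-contained sketch of the pipe-delimited read above; io.StringIO stands in for available_datasets.csv, whose real columns are not shown in the source.

import io
import pandas as pd

csv_text = "name|rows|description\niris|150|Fisher's iris measurements\n"
df = pd.read_csv(io.StringIO(csv_text), sep="|")
print(df.head())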
import re
from math import log

import numpy as np
import pandas as pd

# parameter lambda
lambda1 = 0.9

print('\n===================== terms ========================\n')

documents_list = list()
with open('./tf-idf.dat', 'r') as f:
    for line in f.readlines():
        line = line.strip()
        line = re.sub(r'[^A-Za-z\s]', ' ', line)  # raw string avoids an invalid-escape warning
        line = line.lower()
        documents_list.append(line.split())

terms = list()
for document in documents_list:
    for term in document:
        if term not in terms:
            terms.append(term)
print(terms)

tf = list()
for document in documents_list:
    temp = [0] * len(terms)
    for term in document:
        temp[terms.index(term)] += 1
    tf.append(temp)

# translate to pandas DataFrame
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
raw_freq =
pd.DataFrame(tf, columns=terms)
pandas.DataFrame
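A toy version of the raw term-frequency matrix built by this completion: a nested list of counts labelled with the term vocabulary. The two documents are invented; the real corpus is read from tf-idf.dat.

import pandas as pd

terms = ["cat", "dog", "fish"]
tf = [
    [2, 0, 1],   # counts for document 0
    [0, 3, 0],   # counts for document 1
]
raw_freq = pd.DataFrame(tf, columns=terms)
print(raw_freq)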
import os import re import sys import math import json import bokeh import geopandas import numpy as np import pandas as pd from scipy.interpolate import interp1d from bokeh.io.doc import curdoc from bokeh.layouts import layout from bokeh.plotting import figure from bokeh.models.glyphs import Text from bokeh.application import Application from bokeh.models.callbacks import CustomJS from bokeh.plotting import show as plt_show from bokeh.palettes import brewer,OrRd,YlGn from bokeh.models.widgets import Button,Select from bokeh.tile_providers import Vendors,get_provider from bokeh.io import output_notebook,show,output_file from bokeh.application.handlers import FunctionHandler from bokeh.layouts import widgetbox,row,column,gridplot from bokeh.models import ColumnDataSource,Slider,HoverTool,Select,Div,Range1d,WMTSTileSource,BoxZoomTool,TapTool,Panel,Tabs from bokeh.models import GeoJSONDataSource,LinearColorMapper,ColorBar,NumeralTickFormatter,LinearAxis,Grid,Label,Band,Legend,LegendItem verbose=False enable_GeoJSON_saving=False DATA_UPDATE_DATE='20-October-2021' FORECASTS_UPDATE_DATE='19-October-2021' def apply_corrections(input_df): for state in list(input_df['state'].values): input_df.loc[input_df['state']==state,'state']=re.sub('[^A-Za-z ]+', '',str(state)) input_df.loc[input_df['state']=='Karanataka','state']='Karnataka' input_df.loc[input_df['state']=='Himanchal Pradesh','state']='Himachal Pradesh' input_df.loc[input_df['state']=='Telengana','state']='Telangana' input_df.loc[input_df['state']=='Dadra and Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu' input_df.loc[input_df['state']=='Dadar Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu' input_df.loc[input_df['state']=='Dadra Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu' input_df.loc[input_df['state']=='Daman & Diu','state']='Dadra and Nagar Haveli and Daman and Diu' input_df.loc[input_df['state']=='Daman and Diu','state']='Dadra and Nagar Haveli and Daman and Diu' return input_df def os_style_formatter(input_str): try: os_env=os.environ['OS'] except: os_env='unknown' return str(input_str).replace('/', "\\") if os_env=='Windows_NT' else str(input_str) try: India_statewise=geopandas.read_file('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/GeoJSON_assets/India_statewise_minified.geojson') India_stats=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/Population_stats_India_statewise.csv') covid19_data=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/COVID19_India_statewise.csv') preds_df=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/experimental/output_preds.csv') except: India_GeoJSON_repoFile=os_style_formatter( './GitHub/MoadComputer/covid19-visualization/data/GeoJSON_assets/India_statewise_minified.geojson') covid19_statewise_repoFile=os_style_formatter( './GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/COVID19_India_statewise.csv') India_statewise_statsFile=os_style_formatter( './GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/Population_stats_India_statewise.csv') saved_predsFile=os_style_formatter( './GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/experimental/output_preds.csv') if os.path.exists(India_GeoJSON_repoFile): 
India_statewise=geopandas.read_file(India_GeoJSON_repoFile) print('Reading India GeoJSON file from saved repo ...') else: sys.exit('Failed to read GeoJSON file for India ...') if os.path.exists(covid19_statewise_repoFile): covid19_data=pd.read_csv(covid19_statewise_repoFile) print('Reading India COVID19 file from saved repo ...') else: sys.exit('Failed to read India COVID19 file ...') if os.path.exists(India_statewise_statsFile): India_stats=pd.read_csv(India_statewise_statsFile) print('Reading India stats file from saved repo ...') else: sys.exit('Failed to read India stats file ...') if os.path.exists(saved_predsFile): preds_df=pd.read_csv(saved_predsFile) else: print('Advanced mode disabled ...') advanced_mode=False India_statewise=apply_corrections(India_statewise) if enable_GeoJSON_saving: India_statewise.to_file("India_statewise_minified.geojson", driver='GeoJSON') India_statewise=India_statewise.to_crs("EPSG:3395") India_stats=apply_corrections(India_stats) if len(covid19_data.columns) ==6: del covid19_data['active_cases'] covid19_data=apply_corrections(covid19_data) covid19_data=pd.merge(covid19_data, India_stats, on='state', how='left') covid19_data_copy=covid19_data.copy() noCOVID19_list = list(set(list(India_statewise.state.values)) -set(list(covid19_data.state))) if verbose: print('A total of: {} states with no reports of COVID19 ...'.format(len(noCOVID19_list))) print('\nStates in India with no COVID19 reports:') for noCOVID19_state in noCOVID19_list: print('\n{} ...'.format(noCOVID19_state)) def covid19_json(covid_df, geo_df,verbose=False): merged_df = pd.merge(geo_df, covid_df, on='state', how='left') try: merged_df = merged_df.fillna(0) except: merged_df.fillna({'total_cases': 0}, inplace=True) merged_df.fillna({'deaths': 0}, inplace=True) merged_df.fillna({'discharged': 0}, inplace=True) if verbose: print('Consider updating GeoPandas library ...') merged_json = json.loads(merged_df.to_json()) json_data = json.dumps(merged_json) return {'json_data': json_data, 'data_frame': merged_df} merged_data = covid19_json(covid19_data, India_statewise, verbose=verbose) merged_json = merged_data['json_data'] def CustomPalette(palette_type, enable_colorInverse=True): if (palette_type.lower()=='OrRd'.lower()) or (palette_type.lower()=='reds'): palette = OrRd[9] elif (palette_type.lower()=='YlGn'.lower()) or (palette_type.lower()=='greens'): palette = YlGn[9] else: palette = brewer['Oranges'] if enable_colorInverse: palette = palette[::-1] else: palette = palette[::1] return palette def CustomHoverTool(advanced_hoverTool, custom_hoverTool, performance_hoverTool, perfstats_hovertool): advancedStats_hover=HoverTool(tooltips ="""<strong><font face="Arial" size="2">@state</font></strong> <br> <hr> <strong><font face="Arial" size="2">Forecast</font></strong> <br> <font face="Arial" size="2">Reported cases: <strong>@total_cases{}</strong></font> <font face="Arial" size="2"><p style="color:red; margin:0">+1 day: <strong>@preds_cases{} (±@preds_cases_std{})</strong></p></font> <font face="Arial" size="2"><p style="color:green; margin:0">+3 days: <strong>@preds_cases_3{} (±@preds_cases_3_std{})</strong></p></font> <font face="Arial" size="2"><p style="color:blue; margin:0">+7 days: <strong>@preds_cases_7{} (±@preds_cases_7_std{})</strong></p></font> <hr> <strong><font face="Arial" size="1">Data updated on: {}</font></strong> <br> <strong><font face="Arial" size="1">Forecasts updated on: {}</font></strong> <br> <strong><font face="Arial" size="1">Forecasts by: https://moad.computer</font></strong> 
<br> """.format('{(0,0)}', '{(0,0)}', '{(0,0)}', '{(0,0)}', '{(0,0)}', '{(0,0)}', '{(0,0)}', DATA_UPDATE_DATE, FORECASTS_UPDATE_DATE)) performanceStats_hover=HoverTool(tooltips ="""<strong><font face="Arial" size="2">@state</font></strong> <br> <hr> <strong><font face="Arial" size="2">MAPE</font></strong><br> <strong><font face="Arial" size="1">(Mean Absolute Percentage Error)</font></strong> <font face="Arial" size="2"><p style="color:red; margin:0">+1 day: <strong>@MAPE{}</strong></p></font> <font face="Arial" size="2"><p style="color:green; margin:0">+3 days: <strong>@MAPE_3{}</strong></p></font> <font face="Arial" size="2"><p style="color:blue; margin:0">+7 days: <strong>@MAPE_7{}</strong></p></font> <hr> <strong><font face="Arial" size="1">Data updated on: {}</font></strong><br> <strong><font face="Arial" size="1">Forecasts updated on: {}</font></strong> <br> <strong><font face="Arial" size="1">Forecasts by: https://moad.computer</font></strong> """.format('{(0.000)}', '{(0.000)}', '{(0.000)}', DATA_UPDATE_DATE, FORECASTS_UPDATE_DATE)) simpleStats_hover=HoverTool(tooltips ="""<strong><font face="Arial" size="3">@state</font></strong> <br> <font face="Arial" size="3">Cases: @total_cases{}</font><br> <font face="Arial" size="3">Deaths: @deaths{} </font> <hr> <strong><font face="Arial" size="1">Updated on: {}</font></strong><br> <strong><font face="Arial" size="1">Data from: https://mohfw.gov.in </font></strong> """.format('{(0,0)}', '{(0,0)}', DATA_UPDATE_DATE)) perfStats_hover=HoverTool(tooltips ="""<strong><font face="Arial" size="3">@state</font></strong> <br> <font face="Arial" size="3">Cases: @total_cases{}</font><br> <font face="Arial" size="3">Deaths: @deaths{} </font> <hr> <strong><font face="Arial" size="1">Data updated on: {}</font></strong><br> <strong><font face="Arial" size="1">Forecasts updated on: {}</font></strong><br> <strong><font face="Arial" size="1">Data from: https://mohfw.gov.in </font></strong> """.format('{(0,0)}', '{(0,0)}', DATA_UPDATE_DATE, FORECASTS_UPDATE_DATE)) standard_hover = HoverTool(tooltips = [('State','@state'), ('Cases', '@total_cases'), #('Discharged/migrated', '@discharged'), ('Deaths', '@deaths')]) if performance_hoverTool: hover = performanceStats_hover elif advanced_hoverTool: hover = advancedStats_hover elif custom_hoverTool: hover = simpleStats_hover elif perfstats_hovertool: hover = perfStats_hover else: hover = standard_hover return hover def MapOverlayFormatter(map_overlay): if map_overlay: xmin = 7570000 xmax = 10950000 ymin = 950000 ymax = 4850000 return xmin, xmax, ymin, ymax def geographic_overlay(plt, geosourceJson=None, colorBar=None, colorMapper=None, colorMode='', hoverTool=None, mapOverlay=True, enableTapTool=False, enableToolbar=True): if mapOverlay: wmts = WMTSTileSource(url="https://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png") plt.add_tile(wmts) plt.xaxis.axis_label = 'longitude' plt.yaxis.axis_label = 'latitude' plt.xgrid.grid_line_color = None plt.ygrid.grid_line_color = None plt.axis.visible = False plt.patches('xs','ys', source = geosourceJson, fill_color = {'field' : colorMode, 'transform' : colorMapper}, line_color = 'purple', line_width = 0.5, fill_alpha = 0.60 if enableTapTool else 0.65, nonselection_alpha = 0.65) plt.add_layout(colorBar, 'right') plt.add_tools(hoverTool) if enableTapTool: plt.add_tools(TapTool()) if enableToolbar: plt.toolbar.autohide = True if plt.title is not None: plt.title.text_font_size = '30pt' return plt def lakshadweep_correction(plt, input_df=None, advanced_plotting=False): if 
advanced_plotting: source = ColumnDataSource(data=dict(x=[8075000], y=[1250000], state=['Lakshadweep'], total_cases=[input_df.loc[input_df['state']=='Lakshadweep','total_cases']], deaths=[input_df.loc[input_df['state']=='Lakshadweep','deaths']], preds_cases=[input_df.loc[input_df['state']=='Lakshadweep','preds_cases']], preds_cases_std=[input_df.loc[input_df['state']=='Lakshadweep','preds_cases_std']], MAPE=[input_df.loc[input_df['state']=='Lakshadweep','MAPE']], preds_cases_3=[input_df.loc[input_df['state']=='Lakshadweep','preds_cases_3']], preds_cases_3_std=[input_df.loc[input_df['state']=='Lakshadweep','preds_cases_3_std']], MAPE_3=[input_df.loc[input_df['state']=='Lakshadweep','MAPE_3']], preds_cases_7=[input_df.loc[input_df['state']=='Lakshadweep','preds_cases_7']], preds_cases_7_std=[input_df.loc[input_df['state']=='Lakshadweep','preds_cases_7_std']], MAPE_7=[input_df.loc[input_df['state']=='Lakshadweep','MAPE_7']] )) else: source = ColumnDataSource(data=dict(x=[8075000], y=[1250000], state=['Lakshadweep'], total_cases=[input_df.loc[input_df['state']=='Lakshadweep','total_cases']], deaths=[input_df.loc[input_df['state']=='Lakshadweep','deaths']])) plt.circle(x='x', y='y', size=25, source=source, line_color='purple', fill_alpha=0.075, nonselection_alpha=0.20, color='blue') return plt def CustomTitleFormatter(): xtext=8350000 ytext=4425000 xbox=9400000 ybox=4575000 return xtext, ytext, xbox, ybox def CustomTitleOverlay(plt, xtext=0, ytext=0, xbox=0, ybox=0, input_df=None, advanced_plotting=False): overlayText=Label(x=xtext, y=ytext, text="COVID19 in India", text_font_size='25pt') plt.add_layout(overlayText) if advanced_plotting: print(covid19_data['total_cases'].sum()) source = ColumnDataSource(data=dict(x=[xbox], y=[ybox], state=['India'], total_cases=[covid19_data['total_cases'].sum()], deaths=[covid19_data['deaths'].sum()], preds_cases=[preds_df['preds_cases'].sum()], preds_cases_std=[preds_df['preds_cases_std'].sum()], MAPE=[preds_df['MAPE'].mean()], preds_cases_3=[preds_df['preds_cases_3'].sum()], preds_cases_3_std=[preds_df['preds_cases_3_std'].sum()], MAPE_3=[preds_df['MAPE_3'].mean()], preds_cases_7=[preds_df['preds_cases_7'].sum()], preds_cases_7_std=[preds_df['preds_cases_7_std'].sum()], MAPE_7=[np.mean(np.abs(preds_df['MAPE_7']))] )) else: source = ColumnDataSource(data=dict(x=[xbox], y=[ybox], state=['India'], total_cases=[input_df['total_cases'].sum()], deaths=[input_df['deaths'].sum()])) plt.rect(x='x', y='y', width=2250000, height=250000, color="#CAB2D6", source=source, line_color='purple', #width_units='screen', #height_units='screen', fill_alpha=0.25) return plt def covid19_plot(covid19_geosource, input_df=None, input_field=None, color_field='total_cases', plot_title=None, map_overlay=True, palette_type='OrRd', integer_plot=False, custom_hovertool=True, enable_LakshadweepStats=True, enable_IndiaStats=False, enable_advancedStats=False, enable_performanceStats=False, enable_foecastPerf=False, enable_toolbar=False): palette = CustomPalette(palette_type, enable_colorInverse=False if enable_performanceStats else True) color_mapper = LinearColorMapper(palette=palette, low=0, high=int(10*(np.ceil(np.max(input_df[color_field].values)/10)))\ if not enable_performanceStats else np.round((np.max(input_df[color_field].values)),3) ) if integer_plot: format_tick=NumeralTickFormatter(format='0,0') else: format_tick=NumeralTickFormatter(format=str(input_df[input_field].values.astype('int')) if not enable_performanceStats else\ 
str(np.round((input_df[input_field].values.astype('float')),1))) color_bar = ColorBar(color_mapper=color_mapper, label_standoff=14, formatter=format_tick, border_line_color=None, major_label_text_font_size='12px', location = (0, 0)) xmin,xmax,ymin,ymax=MapOverlayFormatter(map_overlay) hover=CustomHoverTool(enable_advancedStats,custom_hovertool,enable_performanceStats,enable_foecastPerf) plt=figure(title = plot_title, x_range=(xmin, xmax) if map_overlay else None, y_range=(ymin, ymax) if map_overlay else None, tools='save' if enable_toolbar else '', plot_height = 530, plot_width = 530, toolbar_location = 'left' if enable_toolbar else None, lod_factor=int(1e7), lod_threshold=int(2), # output_backend="webgl" ) plt=geographic_overlay(plt, geosourceJson=covid19_geosource, colorBar=color_bar, colorMapper=color_mapper, colorMode=input_field, hoverTool=hover, mapOverlay=map_overlay, enableToolbar=enable_toolbar, enableTapTool=True if ((enable_advancedStats) or (enable_performanceStats)) else False) if enable_LakshadweepStats: plt=lakshadweep_correction(plt, input_df=input_df, advanced_plotting=True if ((enable_advancedStats) or (enable_performanceStats)) else False) if enable_IndiaStats: xtext,ytext,xbox,ybox=CustomTitleFormatter() plt=CustomTitleOverlay(plt, xtext=xtext, ytext=ytext, xbox=xbox, ybox=ybox, input_df=input_df, advanced_plotting=True if ((enable_advancedStats) or (enable_performanceStats)) else False) plt.xaxis.major_tick_line_color=None plt.yaxis.major_tick_line_color=None plt.xaxis.minor_tick_line_color=None plt.yaxis.minor_tick_line_color=None plt.xaxis[0].ticker.num_minor_ticks=0 plt.yaxis[0].ticker.num_minor_ticks=0 plt.yaxis.formatter=NumeralTickFormatter(format='0,0') return plt advanced_mode=True covid19_geosource=GeoJSONDataSource(geojson=merged_json) plot_title=None#'COVID19 outbreak in India' app_title='COVID19 India' India_totalCases=covid19_data['total_cases'].sum() India_totalDeaths=covid19_data['deaths'].sum() print(India_totalCases) basic_covid19_plot = covid19_plot(covid19_geosource, input_df=covid19_data, input_field='total_cases', color_field='total_cases', enable_IndiaStats=True, integer_plot=True, plot_title=plot_title) basicPlot_tab = Panel(child=basic_covid19_plot, title="⌂") if advanced_mode: preds_df.columns=['id','state', \ 'preds_cases_7', 'preds_cases_3', 'preds_cases', \ 'preds_cases_7_std', 'preds_cases_3_std', 'preds_cases_std', \ 'MAPE', 'MAPE_3', 'MAPE_7'] print(preds_df.head(10)) print(covid19_data_copy.head(10)) preds_covid19_df=pd.merge(covid19_data_copy, preds_df, on='state', how='left') preds_covid19_df=preds_covid19_df.fillna(0) print(preds_covid19_df.head(10)) try: del preds_covid19_df['ID'] except: print('Unable to delete dataframe item: ID') try: del preds_covid19_df['id'] except: print('Unable to delete dataframe item: id') try: del preds_covid19_df['discharged'] except: print('Unable to delete dataframe item: discharged') merged_preds_data=covid19_json(preds_covid19_df,India_statewise) merged_preds_json=merged_preds_data['json_data'] preds_covid19_data=merged_preds_data['data_frame'] print(preds_covid19_data['state'].equals(covid19_data['state'])) print(set(list(preds_covid19_data['state']))-set(list(covid19_data['state']))) preds_covid19_geosource=GeoJSONDataSource(geojson=merged_preds_json) advanced_covid19_plot=covid19_plot(preds_covid19_geosource, input_df=preds_covid19_data, input_field='preds_cases_7', color_field='total_cases', enable_IndiaStats=True, enable_advancedStats=True, integer_plot=True, plot_title=None) 
advancedPlot_tab=Panel(child=advanced_covid19_plot, title="Forecast") performance_covid19_plot=covid19_plot(preds_covid19_geosource, input_df=preds_covid19_data, palette_type='Greens', input_field='MAPE_7', color_field='MAPE_7', enable_IndiaStats=True, enable_performanceStats=True, plot_title=None) performancePlot_tab=Panel(child=performance_covid19_plot,title="Forecast quality") def LineSmoothing(x,y, interpolationType='cubic', interpolationPoints=1000): fn=interp1d(x,y, kind=interpolationType) x_=np.linspace(np.min(x), np.max(x), interpolationPoints) y_=fn(x_) return x_,y_ def model_performancePlot(source, use_cds=False, enable_interpolation=False, custom_perfHoverTool=True): if use_cds: plotIndex=source.data['plot_index'] plotIndex_labels=source.data['plot_labels'] dateLabels={i: date for i, date in enumerate(plotIndex_labels)} x=source.data['x'] else: plotIndex_labels=list(source['date'].astype('str')) modelPerformance=source.dropna() x=[i for i in range(len(list(source['date'].astype('str'))))] y_cases=list(source['total_cases'].astype('int')) y_preds=list(source['preds_cases'].astype('int')) y_preds3=list(source['preds_cases_3'].astype('int')) y_preds7=list(source['preds_cases_7'].astype('int')) y_stdev=list(source['preds_cases_std'].astype('int')) y_3_stdev=list(source['preds_cases_3_std'].astype('int')) y_7_stdev=list(source['preds_cases_7_std'].astype('int')) lower_lim=list(np.asarray(y_preds)-3*np.asarray(y_stdev)) lower_3_lim=list(np.asarray(y_preds3)-3*np.asarray(y_3_stdev)) lower_7_lim=list(np.asarray(y_preds7)-3*np.asarray(y_7_stdev)) upper_lim=list(np.asarray(y_preds)+3*np.asarray(y_stdev)) upper_3_lim=list(np.asarray(y_preds3)+3*np.asarray(y_3_stdev)) upper_7_lim=list(np.asarray(y_preds7)+3*np.asarray(y_7_stdev)) plotIndex=list(source['date'].astype('str')) dateLabels={i: date for i, date in enumerate(plotIndex)} source=ColumnDataSource({'x':x,'plot_index':plotIndex,'plot_labels':plotIndex_labels, 'y_cases':y_cases,'y_preds':y_preds,'y_preds3':y_preds3,'y_preds7':y_preds7, 'y_std':y_stdev,'y_3std':y_3_stdev,'y_7std':y_7_stdev, 'upper_lim':upper_lim,'upper_3_lim':upper_3_lim,'upper_7_lim':upper_7_lim, 'lower_lim':lower_lim,'lower_3_lim':lower_3_lim,'lower_7_lim':lower_7_lim}) if enable_interpolation: x_cases_interpol,y_cases_interpol=LineSmoothing(x,y_cases) x_preds_interpol,y_preds_interpol=LineSmoothing(x,y_preds) x_preds3_interpol,y_preds3_interpol=LineSmoothing(x,y_preds3) x_preds7_interpol,y_preds7_interpol=LineSmoothing(x,y_preds7) if len(plotIndex)%2==0 or len(plotIndex)%5==0 or np.round((len(plotIndex)/10)/(len(plotIndex)//10))==1: for i in range( len(plotIndex)//2 ): dateLabelObject=datetime.strptime(str(dateLabels[len(plotIndex)-1]),'%d-%B-%Y') dateLabel_extra=dateLabelObject+timedelta(days=(i+1)) dateLabels.update({len(plotIndex)+i:str(dateLabel_extra.strftime('%d-%B-%Y')) }) data_cases=dict(title=['report' \ for i in range(len(x))], plotIndex=plotIndex, x='x', y='y_cases', source=source) data_preds=dict(title=['forecast a day before'\ for i in range(len(x))], plotIndex='plot_index', x='x', y='y_preds', source=source) data_preds3=dict(title=['forecast 3 days before'\ for i in range(len(x))], plotIndex='plot_index', x='x', y='y_preds3', source=source) data_preds7=dict(title=['forecast 7 days before'\ for i in range(len(x))], plotIndex='plot_index', x='x', y='y_preds7', source=source) TOOLTIPS = """<strong><font face="Arial" size="2">Forecast performance for @plot_index</font></strong> <br> <font face="Arial" size="2"><p style="color:black; margin:0">Reported cases: 
<strong>@y_cases{}</strong></p></font> <font face="Arial" size="2"><p style="color:red; margin:0">Forecast a day ago: <strong>@y_preds{} (±@y_std{})</strong></p></font> <font face="Arial" size="2"><p style="color:green; margin:0">Forecast 3 days ago: <strong>@y_preds3{} (±@y_3std{})</strong></p></font> <font face="Arial" size="2"><p style="color:blue; margin:0">Forecast 7 days ago: <strong>@y_preds7{} (±@y_7std{})</strong></p></font> <hr> <strong><font face="Arial" size="1">Data updated on: {}</font></strong><br> <strong><font face="Arial" size="1">Forecasts updated on: {}</font></strong><br> <strong><font face="Arial" size="1">Forecasts by: https://moad.computer</font></strong>""".format('{(0,0)}', '{(0,0)}', '{(0,0)}', '{(0,0)}', '{(0,0)}', '{(0,0)}', '{(0,0)}', DATA_UPDATE_DATE, FORECASTS_UPDATE_DATE) \ if custom_perfHoverTool else [('Date: ','@plot_index'), ('Cases: ','@y_cases')] perfPlot=figure(#y_axis_type="log",y_range=(2.5e4,7.5e4), y_axis_location='left', plot_height=500, plot_width=500, tools='hover', toolbar_location=None, tooltips=TOOLTIPS) perfPlot.line(x='x',y='y_cases', source=source, line_width=2.5, color='black') r = perfPlot.circle(x='x', y='y_cases', color='grey', fill_color='black', size=8, source=source) perfPlot.line(x='x',y='y_preds', source=source, color='darkred') r1 = perfPlot.circle(x='x', y='y_preds', color='darkred', fill_color='red', size=8, source=source) perfPlot.line(x='x',y='y_preds3', source=source, color='green') r3 = perfPlot.circle(x='x', y='y_preds3', color='lime', fill_color='darkgreen', size=8, source=source) perfPlot.line(x='x',y='y_preds7', source=source, color='blue') r7=perfPlot.circle(x='x', y='y_preds7', color='purple', fill_color='blue', size=8, source=source) #x_ul_interpol,ul_interpol=LineSmoothing(x,upper_lim) #x_ll_interpol,ll_interpol=LineSmoothing(x,lower_lim) #src_interpol=ColumnDataSource({'x_ul_interpol':x_ul_interpol,'ul_interpol':ul_interpol, # 'x_ll_interpol':x_ll_interpol,'ll_interpol':ll_interpol}) #ul=perfPlot.line(x='x_ul_interpol', # y='ul_interpol', # source=src_interpol, # color='pink') #ll=perfPlot.line(x='x_ll_interpol', # y='ll_interpol', # source=src_interpol, # color='pink') perfPlot.hover.renderers=[r,r1,r3,r7] perfPlot.yaxis.formatter.use_scientific=False perfPlot.yaxis.formatter=NumeralTickFormatter(format='0,0') perfPlot.xaxis.major_label_overrides=dateLabels perfPlot.xaxis.axis_label='Date' perfPlot.yaxis.axis_label=' ' perfPlot.yaxis.axis_label_text_align='left' #perfPlot.yaxis.axis_label_text_font='arial' perfPlot.xaxis.axis_label_text_font='arial' perfPlot.xaxis.major_label_text_font='arial' perfPlot.yaxis.major_label_text_font='arial' perfPlot.add_layout(LinearAxis(axis_label='COVID19 cases', axis_label_text_font='arial', major_tick_line_color=None, minor_tick_line_color=None, major_label_text_font_size='0pt', major_label_orientation=math.pi), 'right') perfPlot.yaxis.major_label_orientation=(math.pi*.75)/2 perfPlot.xaxis.major_label_orientation=(math.pi*.75)/2 band=Band(base='x',lower='lower_lim',upper='upper_lim',source=source, level='underlay',fill_alpha=0.5,line_width=1, fill_color='indianred',line_color='indianred') band3=Band(base='x',lower='lower_3_lim',upper='upper_3_lim',source=source, level='underlay',fill_alpha=0.4,line_width=1, fill_color='lime',line_color='lime') band7=Band(base='x',lower='lower_7_lim',upper='upper_7_lim',source=source, level='underlay',fill_alpha=0.25,line_width=1, fill_color='indigo',line_color='indigo') perfPlot.renderers.append(band) perfPlot.renderers.append(band3) 
perfPlot.renderers.append(band7) return perfPlot from datetime import datetime, timedelta def date_formatter(x): datetimeobject = datetime.strptime(str(x),'%Y%m%d') return datetimeobject.strftime('%d-%B-%Y') def make_dataset(state): DATA_SOURCE='https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/experimental/model_performance_' DATA_URL='{}{}.csv'.format(DATA_SOURCE, state) DATA_URL=DATA_URL.replace(" ", "%20") DATA_FILE=os_style_formatter( './GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/experimental/model_performance_{}.csv'.format( state)) try: modelPerformance=
pd.read_csv(DATA_URL)
pandas.read_csv
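make_dataset() tries pd.read_csv on the GitHub URL first and falls back to a saved repo copy when that fails. A hedged sketch of that pattern; the helper name, URL, and path below are placeholders, not the repository's real locations.

import pandas as pd

def load_performance(url: str, local_path: str) -> pd.DataFrame:
    """Hypothetical helper: remote copy first, saved repo copy as fallback."""
    try:
        return pd.read_csv(url)          # remote copy, if reachable
    except Exception:
        return pd.read_csv(local_path)   # local fallback, mirroring the try/except above

# perf = load_performance("https://example.com/model_performance_Kerala.csv",
#                         "./model_performance_Kerala.csv")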
# Arithmetic tests for DataFrame/Series/Index/Array classes that should # behave identically. # Specifically for Period dtype import operator import numpy as np import pytest from pandas._libs.tslibs.period import IncompatibleFrequency from pandas.errors import PerformanceWarning import pandas as pd from pandas import Period, PeriodIndex, Series, period_range from pandas.core import ops from pandas.core.arrays import TimedeltaArray import pandas.util.testing as tm from pandas.tseries.frequencies import to_offset # ------------------------------------------------------------------ # Comparisons class TestPeriodArrayLikeComparisons: # Comparison tests for PeriodDtype vectors fully parametrized over # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison # tests will eventually end up here. def test_compare_zerodim(self, box_with_array): # GH#26689 make sure we unbox zero-dimensional arrays xbox = box_with_array if box_with_array is not pd.Index else np.ndarray pi = pd.period_range("2000", periods=4) other = np.array(pi.to_numpy()[0]) pi = tm.box_expected(pi, box_with_array) result = pi <= other expected = np.array([True, False, False, False]) expected = tm.box_expected(expected, xbox) tm.assert_equal(result, expected) class TestPeriodIndexComparisons: # TODO: parameterize over boxes @pytest.mark.parametrize("other", ["2017", 2017]) def test_eq(self, other): idx = PeriodIndex(["2017", "2017", "2018"], freq="D") expected = np.array([True, True, False]) result = idx == other tm.assert_numpy_array_equal(result, expected) def test_pi_cmp_period(self): idx = period_range("2007-01", periods=20, freq="M") result = idx < idx[10] exp = idx.values < idx.values[10] tm.assert_numpy_array_equal(result, exp) # TODO: moved from test_datetime64; de-duplicate with version below def test_parr_cmp_period_scalar2(self, box_with_array): xbox = box_with_array if box_with_array is not pd.Index else np.ndarray pi = pd.period_range("2000-01-01", periods=10, freq="D") val = Period("2000-01-04", freq="D") expected = [x > val for x in pi] ser = tm.box_expected(pi, box_with_array) expected = tm.box_expected(expected, xbox) result = ser > val tm.assert_equal(result, expected) val = pi[5] result = ser > val expected = [x > val for x in pi] expected = tm.box_expected(expected, xbox) tm.assert_equal(result, expected) @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) def test_parr_cmp_period_scalar(self, freq, box_with_array): # GH#13200 xbox = np.ndarray if box_with_array is pd.Index else box_with_array base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) base = tm.box_expected(base, box_with_array) per = Period("2011-02", freq=freq) exp = np.array([False, True, False, False]) exp = tm.box_expected(exp, xbox) tm.assert_equal(base == per, exp) tm.assert_equal(per == base, exp) exp = np.array([True, False, True, True]) exp = tm.box_expected(exp, xbox) tm.assert_equal(base != per, exp) tm.assert_equal(per != base, exp) exp = np.array([False, False, True, True]) exp = tm.box_expected(exp, xbox) tm.assert_equal(base > per, exp) tm.assert_equal(per < base, exp) exp = np.array([True, False, False, False]) exp = tm.box_expected(exp, xbox) tm.assert_equal(base < per, exp) tm.assert_equal(per > base, exp) exp = np.array([False, True, True, True]) exp = tm.box_expected(exp, xbox) tm.assert_equal(base >= per, exp) tm.assert_equal(per <= base, exp) exp = np.array([True, True, False, False]) exp = tm.box_expected(exp, xbox) tm.assert_equal(base <= per, exp) tm.assert_equal(per >= base, exp) 
@pytest.mark.parametrize("freq", ["M", "2M", "3M"]) def test_parr_cmp_pi(self, freq, box_with_array): # GH#13200 xbox = np.ndarray if box_with_array is pd.Index else box_with_array base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) base = tm.box_expected(base, box_with_array) # TODO: could also box idx? idx =
PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
pandas.PeriodIndex
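For reference, the comparison being exercised here: two PeriodIndex objects with matching freq compare elementwise and return a boolean ndarray. A standalone sketch using freq="M", outside the test harness.

import numpy as np
import pandas as pd
from pandas import PeriodIndex

base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq="M")
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq="M")

result = base < idx
np.testing.assert_array_equal(result, np.array([True, False, False, True]))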
import unittest from random import random from craft_ai.pandas import CRAFTAI_PANDAS_ENABLED if CRAFTAI_PANDAS_ENABLED: import copy import pandas as pd from numpy.random import randn import craft_ai.pandas from .data import pandas_valid_data, valid_data from .utils import generate_entity_id from . import settings AGENT_ID_1_BASE = "test_pandas_1" AGENT_ID_2_BASE = "test_pandas_2" GENERATOR_ID_BASE = "test_pandas_generator" SIMPLE_AGENT_CONFIGURATION = pandas_valid_data.SIMPLE_AGENT_CONFIGURATION SIMPLE_AGENT_BOOSTING_CONFIGURATION = ( pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION ) SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE = ( pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE ) AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE = ( pandas_valid_data.AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE ) SIMPLE_AGENT_DATA = pandas_valid_data.SIMPLE_AGENT_DATA SIMPLE_AGENT_BOOSTING_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_DATA SIMPLE_AGENT_BOOSTING_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_MANY_DATA AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA = ( pandas_valid_data.AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA ) SIMPLE_AGENT_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_MANY_DATA COMPLEX_AGENT_CONFIGURATION = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION COMPLEX_AGENT_CONFIGURATION_2 = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION_2 COMPLEX_AGENT_DATA = pandas_valid_data.COMPLEX_AGENT_DATA COMPLEX_AGENT_DATA_2 = pandas_valid_data.COMPLEX_AGENT_DATA_2 DATETIME_AGENT_CONFIGURATION = pandas_valid_data.DATETIME_AGENT_CONFIGURATION DATETIME_AGENT_DATA = pandas_valid_data.DATETIME_AGENT_DATA MISSING_AGENT_CONFIGURATION = pandas_valid_data.MISSING_AGENT_CONFIGURATION MISSING_AGENT_DATA = pandas_valid_data.MISSING_AGENT_DATA MISSING_AGENT_DATA_DECISION = pandas_valid_data.MISSING_AGENT_DATA_DECISION INVALID_PYTHON_IDENTIFIER_CONFIGURATION = ( pandas_valid_data.INVALID_PYTHON_IDENTIFIER_CONFIGURATION ) INVALID_PYTHON_IDENTIFIER_DATA = pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DATA INVALID_PYTHON_IDENTIFIER_DECISION = ( pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DECISION ) EMPTY_TREE = pandas_valid_data.EMPTY_TREE CLIENT = craft_ai.pandas.Client(settings.CRAFT_CFG) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasSimpleAgent(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgent") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_add_agent_operations_df_bad_index(self): df = pd.DataFrame(randn(10, 5), columns=["a", "b", "c", "d", "e"]) self.assertRaises( craft_ai.pandas.errors.CraftAiBadRequestError, CLIENT.add_agent_operations, self.agent_id, df, ) def test_add_agent_operations_df(self): CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA) agent = CLIENT.get_agent(self.agent_id) self.assertEqual( agent["firstTimestamp"], SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9, ) self.assertEqual( agent["lastTimestamp"], SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9, ) def test_add_agent_operations_df_websocket(self): CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA, True) agent = CLIENT.get_agent(self.agent_id) self.assertEqual( agent["firstTimestamp"], SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9, ) self.assertEqual( agent["lastTimestamp"], SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9, ) def 
test_add_agent_operations_df_unexpected_property(self): df = pd.DataFrame( randn(300, 6), columns=["a", "b", "c", "d", "e", "f"], index=pd.date_range("20200101", periods=300, freq="T").tz_localize( "Europe/Paris" ), ) self.assertRaises( craft_ai.pandas.errors.CraftAiBadRequestError, CLIENT.add_agent_operations, self.agent_id, df, ) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasComplexAgent(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_add_agent_operations_df_complex_agent(self): CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA) agent = CLIENT.get_agent(self.agent_id) self.assertEqual( agent["firstTimestamp"], COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9, ) self.assertEqual( agent["lastTimestamp"], COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9, ) def test_add_agent_operations_df_complex_agent_websocket(self): CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA, True) agent = CLIENT.get_agent(self.agent_id) self.assertEqual( agent["firstTimestamp"], COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9, ) self.assertEqual( agent["lastTimestamp"], COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9, ) def test_add_agent_operations_df_without_tz(self): test_df = COMPLEX_AGENT_DATA.drop(columns="tz") CLIENT.add_agent_operations(self.agent_id, test_df) agent = CLIENT.get_agent(self.agent_id) self.assertEqual( agent["firstTimestamp"], COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9, ) self.assertEqual( agent["lastTimestamp"], COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9, ) def test_add_agent_operations_df_without_tz_websocket(self): test_df = COMPLEX_AGENT_DATA.drop(columns="tz") CLIENT.add_agent_operations(self.agent_id, test_df, True) agent = CLIENT.get_agent(self.agent_id) self.assertEqual( agent["firstTimestamp"], COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9, ) self.assertEqual( agent["lastTimestamp"], COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9, ) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasMissingAgent(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "MissingAgent") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(MISSING_AGENT_CONFIGURATION, self.agent_id) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_add_agent_operations_df_missing_agent(self): CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA) agent = CLIENT.get_agent(self.agent_id) self.assertEqual( agent["firstTimestamp"], MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9, ) self.assertEqual( agent["lastTimestamp"], MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9, ) def test_add_agent_operations_df_missing_agent_websocket(self): CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA, True) agent = CLIENT.get_agent(self.agent_id) self.assertEqual( agent["firstTimestamp"], MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9, ) self.assertEqual( agent["lastTimestamp"], MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9, ) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasSimpleAgentWithData(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWData") 
CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id) CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_get_agent_operations_df(self): df = CLIENT.get_agent_operations(self.agent_id) self.assertEqual(len(df), 300) self.assertEqual(len(df.dtypes), 5) self.assertEqual( df.first_valid_index(), pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"), ) self.assertEqual( df.last_valid_index(), pd.Timestamp("2020-01-01 04:59:00", tz="Europe/Paris"), ) def test_get_agent_states_df(self): df = CLIENT.get_agent_states(self.agent_id) self.assertEqual(len(df), 180) self.assertEqual(len(df.dtypes), 5) self.assertEqual( df.first_valid_index(), pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"), ) self.assertEqual( df.last_valid_index(), pd.Timestamp("2020-01-01 04:58:20", tz="Europe/Paris"), ) def test_tree_visualization(self): tree1 = CLIENT.get_agent_decision_tree( self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9 ) craft_ai.pandas.utils.create_tree_html(tree1, "", "constant", None, 500) def test_display_tree_raised_error(self): tree1 = CLIENT.get_agent_decision_tree( self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9 ) self.assertRaises( craft_ai.pandas.errors.CraftAiError, craft_ai.pandas.utils.display_tree, tree1, ) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasSimpleAgentWithOperations(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWOp") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_id) CLIENT.add_agent_operations(self.agent_id, valid_data.VALID_OPERATIONS_SET) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_get_decision_tree_with_pdtimestamp(self): # test if we get the same decision tree decision_tree = CLIENT.get_agent_decision_tree( self.agent_id, pd.Timestamp(valid_data.VALID_TIMESTAMP, unit="s", tz="UTC") ) ground_truth_decision_tree = CLIENT.get_agent_decision_tree( self.agent_id, valid_data.VALID_TIMESTAMP ) self.assertIsInstance(decision_tree, dict) self.assertNotEqual(decision_tree.get("_version"), None) self.assertNotEqual(decision_tree.get("configuration"), None) self.assertNotEqual(decision_tree.get("trees"), None) self.assertEqual(decision_tree, ground_truth_decision_tree) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasComplexAgentWithData(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgentWData") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id) CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_get_agent_operations_df_complex_agent(self): df = CLIENT.get_agent_operations(self.agent_id) self.assertEqual(len(df), 10) self.assertEqual(len(df.dtypes), 3) self.assertEqual( df.first_valid_index(), pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"), ) self.assertEqual( df.last_valid_index(), pd.Timestamp("2020-01-10 00:00:00", tz="Europe/Paris"), ) def test_decide_from_contexts_df(self): tree = CLIENT.get_agent_decision_tree( self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9 ) test_df = COMPLEX_AGENT_DATA test_df_copy = test_df.copy(deep=True) df = CLIENT.decide_from_contexts_df(tree, test_df) self.assertEqual(len(df), 10) 
self.assertEqual(len(df.dtypes), 6) self.assertTrue(test_df.equals(test_df_copy)) self.assertEqual( df.first_valid_index(), pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"), ) self.assertEqual( df.last_valid_index(), pd.Timestamp("2020-01-10 00:00:00", tz="Europe/Paris"), ) # Also works as before, with a plain context output = CLIENT.decide(tree, {"a": 1, "tz": "+02:00"}) self.assertEqual(output["output"]["b"]["predicted_value"], "Pierre") def test_decide_from_contexts_df_zero_rows(self): tree = CLIENT.get_agent_decision_tree( self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9 ) test_df = COMPLEX_AGENT_DATA.iloc[:0, :] self.assertRaises( craft_ai.errors.CraftAiBadRequestError, CLIENT.decide_from_contexts_df, tree, test_df, ) def test_decide_from_contexts_df_empty_df(self): tree = CLIENT.get_agent_decision_tree( self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9 ) self.assertRaises( craft_ai.errors.CraftAiBadRequestError, CLIENT.decide_from_contexts_df, tree, pd.DataFrame(), ) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasComplexAgent2WithData(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent2WData") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION_2, self.agent_id) CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_decide_from_contexts_df_null_decisions(self): tree = CLIENT.get_agent_decision_tree( self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9 ) test_df = pd.DataFrame( [["Jean-Pierre", "+02:00"], ["Paul"]], columns=["b", "tz"], index=pd.date_range("20200201", periods=2, freq="D").tz_localize( "Europe/Paris" ), ) test_df_copy = test_df.copy(deep=True) df = CLIENT.decide_from_contexts_df(tree, test_df) self.assertEqual(len(df), 2) self.assertTrue(test_df.equals(test_df_copy)) self.assertTrue(pd.notnull(df["a_predicted_value"][0])) self.assertTrue(pd.notnull(df["a_predicted_value"][1])) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasComplexAgent3WithData(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent3WData") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION_2, self.agent_id) CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA_2) def test_decide_from_contexts_df_empty_tree(self): test_df = pd.DataFrame( [[0, "Jean-Pierre", "+02:00"], [1, "Paul", "+02:00"]], columns=["a", "b", "tz"], index=pd.date_range("20200201", periods=2, freq="D").tz_localize( "Europe/Paris" ), ) df = CLIENT.decide_from_contexts_df(EMPTY_TREE, test_df) expected_error_message = ( "Unable to take decision: the decision tree is not " "based on any context operations." 
) self.assertEqual(len(df), 2) self.assertEqual(df.columns, ["error"]) self.assertEqual(df["error"][0], expected_error_message) self.assertEqual(df["error"][1], expected_error_message) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_decide_from_contexts_df_with_array(self): tree = CLIENT.get_agent_decision_tree( self.agent_id, COMPLEX_AGENT_DATA_2.last_valid_index().value // 10 ** 9 ) test_df = pd.DataFrame( [["Jean-Pierre", "+02:00"], ["Paul"]], columns=["b", "tz"], index=pd.date_range("20200201", periods=2, freq="D").tz_localize( "Europe/Paris" ), ) test_df_copy = test_df.copy(deep=True) df = CLIENT.decide_from_contexts_df(tree, test_df) self.assertEqual(len(df), 2) self.assertTrue(test_df.equals(test_df_copy)) self.assertTrue(pd.notnull(df["a_predicted_value"][0])) self.assertTrue(pd.notnull(df["a_predicted_value"][1])) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasMissingAgentWithData(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "MissingAgentWData") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(MISSING_AGENT_CONFIGURATION, self.agent_id) CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_decide_from_missing_contexts_df(self): tree = CLIENT.get_agent_decision_tree( self.agent_id, MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9, "2" ) df = CLIENT.decide_from_contexts_df(tree, MISSING_AGENT_DATA_DECISION) self.assertEqual(len(df), 2) self.assertEqual( df.first_valid_index(), pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"), ) self.assertEqual( df.last_valid_index(), pd.Timestamp("2020-01-02 00:00:00", tz="Europe/Paris"), ) # Also works as before, with a context containing an optional value output = CLIENT.decide(tree, {"b": {}, "tz": "+02:00"}) self.assertTrue(pd.notnull(output["output"]["a"]["predicted_value"])) # Also works as before, with a context containing a missing value output = CLIENT.decide(tree, {"b": None, "tz": "+02:00"}) self.assertTrue(pd.notnull(output["output"]["a"]["predicted_value"])) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasDatetimeAgentWithData(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "DatetimeAgentWData") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(DATETIME_AGENT_CONFIGURATION, self.agent_id) CLIENT.add_agent_operations(self.agent_id, DATETIME_AGENT_DATA) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_datetime_states_df(self): df = CLIENT.get_agent_states(self.agent_id) self.assertEqual(len(df), 10) self.assertEqual(len(df.dtypes), 4) self.assertEqual(df["myTimeOfDay"].tolist(), [2, 3, 6, 7, 4, 5, 14, 15, 16, 19]) # This test is commented because of the current non-deterministic behavior of craft ai. 
# def test_datetime_decide_from_contexts_df(self): # tree = CLIENT.get_agent_decision_tree(AGENT_ID, # DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9) # test_df = pd.DataFrame( # [ # [1], # [3], # [7] # ], # columns=["a"], # index=pd.date_range("20200101 00:00:00", # periods=3, # freq="H").tz_localize("Asia/Shanghai")) # test_df_copy = test_df.copy(deep=True) # df = CLIENT.decide_from_contexts_df(tree, test_df) # self.assertEqual(len(df), 3) # self.assertEqual(len(df.dtypes), 6) # self.assertEqual(df["b_predicted_value"].tolist(), ["Pierre", "Paul", "Jacques"]) # self.assertTrue(test_df.equals(test_df_copy)) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasAgentWithInvalidIdentifier(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "InvalidIdentifier") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(INVALID_PYTHON_IDENTIFIER_CONFIGURATION, self.agent_id) CLIENT.add_agent_operations(self.agent_id, INVALID_PYTHON_IDENTIFIER_DATA) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_decide_from_python_invalid_identifier(self): tree = CLIENT.get_agent_decision_tree( self.agent_id, INVALID_PYTHON_IDENTIFIER_DATA.last_valid_index().value // 10 ** 9, "2", ) test_df = INVALID_PYTHON_IDENTIFIER_DECISION.copy(deep=True) df = CLIENT.decide_from_contexts_df(tree, test_df) self.assertEqual(len(df), 3) self.assertEqual(len(df.dtypes), 8) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasGeneratorWithOperation(unittest.TestCase): def setUp(self): self.agent_1_id = generate_entity_id(AGENT_ID_1_BASE + "GeneratorWithOp") self.agent_2_id = generate_entity_id(AGENT_ID_2_BASE + "GeneratorWithOp") self.generator_id = generate_entity_id(GENERATOR_ID_BASE + "GeneratorWithOp") CLIENT.delete_agent(self.agent_1_id) CLIENT.delete_agent(self.agent_2_id) CLIENT.delete_generator(self.generator_id) CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_1_id) CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_2_id) CLIENT.add_agent_operations(self.agent_1_id, valid_data.VALID_OPERATIONS_SET) CLIENT.add_agent_operations(self.agent_2_id, valid_data.VALID_OPERATIONS_SET) generator_configuration = copy.deepcopy( valid_data.VALID_GENERATOR_CONFIGURATION ) generator_configuration["filter"] = [self.agent_1_id, self.agent_2_id] CLIENT.create_generator(generator_configuration, self.generator_id) def tearDown(self): CLIENT.delete_agent(self.agent_1_id) CLIENT.delete_agent(self.agent_2_id) CLIENT.delete_generator(self.generator_id) def test_get_generator_decision_tree_with_pdtimestamp(self): # test if we get the same decision tree decision_tree = CLIENT.get_generator_decision_tree( self.generator_id, pd.Timestamp(valid_data.VALID_TIMESTAMP, unit="s", tz="UTC"), ) ground_truth_decision_tree = CLIENT.get_generator_decision_tree( self.generator_id, valid_data.VALID_TIMESTAMP ) self.assertIsInstance(decision_tree, dict) self.assertNotEqual(decision_tree.get("_version"), None) self.assertNotEqual(decision_tree.get("configuration"), None) self.assertNotEqual(decision_tree.get("trees"), None) self.assertEqual(decision_tree, ground_truth_decision_tree) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasBoostingSimpleAgent(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "BoostingAgentWData") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent(SIMPLE_AGENT_BOOSTING_CONFIGURATION, 
self.agent_id) CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_BOOSTING_DATA) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_decide_boosting_from_contexts_df(self): context_df = pd.DataFrame( [[random(), random(), random(), "+01:00"] for i in range(4)], columns=["b", "c", "d", "e"], index=pd.date_range("20200101", periods=4, freq="T").tz_localize( "Europe/Paris", ), ) decisions = CLIENT.decide_boosting_from_contexts_df( self.agent_id, SIMPLE_AGENT_BOOSTING_DATA.first_valid_index().value // 10 ** 9, SIMPLE_AGENT_BOOSTING_DATA.last_valid_index().value // 10 ** 9, context_df, ) self.assertEqual(decisions.shape[0], 4) self.assertTrue(len(decisions.columns) == 1) self.assertTrue("a_predicted_value" in decisions.columns) self.assertTrue( type(decisions.iloc[0]["a_predicted_value"]) == float or type(decisions.iloc[0]["a_predicted_value"]) == int ) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasBoostingGeneratorWithOperation(unittest.TestCase): def setUp(self): self.agent_1_id = generate_entity_id(AGENT_ID_1_BASE + "BoostGeneratorWithOp") self.agent_2_id = generate_entity_id(AGENT_ID_2_BASE + "BoostGeneratorWithOp") self.generator_id = generate_entity_id( GENERATOR_ID_BASE + "BoostGeneratorWithOp" ) CLIENT.delete_agent(self.agent_1_id) CLIENT.delete_agent(self.agent_2_id) CLIENT.delete_generator(self.generator_id) CLIENT.create_agent(SIMPLE_AGENT_BOOSTING_CONFIGURATION, self.agent_1_id) CLIENT.create_agent(SIMPLE_AGENT_BOOSTING_CONFIGURATION, self.agent_2_id) CLIENT.add_agent_operations(self.agent_1_id, SIMPLE_AGENT_BOOSTING_DATA) CLIENT.add_agent_operations(self.agent_2_id, SIMPLE_AGENT_BOOSTING_MANY_DATA) generator_configuration = copy.deepcopy(SIMPLE_AGENT_BOOSTING_CONFIGURATION) generator_configuration["filter"] = [self.agent_1_id, self.agent_2_id] CLIENT.create_generator(generator_configuration, self.generator_id) def tearDown(self): CLIENT.delete_agent(self.agent_1_id) CLIENT.delete_agent(self.agent_2_id) CLIENT.delete_generator(self.generator_id) def test_get_generator_boosting_with_pdtimestamp(self): context_df = pd.DataFrame( [[random(), random(), random(), "+01:00"] for i in range(4)], columns=["b", "c", "d", "e"], index=pd.date_range("20200101", periods=4, freq="T").tz_localize( "Europe/Paris", ), ) decisions = CLIENT.decide_generator_boosting_from_contexts_df( self.generator_id, SIMPLE_AGENT_BOOSTING_DATA.first_valid_index().value // 10 ** 9, SIMPLE_AGENT_BOOSTING_MANY_DATA.last_valid_index().value // 10 ** 9, context_df, ) self.assertEqual(decisions.shape[0], 4) self.assertTrue(len(decisions.columns) == 1) self.assertTrue("a_predicted_value" in decisions.columns) self.assertTrue( type(decisions.iloc[0]["a_predicted_value"]) == float or type(decisions.iloc[0]["a_predicted_value"]) == int ) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasBoostingGeneratorWithGeneratedType(unittest.TestCase): def setUp(self): self.agent_1_id = generate_entity_id( AGENT_ID_1_BASE + "BoostGeneratorWithGenType" ) self.agent_2_id = generate_entity_id( AGENT_ID_2_BASE + "BoostGeneratorWithGenType" ) self.generator_id = generate_entity_id( GENERATOR_ID_BASE + "BoostGeneratorWithGenType" ) CLIENT.delete_agent(self.agent_1_id) CLIENT.delete_agent(self.agent_2_id) CLIENT.delete_generator(self.generator_id) CLIENT.create_agent( SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE, self.agent_1_id ) CLIENT.create_agent( SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE, self.agent_2_id )
CLIENT.add_agent_operations(self.agent_1_id, SIMPLE_AGENT_BOOSTING_DATA) CLIENT.add_agent_operations(self.agent_2_id, SIMPLE_AGENT_BOOSTING_MANY_DATA) generator_configuration = copy.deepcopy( SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE ) generator_configuration["filter"] = [self.agent_1_id, self.agent_2_id] CLIENT.create_generator(generator_configuration, self.generator_id) def tearDown(self): CLIENT.delete_agent(self.agent_1_id) CLIENT.delete_agent(self.agent_2_id) CLIENT.delete_generator(self.generator_id) def test_get_generator_boosting_with_pdtimestamp(self): context_df = pd.DataFrame( [[random(), random(), random(), "+01:00"] for i in range(4)], columns=["b", "c", "d", "e"], index=pd.date_range("20200101", periods=4, freq="T").tz_localize( "Europe/Paris", ), ) decisions = CLIENT.decide_generator_boosting_from_contexts_df( self.generator_id, SIMPLE_AGENT_BOOSTING_DATA.first_valid_index().value // 10 ** 9, SIMPLE_AGENT_BOOSTING_MANY_DATA.last_valid_index().value // 10 ** 9, context_df, ) self.assertTrue(len(decisions.columns) == 1) self.assertTrue("a_predicted_value" in decisions.columns) self.assertTrue( type(decisions.iloc[0]["a_predicted_value"]) == float or type(decisions.iloc[0]["a_predicted_value"]) == int ) def test_get_generator_boosting_with_pdtimestamp_and_generated_col(self): # Check that the values given in a prediction context for a generated type are discarded context_df = pd.DataFrame( [ [ random(), random(), random(), "+01:00", int(random() * 6), 1 + int(random() * 11), ] for i in range(4) ], columns=["b", "c", "d", "e", "f", "g"], index=pd.date_range("20200101", periods=4, freq="T").tz_localize( "Europe/Paris", ), ) context_df_without_generated_col = context_df.iloc[:, 0:4] decisions = CLIENT.decide_generator_boosting_from_contexts_df( self.generator_id, SIMPLE_AGENT_BOOSTING_DATA.first_valid_index().value // 10 ** 9, SIMPLE_AGENT_BOOSTING_MANY_DATA.last_valid_index().value // 10 ** 9, context_df, ) decisions_without_cols = CLIENT.decide_generator_boosting_from_contexts_df( self.generator_id, SIMPLE_AGENT_BOOSTING_DATA.first_valid_index().value // 10 ** 9, SIMPLE_AGENT_BOOSTING_MANY_DATA.last_valid_index().value // 10 ** 9, context_df_without_generated_col, ) self.assertTrue(len(decisions.columns) == 1) self.assertTrue("a_predicted_value" in decisions.columns) self.assertTrue( type(decisions.iloc[0]["a_predicted_value"]) == float or type(decisions.iloc[0]["a_predicted_value"]) == int ) self.assertTrue(decisions.equals(decisions_without_cols)) @unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled") class TestPandasBoostingWithGeneratedTypeWithoutTSColumn(unittest.TestCase): def setUp(self): self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "BoostGenWitNoTSColumn") CLIENT.delete_agent(self.agent_id) CLIENT.create_agent( AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE, self.agent_id ) CLIENT.add_agent_operations(self.agent_id, AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA) def tearDown(self): CLIENT.delete_agent(self.agent_id) def test_get_generator_boosting_with_pdtimestamp(self): context_df = pd.DataFrame( [[random()] for i in range(4)], columns=["b"], index=
pd.date_range("20200101", periods=4, freq="T")
pandas.date_range
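# Illustrative sketch (not from the original row; example values assumed): the pd.date_range
# pattern above builds a minutely DatetimeIndex that is then tz-localized and used as a context index.
import pandas as pd

idx = pd.date_range("20200101", periods=4, freq="T").tz_localize("Europe/Paris")
context_df = pd.DataFrame({"b": [0.1, 0.2, 0.3, 0.4]}, index=idx)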
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for `data_patterns` package.""" import unittest import os from data_patterns import data_patterns import pandas as pd class TestData_patterns(unittest.TestCase): """Tests for `data_patterns` package.""" def test_pattern1(self): """Test of read input date function""" # Input df = pd.DataFrame(columns = ['Name', 'Type', 'Assets', 'TV-life', 'TV-nonlife' , 'Own funds', 'Excess'], data = [['Insurer 1', 'life insurer', 1000, 800, 0, 200, 200], ['Insurer 2', 'non-life insurer', 4000, 0, 3200, 800, 800], ['Insurer 3', 'non-life insurer', 800, 0, 700, 100, 100], ['Insurer 4', 'life insurer', 2500, 1800, 0, 700, 700], ['Insurer 5', 'non-life insurer', 2100, 0, 2200, 200, 200], ['Insurer 6', 'life insurer', 9000, 8800, 0, 200, 200], ['Insurer 7', 'life insurer', 9000, 8800, 0, 200, 200], ['Insurer 8', 'life insurer', 9000, 8800, 0, 200, 200], ['Insurer 9', 'non-life insurer', 9000, 8800, 0, 200, 200], ['Insurer 10', 'non-life insurer', 9000, 0, 8800, 200, 199.99]]) df.set_index('Name', inplace = True) pattern = {'name' : 'Pattern 1', 'pattern' : '-->', 'P_columns': ['Type'], 'Q_columns': ['Assets', 'TV-life', 'TV-nonlife', 'Own funds'], 'encode' : {'Assets': 'reported', 'TV-life': 'reported', 'TV-nonlife': 'reported', 'Own funds': 'reported'}} # Expected output expected = pd.DataFrame(columns = ['index','pattern_id', 'cluster', 'pattern_def', 'support', 'exceptions', 'confidence'], data = [[0,'Pattern 1', 0, 'IF ({"Type"} = "life insurer") THEN ({"Assets"} = "reported") & ({"TV-life"} = "reported") & ({"TV-nonlife"} = "not reported") & ({"Own funds"} = "reported")', 5, 0, 1], [1,'Pattern 1', 0, 'IF ({"Type"} = "non-life insurer") THEN ({"Assets"} = "reported") & ({"TV-life"} = "not reported") & ({"TV-nonlife"} = "reported") & ({"Own funds"} = "reported")', 4, 1, 0.8]]) expected.set_index('index', inplace = True) expected = data_patterns.PatternDataFrame(expected) # Actual output p = data_patterns.PatternMiner(df) actual = p.find(pattern) actual = data_patterns.PatternDataFrame(actual.loc[:, 'pattern_id': 'confidence']) # Assert self.assertEqual(type(actual), type(expected), "Pattern test 1: types do not match")
pd.testing.assert_frame_equal(actual, expected)
pandas.testing.assert_frame_equal
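# Illustrative sketch (example frames assumed): pd.testing.assert_frame_equal raises an
# AssertionError describing the first mismatch, unlike df.equals(), which only returns a boolean.
import pandas as pd

left = pd.DataFrame({"support": [5, 4], "confidence": [1.0, 0.8]})
right = pd.DataFrame({"support": [5, 4], "confidence": [1.0, 0.8]})
pd.testing.assert_frame_equal(left, right)  # passes silently when the frames match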
import sys import pandas as pd import numpy as np from shutil import copyfile from datetime import datetime,timedelta input_files = {} if len(sys.argv) > 1: input_files['model_data'] = sys.argv[1] print("Model-Data-File: %s"%input_files['model_data']) input_files['unit'] = sys.argv[2] print("Unit-Data-File: %s"%input_files['unit'] ) input_files['time_series'] = sys.argv[3] print("Time-Series-File: %s"%input_files['time_series']) # Output Files output_files = {'time_series':"time_series_spine.csv", 'model_data':'model_data_spine.xlsx'} # sys.argv[1] is empty if script is not called from Spine else: input_files['model_data'] = "model_data.xlsx" input_files['unit'] = "unit_parameters.csv" input_files['time_series'] = "time_series.csv" # Output Files output_files = {'time_series':'manuel/time_series_spine.csv', 'model_data':'manuel/model_data_spine.xlsx'} def convert_model_data(input_files,output_files): # Read Time series values time_series = pd.read_csv(input_files['time_series'],sep=";",header=0,index_col=None) # times = time_series['Time'].tolist() power_load_raw = time_series['Load'].to_numpy() heat_load_raw = time_series['Heat'].to_numpy() wind_power_raw = time_series['Wind'].to_numpy() # Read unit capacities units =
pd.read_csv(input_files['unit'],sep=";",header=0,index_col=0)
pandas.read_csv
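# Illustrative sketch (hypothetical file contents assumed): the pd.read_csv call above reads a
# semicolon-separated table, using the first row as the header and the first column as the index.
import io
import pandas as pd

csv_text = "unit;pmax;pmin\nCHP;100;20\nWind;50;0\n"
units = pd.read_csv(io.StringIO(csv_text), sep=";", header=0, index_col=0)
print(units.loc["CHP", "pmax"])  # 100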
from typing import Any, Dict, Type # NOQA import logging from easydict import EasyDict from kedro.utils import load_obj import numpy as np import pandas as pd import sklearn # NOQA from sklearn.metrics import ( accuracy_score, confusion_matrix, f1_score, precision_score, recall_score, roc_auc_score, ) log = logging.getLogger(__name__) def get_cols_features( df, non_feature_cols=[ "Treatment", "Outcome", "TransformedOutcome", "Propensity", "Recommendation", ], ): return [column for column in df.columns if column not in non_feature_cols] def concat_train_test(args, train, test): r""" Concatenate train and test series. Use series.xs('train') or series.xs('test') to split """ if test is None: series = pd.concat( [pd.Series(train)], keys=["train"], names=[args.partition_name, args.index_name], ) else: series = pd.concat( [pd.Series(train), pd.Series(test)], keys=["train", "test"], names=[args.partition_name, args.index_name], ) return series def concat_train_test_df(args, train, test): r""" Concatenate train and test data frames. Use df.xs('train') or df.xs('test') to split. """ df = pd.concat( [train, test], keys=["train", "test"], names=[args.partition_name, args.index_name], ) return df def len_t(df, treatment=1.0, col_treatment="Treatment"): return df.query("{}=={}".format(col_treatment, treatment)).shape[0] def len_o(df, outcome=1.0, col_outcome="Outcome"): return df.query("{}=={}".format(col_outcome, outcome)).shape[0] def len_to( df, treatment=1.0, outcome=1.0, col_treatment="Treatment", col_outcome="Outcome" ): len_ = df.query( "{}=={} & {}=={}".format(col_treatment, treatment, col_outcome, outcome) ).shape[0] return len_ def treatment_fraction_(df, col_treatment="Treatment"): return len_t(df, col_treatment=col_treatment) / len(df) def treatment_fractions_( args, # type: Dict[str, Any] df, # type: Type[pd.DataFrame] ): # type: (...) 
-> Type[EasyDict] col_treatment = args.col_treatment treatment_fractions = { "train": treatment_fraction_(df.xs("train"), col_treatment=col_treatment), "test": treatment_fraction_(df.xs("test"), col_treatment=col_treatment), } return EasyDict(treatment_fractions) def outcome_fraction_(df, col_outcome="Outcome"): return len_o(df, col_outcome=col_outcome) / len(df) def overall_uplift_gain_( df, treatment=1.0, outcome=1.0, col_treatment="Treatment", col_outcome="Outcome" ): overall_uplift_gain = ( len_to(df, col_treatment=col_treatment, col_outcome=col_outcome) / len_t(df, col_treatment=col_treatment) ) - ( len_to(df, 0, 1, col_treatment=col_treatment, col_outcome=col_outcome) / len_t(df, 0, col_treatment=col_treatment) ) return overall_uplift_gain def gain_tuple(df_, r_): treatment_fraction = treatment_fraction_(df_) outcome_fraction = outcome_fraction_(df_) overall_uplift_gain = overall_uplift_gain_(df_) cgain = np.interp(treatment_fraction, r_.cgains_x, r_.cgains_y) cgain_base = overall_uplift_gain * treatment_fraction cgain_factor = cgain / cgain_base return ( treatment_fraction, outcome_fraction, overall_uplift_gain, cgain, cgain_base, cgain_factor, r_.Q_cgains, r_.q1_cgains, r_.q2_cgains, ) def score_df(y_train, y_test, y_pred_train, y_pred_test, average="binary"): if ( y_train is not None and y_pred_train is not None and len(y_train) != len(y_pred_train) ): raise Exception("Lengths of true and predicted for train do not match.") if ( y_test is not None and y_pred_test is not None and len(y_test) != len(y_pred_test) ): raise Exception("Lengths of true and predicted for test do not match.") score_df = pd.DataFrame() for (partition_, y_, y_pred_) in [ ("train", y_train, y_pred_train), ("test", y_test, y_pred_test), ]: if ( y_ is not None and y_pred_ is not None and (0 <= y_).all() and (y_ <= 1).all() and (0 <= y_pred_).all() and (y_pred_ <= 1).all() ): num_classes = pd.Series(y_).nunique() score_list = [ len(y_),
pd.Series(y_)
pandas.Series
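# Illustrative sketch (example labels assumed): wrapping an array-like in pd.Series, e.g. to count
# distinct classes with nunique() as done inside score_df above.
import pandas as pd

y = [0, 1, 1, 0, 1]
num_classes = pd.Series(y).nunique()  # 2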
import os import json import random import pandas as pd import numpy as np from split_otus import calc_kmer_feat, calc_kmer_feat_merged, split_otus, write_kmer_out from config import cfg def create_lsa_from_sparcc(sparcc_thresh): begin_idx = 0 for graph_idx in cfg.USE_GRAPH: cfg.set_graph_idx(graph_idx) path = os.path.join(cfg.DATA_DIR, cfg.SPARCC_FILE) sparcc_data = pd.read_csv(path, sep='\t', index_col=0) cols = sparcc_data.columns.values.tolist() rows = sparcc_data.index.values.tolist() col_otu_idx = list(map(lambda x: int(x[4:]), cols)) row_otu_idx = list(map(lambda x: int(x[4:]), rows)) sparcc_mat = sparcc_data.to_numpy() edge_idx = np.where(sparcc_mat >= sparcc_thresh) # 边选择 # edge_idx = np.where((sparcc_mat >= sparcc_thresh)|(sparcc_mat<=-sparcc_thresh) ) row_otu_idx = np.array(row_otu_idx) col_otu_idx = np.array(col_otu_idx) otu_src_idx = ['Graph{}_{}'.format(graph_idx, idx) for idx in row_otu_idx[edge_idx[0]]] otu_dst_idx = ['Graph{}_{}'.format(graph_idx, idx) for idx in row_otu_idx[edge_idx[1]]] begin_idx += len(cols) if graph_idx == 1: edge_data = pd.DataFrame.from_dict({'index1': otu_src_idx, 'index2': otu_dst_idx}) else: new_edge_data = pd.DataFrame.from_dict({'index1': otu_src_idx, 'index2': otu_dst_idx}) edge_data = pd.concat([edge_data, new_edge_data]) # write like lsa format edge_data.to_csv(os.path.join(cfg.MERGED_OUTPUT_DIR, cfg.LSA_EDGE_FILE), sep='\t') def create_id_map(): # to avoid otu not in kmer feats id_map = {} i = 0 for graph_idx in cfg.USE_GRAPH: cfg.set_graph_idx(graph_idx) feats = calc_kmer_feat(cfg.KMER_LENGH) path = os.path.join(cfg.DATA_DIR, cfg.SPARCC_FILE) sparcc_data =
pd.read_csv(path, sep='\t', index_col=0)
pandas.read_csv
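# Illustrative sketch (hypothetical OTU correlation table assumed): reading a tab-separated matrix
# with pd.read_csv, taking the first column as the row index before converting to a NumPy array.
import io
import pandas as pd

tsv_text = "\tOTU_1\tOTU_2\nOTU_1\t1.0\t0.7\nOTU_2\t0.7\t1.0\n"
sparcc_data = pd.read_csv(io.StringIO(tsv_text), sep="\t", index_col=0)
sparcc_mat = sparcc_data.to_numpy()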
# -*- tab-width:4;indent-tabs-mode:nil;show-trailing-whitespace:t;rm-trailing-spaces:t -*- # vi: set ts=2 noet: import os import io import math import PIL import PIL.ImageDraw import PIL.ImageOps import boto3 import mysql.connector import numpy as np import pandas as pd import matplotlib as mpl import xlsxwriter def retrieve_object_coordinates_from_db( con, object_ids, key_object, verbose=False): """ object_ids is a pd.DataFrame with rows representing objects and columns Plate_Name Image_Metadata_WellID Image_Metadata_FieldID <key_object>_Number_Object_Number Retrieve image information from <Plate_Name>_Per_<key_object> Return a DataFrame for each (object, dye) with columns Plate_Name ImageNumber <key_object>_Number_Object_Number <key_object>_AreaShape_Center_X <key_object>_AreaShape_Center_Y """ required_columns = [ "Plate_Name", 'Image_Metadata_WellID', 'Image_Metadata_FieldID', f'{key_object}_Number_Object_Number'] for required_column in required_columns: if required_column not in object_ids.columns: raise Exception(f"Missing required column {required_column}") object_coordinates = [] cursor = con.cursor() for object_index in range(object_ids.shape[0]): object_params = object_ids.iloc[object_index] if verbose: print(f"Getting coordinates for object:") print(f" Plate_Name: '{object_params['Plate_Name']}'") print(f" Image_Metadata_WellID: '{object_params['Image_Metadata_WellID']}'") print(f" Image_Metadata_FieldID: '{object_params['Image_Metadata_FieldID']}'") print(f" {key_object}_Number_Object_Number: '{object_params[f'{key_object}_Number_Object_Number']}'") #Object Info query = f""" SELECT key_object.{key_object}_AreaShape_Center_X, key_object.{key_object}_AreaShape_Center_Y FROM {f"{object_params['Plate_Name']}_Per_Image"} AS image, {f"{object_params['Plate_Name']}_Per_{key_object}"} AS key_object WHERE image.Image_Metadata_WellID = '{object_params['Image_Metadata_WellID']}' AND image.Image_Metadata_FieldID = '{object_params['Image_Metadata_FieldID']}' AND key_object.ImageNumber = image.ImageNumber AND key_object.{key_object}_Number_Object_Number = {object_params[f'{key_object}_Number_Object_Number']}; """ if verbose: print(query) cursor.execute(query) values = cursor.fetchone() object_coordinates.append(dict( object_params.to_dict(), **{ f"{key_object}_AreaShape_Center_X" : values[0], f"{key_object}_AreaShape_Center_Y" : values[1]})) cursor.close() object_coordinates =
pd.DataFrame(object_coordinates)
pandas.DataFrame
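# Illustrative sketch (hypothetical records assumed): building a DataFrame from a list of dicts,
# mirroring how object_coordinates is accumulated one row per object above.
import pandas as pd

records = [
    {"Plate_Name": "plate_A", "Cells_AreaShape_Center_X": 10.5, "Cells_AreaShape_Center_Y": 20.0},
    {"Plate_Name": "plate_A", "Cells_AreaShape_Center_X": 33.0, "Cells_AreaShape_Center_Y": 7.25},
]
object_coordinates = pd.DataFrame(records)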
# Author: <NAME> # Created: 7/20/20, 3:43 PM import logging import argparse import os from timeit import default_timer as timer import pandas as pd from subprocess import CalledProcessError from typing import * # noinspection All import pathmagic # noinspection PyUnresolvedReferences import mg_log # runs init in mg_log and configures logger # Custom imports from mg_container.genome_list import GenomeInfoList, GenomeInfo from mg_general import Environment, add_env_args_to_parser import mg_argparse.parallelization from mg_general.general import get_value, os_join from mg_general.genome_splitter import GenomeSplitter from mg_io.general import remove_p, mkdir_p from mg_io.labels import read_labels_from_file, write_labels_to_file from mg_io.shelf import read_sequences_for_gi, read_labels_for_gi from mg_models.shelf import run_gms2, run_prodigal, run_meta_prodigal, run_mgm2, run_mgm, run_fgs, run_mga, run_mgm2_autogcode from mg_options.parallelization import ParallelizationOptions from mg_parallelization.generic_threading import run_n_per_thread from mg_parallelization.pbs import PBS from mg_pbs_data.mergers import merge_identity from mg_pbs_data.splitters import split_gil from mg_viz.shelf import mkstemp_closed # ------------------------------ # # Parse CMD # # ------------------------------ # parser = argparse.ArgumentParser("Run tools on genome chunks.") parser.add_argument('--pf-gil', required=True) parser.add_argument('--tools', required=True, nargs="+", choices=["gms2", "mgm", "mgm2", "mga", "mgm2_auto", "mprodigal", "prodigal", "ncbi", "verified", "fgs"], type=str.lower) parser.add_argument('--dn_tools', nargs="+") parser.add_argument('--dn-prefix', default=None, help="Applies prefix to all run directories") parser.add_argument('--pf-summary', required=True, help="Output file that will contain summary of runs") parser.add_argument('--force-split-in-intergenic', action='store_true') parser.add_argument('--skip-if-exists', action='store_true') parser.add_argument('--pf-mgm2-mod', type=os.path.abspath) parser.add_argument('--pf-mgm-mod', type=os.path.abspath) parser.add_argument('--chunk-sizes-nt', nargs="+", default=[250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000, 5000 ], type=int) mg_argparse.parallelization.add_parallelization_options(parser) add_env_args_to_parser(parser) parsed_args = parser.parse_args() # ------------------------------ # # Main Code # # ------------------------------ # # Load environment variables my_env = Environment.init_from_argparse(parsed_args) # Setup logger logging.basicConfig(level=parsed_args.loglevel) logger = logging.getLogger("logger") # type: logging.Logger def run_tool_on_chunk(env, tool, pf_sequences, pf_prediction, **kwargs): # type: (Environment, str, str, str, Dict[str, Any]) -> None skip_if_exists = get_value(kwargs, "skip_if_exists", False) pf_mgm2_mod = get_value(kwargs, "pf_mgm2_mod", required=tool == "mgm2") pf_mgm_mod = get_value(kwargs, "pf_mgm_mod", required=tool == "mgm") pf_labels = get_value(kwargs, "pf_labels", required=tool in {"verified", "ncbi"}) # dn_labels = get_value(kwargs, "dn_labels", default=None) genome_splitter = get_value(kwargs, "genome_splitter", required=tool in {"verified", "ncbi"}) if skip_if_exists and os.path.isfile(pf_prediction): return try: if tool == "gms2": run_gms2(env, pf_sequences, pf_prediction, **kwargs) elif tool == "prodigal": run_prodigal(env, pf_sequences, pf_prediction, **kwargs) elif tool == "mprodigal": run_meta_prodigal(env, pf_sequences, pf_prediction, **kwargs) elif tool == 
"mgm2": run_mgm2(env, pf_sequences, pf_mgm2_mod, pf_prediction) elif tool == "mgm": run_mgm(env, pf_sequences, pf_mgm_mod, pf_prediction) elif tool == "fgs": run_fgs(env, pf_sequences, pf_prediction) elif tool == "mga": run_mga(env, pf_sequences, pf_prediction) elif tool == "mgm2_auto": run_mgm2_autogcode(env, pf_sequences, pf_prediction) # elif tool in {"ncbi", "verified", "sbsp", "sbsp_plus"}: # apply_labels_to_genome_splitter(env, pf_labels, genome_splitter, ) else: raise NotImplementedError() except CalledProcessError: logger.warning(f"Could not run {tool} on {pf_sequences}") def run_tools_on_chunk(env, gi, tools, chunk, **kwargs): # type: (Environment, GenomeInfo, List[str], int, Dict[str, Any]) -> pd.DataFrame dn_tools = get_value(kwargs, "dn_tools", tools) dn_prefix = get_value(kwargs, "dn_prefix", "") skip_if_exists = get_value(kwargs, "skip_if_exists", False) # split genome into chunks gs = GenomeSplitter( read_sequences_for_gi(env, gi), chunk, labels=read_labels_for_gi(env, gi), allow_splits_in_cds=kwargs.get("allow_splits_in_cds") ) # FIXME: Account for GMS2 pf_chunks = mkstemp_closed(dir=env["pd-work"], suffix=".fasta") gs.write_to_file(pf_chunks) list_entries = list() for t, dn in zip(tools, dn_tools): logger.debug(f"{gi.name};{chunk};{t}") pd_run = os_join(env["pd-work"], gi.name, f"{dn_prefix}{dn}_{chunk}") mkdir_p(pd_run) start = timer() pf_prediction = os_join(pd_run, "prediction.gff") skip = False if skip_if_exists and os.path.isfile(pf_prediction): skip = True if not skip: run_tool_on_chunk( env.duplicate({"pd-work": pd_run}), t, pf_chunks, pf_prediction, **kwargs ) key_value_delimiters_gff = { "mgm": " ", "mgm2": " ", "gms2": " ", "mgm2_auto": " ", "mprodigal": "=", "prodigal": "=", "fgs": "=", "mga": "=" } attribute_delimiter_gff = { "mgm": ";" } # update labels file based on offset labels = read_labels_from_file(pf_prediction, shift=0, key_value_delimiter=key_value_delimiters_gff.get( t.lower(), "=" ), attribute_delimiter=attribute_delimiter_gff.get(t.lower()), ignore_partial=False) seqname_to_offset = {x[0].id: x[1] for x in gs.split_sequences_} seqname_to_info = {x[0].id: x for x in gs.split_sequences_} for l in labels: # add attribute indicating index of chunk in original sequence (to allow for comparing of partial genes) l.set_attribute_value( "chunk_left_in_original", f"{seqname_to_info[l.seqname()][2]}" ) l.set_attribute_value( "chunk_right_in_original", f"{seqname_to_info[l.seqname()][3]}" ) l.coordinates().left += seqname_to_offset[l.seqname()] l.coordinates().right += seqname_to_offset[l.seqname()] l.set_seqname(l.seqname().split("_offset")[0]) write_labels_to_file(labels, pf_prediction, shift_coordinates_by=0) end = timer() list_entries.append({ "Genome": gi.name, "Clade": gi.attributes.get("ancestor"), "Tool": t, "Chunk Size": chunk, "Predictions": pf_prediction, "Runtime": end - start }) remove_p(pf_chunks) return pd.DataFrame(list_entries) def run_tools_on_gi(env, gi, tools, chunks, **kwargs): # type: (Environment, GenomeInfo, List[str], List[int], Dict[str, Any]) -> pd.DataFrame env = env.duplicate({'pd-work': env["pd-runs"]}) num_processors = get_value(kwargs, "num_processors", 1, valid_type=int) if num_processors > 1: list_df = run_n_per_thread(chunks, run_tools_on_chunk, "chunk", { "env": env, "gi": gi, "tools": tools, **kwargs }) else: list_df = list() for chunk in chunks: logger.debug(f"{gi.name};{chunk}") curr = run_tools_on_chunk(env, gi, tools, chunk, **kwargs) list_df.append(curr) return pd.concat(list_df, sort=False, ignore_index=True) def 
run_tools_on_gil(env, gil, tools, chunks, **kwargs): # type: (Environment, GenomeInfoList, List[str], List[int], Dict[str, Any]) -> pd.DataFrame list_df = list() for gi in gil: list_df.append(run_tools_on_gi(env, gi, tools, chunks, **kwargs)) return
pd.concat(list_df, sort=False, ignore_index=True)
pandas.concat
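# Illustrative sketch (example per-chunk frames assumed): pd.concat stacks the per-genome result
# frames; ignore_index=True rebuilds a fresh RangeIndex and sort=False keeps the column order as-is.
import pandas as pd

list_df = [
    pd.DataFrame({"Tool": ["gms2"], "Runtime": [1.2]}),
    pd.DataFrame({"Tool": ["prodigal"], "Runtime": [0.8]}),
]
summary = pd.concat(list_df, sort=False, ignore_index=True)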
''' <NAME> Python 3 possibility calculator(v2) ''' import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from matplotlib.ticker import MultipleLocator class Hat: def __init__(self, contents, seed=None): ''' contents: list of 5 integer [white, red, yellow, green, blue] that indicates the amount of balls of each color seed: optional, integer to initialize the random generator for reproducibility, default is none ''' if len(contents) != 5: raise AttributeError('Invalid input, contents must be a list of 5 integer [white, red, yellow, green, blue]') if min(contents) < 0: raise AttributeError('Invalid input, the minimum amount of color is 0') self.contents = contents self.amount = 0 self.ball_choice = [] for i in range(5): if self.contents[i]: self.ball_choice.append(i) self.amount += contents[i] #_ random generator self.rng = np.random.default_rng(seed) def draw(self, num): ''' input type: integer return type: list of count(order: white, red, yellow, green, blue) This method should remove balls at random from contents. The balls should not go back into the hat during the draw, similar to an urn experiment without replacement. If the number of balls to draw exceeds the available quantity, return all the balls. ''' if num <= 0: raise AttributeError('Invalid input; num must be an integer > 0') if num >= self.amount: return self.contents cnt = self.contents[ : ] candidates = self.ball_choice[ : ] res = [0] * 5 for _ in range(num): #_ set p for not uniform distribution; the cnt affects the color picked p = np.array(list(cnt[i] for i in candidates)) i = self.rng.choice(candidates, p = p / p.sum() ) cnt[i] -= 1 res[i] += 1 if not cnt[i]: candidates.remove(i) return res class Dice: def __init__(self, sides, seed=None): ''' side: int, side/ face of dice seed: optional, integer to initialize the random generator for reproducibility, default is none ''' self.sides = sides self.rng = np.random.default_rng(seed) def rolling(self): ''' output: integer of point ''' return self.rng.integers(1, self.sides + 1) class Experiment: def __init__(self): pass def hat_experiment(self, hat, num_balls_drawn, experiment_time, expected_balls): ''' calculate the possibility of successful prediction (occurrence of color-ball) return possibility(float) and print statistic charts of occurrence of each color hat: A hat object containing balls that should be copied inside the function num_balls_drawn: integer, number of balls to draw out of the hat in each experiment. experiment_time: integer, number of experiments to perform. expected_balls: list of 5 integer [white, red, yellow, green, blue] that indicating the exact group attempt to draw from the hat. 
''' if experiment_time <= 0: raise AttributeError('Invalid input, experiment time must be a positive integer') if len(expected_balls) != 5: raise AttributeError('Invalid input, expected balls must be a list of 5 integer [white, red, yellow, green, blue]') success = 0 data = [] cmap = ['#aaaaaa', '#ff6666', '#e6e600', '#8cff66', '#66e0ff'] fig, (ax1, ax2) = plt.subplots(2,1) #_ calculate possibility while generating visualization chart ax1 for x in range(experiment_time): cnt = hat.draw(num_balls_drawn) data.append(cnt) flag = True padding = 0 # sum of other colors so far for i in range(5): if cnt[i] < expected_balls[i]: flag = False ax1.bar(x=x, height=cnt[i], bottom=padding, color=cmap[i]) padding += cnt[i] if flag: success += 1 ax1.set_title('color composement') ax1.set_xlabel('experiment no.') ax1.set_ylabel('number') data = pd.DataFrame(data) loc = MultipleLocator(1) for c in data: #_ freq = frequency of occurrence happens #_ freq.index = occurrence of ball per draw freq = data[c].value_counts().sort_index() ax2.plot(freq.index, freq, color=cmap[c]) ax2.set_title('Compare of occurrence') ax2.grid(True) ax2.xaxis.set_major_locator(loc) ax2.set_xlabel('occurrence in each draw') ax2.set_ylabel('frequency') fig.tight_layout() #_ avoid label overlapping plt.show() print(success / experiment_time) return success / experiment_time def dice_experiment(self, dice, num_dices, experiment_time, expected_sum): ''' Rolling num_dices dices at each time, return the possibility(float) of having exactly same sum of points as expected, and print chart of occurrence of specific point dice: Dice object; expected_sum/ roll_time / experiment_time: integer ''' if experiment_time <= 0: raise AttributeError('Invalid input, experiment time must be a positive integer') if num_dices < 0: raise AttributeError("Invalid input; num of dice can't less than 0") if expected_sum > num_dices * dice.sides or expected_sum < num_dices: return 0 data = [] sum_points = [] success = 0 for _ in range(experiment_time): _sum = 0 cnt = np.zeros(dice.sides, dtype=int) for __ in range(num_dices): point = dice.rolling() _sum += point cnt[point - 1] += 1 data.append(cnt) sum_points.append(_sum) if _sum == expected_sum: success += 1 #_ create figures fig = plt.figure(constrained_layout=True) gs = GridSpec(2, 2, figure=fig) ax1 = fig.add_subplot(gs[0, 0]) ax2 = fig.add_subplot(gs[1, 0]) ax3 = fig.add_subplot(gs[:, 1]) #_ labels and locator bins = np.arange(num_dices, num_dices * dice.sides + 1) loc = MultipleLocator(1) points = np.arange(1, dice.sides + 1) #_ data df =
pd.DataFrame(data, columns=points)
pandas.DataFrame
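# Illustrative sketch (example dice counts assumed): pd.DataFrame(data, columns=points) turns the
# per-experiment face counts into a frame whose column labels are the face values themselves.
import numpy as np
import pandas as pd

points = np.arange(1, 7)  # faces of a six-sided die
data = [[2, 0, 1, 0, 0, 1], [0, 1, 1, 1, 1, 0]]
df = pd.DataFrame(data, columns=points)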
""" Area Weighted Interpolation """ import numpy as np import geopandas as gpd from ._vectorized_raster_interpolation import _fast_append_profile_in_gdf import warnings from scipy.sparse import dok_matrix, diags, coo_matrix import pandas as pd from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs def _area_tables_binning(source_df, target_df, spatial_index): """Construct area allocation and source-target correspondence tables using a spatial indexing approach ... NOTE: this currently relies on Geopandas' spatial index machinery Parameters ---------- source_df : geopandas.GeoDataFrame GeoDataFrame containing input data and polygons target_df : geopandas.GeoDataFramee GeoDataFrame defining the output geometries spatial_index : str Spatial index to use to build the allocation of area from source to target tables. It currently support the following values: - "source": build the spatial index on `source_df` - "target": build the spatial index on `target_df` - "auto": attempts to guess the most efficient alternative. Currently, this option uses the largest table to build the index, and performs a `bulk_query` on the shorter table. Returns ------- tables : scipy.sparse.dok_matrix """ if _check_crs(source_df, target_df): pass else: return None df1 = source_df.copy() df2 = target_df.copy() # it is generally more performant to use the longer df as spatial index if spatial_index == "auto": if df1.shape[0] > df2.shape[0]: spatial_index = "source" else: spatial_index = "target" if spatial_index == "source": ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate="intersects") elif spatial_index == "target": ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate="intersects") else: raise ValueError( f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'." ) areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area table = coo_matrix( (areas, (ids_src, ids_tgt),), shape=(df1.shape[0], df2.shape[0]), dtype=np.float32, ) table = table.todok() return table def _area_tables(source_df, target_df): """ Construct area allocation and source-target correspondence tables. Parameters ---------- source_df : geopandas.GeoDataFrame target_df : geopandas.GeoDataFrame Returns ------- tables : tuple (optional) two 2-D numpy arrays SU: area of intersection of source geometry i with union geometry j UT: binary mapping of union geometry j to target geometry t Notes ----- The assumption is both dataframes have the same coordinate reference system. 
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry SU Maps source geometry to union geometry, UT maps union geometry to target geometry """ if _check_crs(source_df, target_df): pass else: return None source_df = source_df.copy() source_df = source_df.copy() n_s = source_df.shape[0] n_t = target_df.shape[0] _left = np.arange(n_s) _right = np.arange(n_t) source_df.loc[:, "_left"] = _left # create temporary index for union target_df.loc[:, "_right"] = _right # create temporary index for union res_union = gpd.overlay(source_df, target_df, how="union") n_u, _ = res_union.shape SU = np.zeros( (n_s, n_u) ) # holds area of intersection of source geom with union geom UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom for index, row in res_union.iterrows(): # only union polygons that intersect both a source and a target geometry matter if not np.isnan(row["_left"]) and not np.isnan(row["_right"]): s_id = int(row["_left"]) t_id = int(row["_right"]) SU[s_id, index] = row[row.geometry.name].area UT[index, t_id] = 1 source_df.drop(["_left"], axis=1, inplace=True) target_df.drop(["_right"], axis=1, inplace=True) return SU, UT def _area_interpolate_binning( source_df, target_df, extensive_variables=None, intensive_variables=None, table=None, allocate_total=True, spatial_index="auto", ): """ Area interpolation for extensive and intensive variables. Parameters ---------- source_df : geopandas.GeoDataFrame target_df : geopandas.GeoDataFrame extensive_variables : list [Optional. Default=None] Columns in dataframes for extensive variables intensive_variables : list [Optional. Default=None] Columns in dataframes for intensive variables table : scipy.sparse.dok_matrix [Optional. Default=None] Area allocation source-target correspondence table. If not provided, it will be built from `source_df` and `target_df` using `tobler.area_interpolate._area_tables_binning` allocate_total : boolean [Optional. Default=True] True if total value of source area should be allocated. False if denominator is area of i. Note that the two cases would be identical when the area of the source polygon is exhausted by intersections. See Notes for more details. spatial_index : str [Optional. Default="auto"] Spatial index to use to build the allocation of area from source to target tables. It currently support the following values: - "source": build the spatial index on `source_df` - "target": build the spatial index on `target_df` - "auto": attempts to guess the most efficient alternative. Currently, this option uses the largest table to build the index, and performs a `bulk_query` on the shorter table. Returns ------- estimates : geopandas.GeoDataFrame new geodaraframe with interpolated variables as columns and target_df geometry as output geometry Notes ----- The assumption is both dataframes have the same coordinate reference system. For an extensive variable, the estimate at target polygon j (default case) is: .. math:: v_j = \\sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \\sum_k a_{i,k} If the area of the source polygon is not exhausted by intersections with target polygons and there is reason to not allocate the complete value of an extensive attribute, then setting allocate_total=False will use the following weights: .. math:: v_j = \\sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / a_i where a_i is the total area of source polygon i. For an intensive variable, the estimate at target polygon j is: .. 
math:: v_j = \\sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \\sum_k a_{k,j} """ source_df = source_df.copy() target_df = target_df.copy() if _check_crs(source_df, target_df): pass else: return None if table is None: table = _area_tables_binning(source_df, target_df, spatial_index) den = source_df[source_df.geometry.name].area.values if allocate_total: den = np.asarray(table.sum(axis=1)) den = den + (den == 0) den = 1.0 / den n = den.shape[0] den = den.reshape((n,)) den = diags([den], [0]) weights = den.dot(table) # row standardize table dfs = [] extensive = [] if extensive_variables: for variable in extensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) estimates = diags([vals], [0]).dot(weights) estimates = estimates.sum(axis=0) extensive.append(estimates.tolist()[0]) extensive = np.asarray(extensive) extensive = np.array(extensive) extensive = pd.DataFrame(extensive.T, columns=extensive_variables) area = np.asarray(table.sum(axis=0)) den = 1.0 / (area + (area == 0)) n, k = den.shape den = den.reshape((k,)) den = diags([den], [0]) weights = table.dot(den) intensive = [] if intensive_variables: for variable in intensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) n = vals.shape[0] vals = vals.reshape((n,)) estimates = diags([vals], [0]) estimates = estimates.dot(weights).sum(axis=0) intensive.append(estimates.tolist()[0]) intensive = np.asarray(intensive) intensive = pd.DataFrame(intensive.T, columns=intensive_variables) if extensive_variables: dfs.append(extensive) if intensive_variables: dfs.append(intensive) df = pd.concat(dfs, axis=1) df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True) df = gpd.GeoDataFrame(df.replace(np.inf, np.nan)) return df def _area_interpolate( source_df, target_df, extensive_variables=None, intensive_variables=None, tables=None, allocate_total=True, ): """ Area interpolation for extensive and intensive variables. Parameters ---------- source_df : geopandas.GeoDataFrame (required) geodataframe with polygon geometries target_df : geopandas.GeoDataFrame (required) geodataframe with polygon geometries extensive_variables : list, (optional) columns in dataframes for extensive variables intensive_variables : list, (optional) columns in dataframes for intensive variables tables : tuple (optional) two 2-D numpy arrays SU: area of intersection of source geometry i with union geometry j UT: binary mapping of union geometry j to target geometry t allocate_total : boolean True if total value of source area should be allocated. False if denominator is area of i. Note that the two cases would be identical when the area of the source polygon is exhausted by intersections. See Notes for more details. Returns ------- estimates : geopandas.GeoDataFrame new geodaraframe with interpolated variables as columns and target_df geometry as output geometry Notes ----- The assumption is both dataframes have the same coordinate reference system. For an extensive variable, the estimate at target polygon j (default case) is: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \sum_k a_{i,k} If the area of the source polygon is not exhausted by intersections with target polygons and there is reason to not allocate the complete value of an extensive attribute, then setting allocate_total=False will use the following weights: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / a_i where a_i is the total area of source polygon i. 
For an intensive variable, the estimate at target polygon j is: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \sum_k a_{k,j} """ source_df = source_df.copy() target_df = target_df.copy() if _check_crs(source_df, target_df): pass else: return None if tables is None: SU, UT = _area_tables(source_df, target_df) else: SU, UT = tables den = source_df[source_df.geometry.name].area.values if allocate_total: den = SU.sum(axis=1) den = den + (den == 0) weights = np.dot(np.diag(1 / den), SU) dfs = [] extensive = [] if extensive_variables: for variable in extensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) estimates = np.dot(np.diag(vals), weights) estimates = np.dot(estimates, UT) estimates = estimates.sum(axis=0) extensive.append(estimates) extensive = np.array(extensive) extensive = pd.DataFrame(extensive.T, columns=extensive_variables) ST = np.dot(SU, UT) area = ST.sum(axis=0) den = np.diag(1.0 / (area + (area == 0))) weights = np.dot(ST, den) intensive = [] if intensive_variables: for variable in intensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) vals.shape = (len(vals), 1) est = (vals * weights).sum(axis=0) intensive.append(est) intensive = np.array(intensive) intensive = pd.DataFrame(intensive.T, columns=intensive_variables) if extensive_variables: dfs.append(extensive) if intensive_variables: dfs.append(intensive) df =
pd.concat(dfs, axis=1)
pandas.concat
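# Illustrative sketch (example estimate frames assumed): the extensive and intensive estimates share
# the same target index, so pd.concat(..., axis=1) joins them side by side as columns of one frame.
import pandas as pd

extensive = pd.DataFrame({"population": [120.0, 80.0]})
intensive = pd.DataFrame({"median_income": [51000.0, 47000.0]})
df = pd.concat([extensive, intensive], axis=1)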
import os import sys import argparse import random import numpy as np import torch import pandas as pd from dataloaders import datasets from torchvision import transforms import agents import time def get_out_path(args): if args.custom_folder is None: if args.offline: subdir = args.agent_name + '_' + args.model_name + '_' + 'offline/' else: subdir = args.agent_name + '_' + args.model_name else: subdir = args.custom_folder total_path = os.path.join(args.output_dir, args.scenario, subdir) # make output directory if it doesn't already exist if not os.path.exists(total_path): os.makedirs(total_path) return total_path def run(args, run): # read dataframe containing information for each task if args.offline: task_df = pd.read_csv(os.path.join('dataloaders', args.dataset + '_task_filelists', args.scenario, 'run' + str(run), 'offline', 'train_all.txt'), index_col = 0) else: task_df = pd.read_csv(os.path.join('dataloaders', args.dataset + '_task_filelists', args.scenario, 'run' + str(run), 'stream', 'train_all.txt'), index_col = 0) # get classes for each task active_out_nodes = task_df.groupby('task')['label'].unique().map(list).to_dict() # get tasks tasks = task_df.task.unique() # include classes from previous task in active output nodes for current task for i in range(1, len(tasks)): active_out_nodes[i].extend(active_out_nodes[i-1]) # since the same classes might be in multiple tasks, want to consider only the unique elements in each list # mostly an aesthetic thing, will not affect results for i in range(1, len(tasks)): active_out_nodes[i] = list(set(active_out_nodes[i])) # agent parameters agent_config = { 'lr': args.lr, 'n_class': None, 'momentum': args.momentum, 'weight_decay': args.weight_decay, 'model_type' : args.model_type, 'model_name' : args.model_name, 'agent_type' : args.agent_type, 'agent_name' : args.agent_name, 'model_weights': args.model_weights, 'pretrained': args.pretrained, 'feature_extract' : False, 'freeze_feature_extract': args.freeze_feature_extract, 'optimizer':args.optimizer, 'gpuid': args.gpuid, 'reg_coef': args.reg_coef, 'memory_size': args.memory_size, 'n_workers' : args.n_workers, 'memory_Nslots': args.memory_Nslots, 'memory_Nfeat': args.memory_Nfeat, 'freeze_batchnorm': args.freeze_batchnorm, 'freeze_memory': args.freeze_memory, 'batch_size': args.batch_size } if args.dataset == "core50": agent_config["n_class"] = 10 elif args.dataset == "toybox": agent_config["n_class"] = 12 elif args.dataset == "ilab2mlight": agent_config["n_class"] = 14 elif args.dataset == "cifar100": agent_config["n_class"] = 100 else: raise ValueError("Invalid dataset name, try 'core50', 'toybox', or 'ilab2mlight' or 'cifar100'") # initialize agent agent = agents.__dict__[args.agent_type].__dict__[args.agent_name](agent_config) if args.agent_name == "AGEM": img_size = [64, 64] else: img_size = [224, 224] print("Resizing images to " + str(img_size)) if args.dataset == 'core50': # image transformations composed = transforms.Compose([transforms.Resize(img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) # get test data test_data = datasets.CORE50( dataroot = args.dataroot, filelist_root = args.filelist_root, scenario = args.scenario, offline = args.offline, run = run, train = False, transform=composed) elif args.dataset == 'toybox' or args.dataset == 'ilab2mlight' or args.dataset == 'cifar100': # image transformations composed = transforms.Compose( [transforms.Resize(img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 
0.456, 0.406], std=[0.229, 0.224, 0.225])]) # get test data test_data = datasets.Generic_Dataset( dataroot=args.dataroot, dataset=args.dataset, filelist_root=args.filelist_root, scenario=args.scenario, offline=args.offline, run=run, train=False, transform=composed) else: raise ValueError("Invalid dataset name, try 'core50' or 'toybox' or 'ilab2mlight' or 'cifar100'") if args.validate: # splitting test set into test and validation test_size = int(0.75 * len(test_data)) val_size = len(test_data) - test_size test_data, val_data = torch.utils.data.random_split(test_data, [test_size, val_size]) else: val_data = None test_accs_1st, test_accs, val_accs, test_accs_all_epochs, test_accs_1st_all_epochs = train(agent, composed, args, run, tasks, active_out_nodes, test_data, val_data) return test_accs_1st, test_accs, val_accs, test_accs_all_epochs, test_accs_1st_all_epochs def train(agent, transforms, args, run, tasks, active_out_nodes, test_data, val_data): if args.offline: print('============BEGINNING OFFLINE LEARNING============') else: print('============BEGINNING STREAM LEARNING============') # number of tasks ntask = len(tasks) # to store test accuracies test_accs = [] test_accs_1st = [] # to store val accuracies val_accs = [] test_accs_all_epochs = [] test_accs_1st_all_epochs = [] val_accs_all_epochs = [] # iterate over tasks for task in range(ntask): print('=============Training Task ' + str(task) + '=============') agent.active_out_nodes = active_out_nodes[task] print('Active output nodes for this task: ') print(agent.active_out_nodes) test_accs_all_epochs.append([]) test_accs_1st_all_epochs.append([]) val_accs_all_epochs.append([]) if (args.n_epoch_first_task is not None) and (task == 0): n_epoch = args.n_epoch_first_task else: n_epoch = args.n_epoch for epoch in range(n_epoch): print('===' + args.agent_name + '; Epoch ' + str(epoch) + '; RUN ' + str(run) + '; TASK ' + str(task)) # get training data pertaining to chosen scenario, task, run if args.dataset == 'core50': train_data = datasets.CORE50( dataroot=args.dataroot, filelist_root=args.filelist_root, scenario=args.scenario, offline=args.offline, run=run, batch=task, transform=transforms) elif args.dataset == 'toybox' or args.dataset == 'ilab2mlight' or args.dataset == 'cifar100': train_data = datasets.Generic_Dataset( dataroot=args.dataroot, dataset=args.dataset, filelist_root=args.filelist_root, scenario=args.scenario, offline=args.offline, run=run, batch=task, transform=transforms) else: raise ValueError("Invalid dataset name, try 'core50', 'toybox', or 'ilab2mlight' or 'cifar100'") # get train loader train_loader = torch.utils.data.DataLoader( train_data, batch_size=args.batch_size, shuffle=False, num_workers = args.n_workers, pin_memory=True) if args.validate: # then test and val data are subsets, not datasets and need to be dealt with accordingly # get test data only for the seen classes test_inds = [i for i in range(len(test_data)) if test_data.dataset.labels[test_data.indices[i]] in agent.active_out_nodes] # list(range(len(test_data))) task_test_data = torch.utils.data.Subset(test_data, test_inds) #labels = [task_test_data[i] for i in range(len(task_test_data))] test_loader = torch.utils.data.DataLoader( task_test_data, batch_size=args.batch_size, shuffle=False, num_workers = args.n_workers, pin_memory=True) val_inds = [i for i in range(len(val_data)) if val_data.dataset.labels[val_data.indices[i]] in agent.active_out_nodes] task_val_data = torch.utils.data.Subset(val_data, val_inds) val_loader = torch.utils.data.DataLoader( 
task_val_data, batch_size=args.batch_size, shuffle=False, num_workers = args.n_workers, pin_memory=True) else: # get test data only for the seen classes test_inds = [i for i in range(len(test_data)) if test_data.labels[i] in agent.active_out_nodes] # list(range(len(test_data))) task_test_data = torch.utils.data.Subset(test_data, test_inds) test_loader = torch.utils.data.DataLoader( task_test_data, batch_size=args.batch_size, shuffle=False, num_workers = args.n_workers, pin_memory=True) test_inds_1st = [i for i in range(len(test_data)) if test_data.labels[i] in active_out_nodes[0]] # retrive first task task_test_data_1st = torch.utils.data.Subset(test_data, test_inds_1st) test_loader_1st = torch.utils.data.DataLoader( task_test_data_1st, batch_size=args.batch_size, shuffle=False, num_workers = args.n_workers, pin_memory=True) # learn (new_task_next is true if we are on the last epoch of this task). agent.learn_stream(train_loader, new_task_next=(epoch == n_epoch-1)) # validate if applicable if args.validate: val_acc, val_time = agent.validation(val_loader) print(' * Val Acc: {acc:.3f}, Time: {time:.2f}'.format(acc=val_acc, time=val_time)) val_accs_all_epochs[task].append(val_acc) test_acc, test_time = agent.validation(test_loader) print(' * Test Acc: {acc:.3f}, Time: {time:.2f}'.format(acc=test_acc, time=test_time)) test_accs_all_epochs[task].append(test_acc) test_acc_1st, test_time_1st = agent.validation(test_loader_1st) print(' * Test Acc (1st): {acc:.3f}, Time: {time:.2f}'.format(acc=test_acc_1st, time=test_time_1st)) test_accs_1st_all_epochs[task].append(test_acc_1st) if args.visualize: attread_filename = 'visualization/' + args.scenario + '/' + args.scenario + '_run_' + str(run) + '_task_' + str(task) + '_epoch_' + str(epoch) agent.visualize_att_read(attread_filename) agent.visualize_memory(attread_filename) if args.keep_best_net_all_tasks or (args.keep_best_task1_net and task == 0): # Save state of model torch.save(agent.model.state_dict(), os.path.join(get_out_path(args), "model_state_epoch_" + str(epoch) + ".pth")) if (args.keep_best_net_all_tasks or (args.keep_best_task1_net and task == 0)) and args.n_epoch_first_task > 1: # Reload state of network when it had highest test accuracy on first task max_acc = max(test_accs_all_epochs[task]) max_acc_ind = test_accs_all_epochs[task].index(max_acc) print("Test accs on task " + str(task) + ": " + str(test_accs_all_epochs[task])) print("Loading model parameters with this max test acc: " + str(max_acc)) agent.model.load_state_dict(torch.load( os.path.join(get_out_path(args), "model_state_epoch_" + str(max_acc_ind) + ".pth")) ) reload_test_acc, test_time = agent.validation(test_loader) print(' * Test Acc (after reloading best model): {acc:.3f}, Time: {time:.2f}'.format(acc=reload_test_acc, time=test_time)) assert reload_test_acc == max_acc, "Test accuracy of reloaded model does not match original highest test accuracy. Is the model saving and loading its state correctly?" 
# Set the test/val accs to be stored for this task to those corresponding to the best-performing network test_acc = max_acc test_acc_1st = test_accs_1st_all_epochs[task][max_acc_ind] if args.validate: val_acc = val_accs_all_epochs[task][max_acc_ind] # Delete saved network states for save_num in range(len(test_accs_all_epochs[task])): os.remove(os.path.join(get_out_path(args), "model_state_epoch_" + str(save_num) + ".pth")) # after all the epochs, store test_acc test_accs.append(test_acc) test_accs_1st.append(test_acc_1st) # same with val acc if val_data is not None: val_accs.append(val_acc) return test_accs_1st, test_accs, val_accs, test_accs_all_epochs, test_accs_1st_all_epochs def get_args(argv): # defining arguments that the user can pass into the program parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, default='core50', help="Name of the dataset to use, e.g. 'core50', 'toybox', 'ilab2mlight'") # stream vs offline learning parser.add_argument('--offline', default = False, action = 'store_true', dest = 'offline', help = "offline vs online (stream learning) training") # scenario/task parser.add_argument('--scenario', type = str, default = 'iid', help = "How to set up tasks, e.g. iid => randomly assign data to each task") parser.add_argument('--n_runs', type = int, default = 1, help = "Number of times to repeat the experiment with different data orderings") # model hyperparameters/type parser.add_argument('--model_type', type=str, default='resnet', help="The type (mlp|lenet|vgg|resnet) of backbone network") parser.add_argument('--model_name', type=str, default='ResNet18', help="The name of actual model for the backbone") parser.add_argument('--agent_type', type=str, default='default', help="The type (filename) of agent") parser.add_argument('--agent_name', type=str, default='NormalNN', help="The class name of agent") parser.add_argument('--optimizer', type=str, default='SGD', help="SGD|Adam|RMSprop|amsgrad|Adadelta|Adagrad|Adamax ...") parser.add_argument('--batch_size', type=int, default=100) parser.add_argument('--lr', type=float, default=0.001, help="Learning rate") parser.add_argument('--momentum', type=float, default=0.9) parser.add_argument('--weight_decay', type=float, default=0) parser.add_argument('--pretrained', default = False, dest = 'pretrained', action = 'store_true') parser.add_argument('--freeze_batchnorm', default = False, dest = 'freeze_batchnorm', action = 'store_true') parser.add_argument('--freeze_memory', default = False, dest = 'freeze_memory', action = 'store_true') parser.add_argument('--freeze_feature_extract', default = False, dest = 'freeze_feature_extract', action = 'store_true') parser.add_argument('--model_weights', type=str, default=None, help="The path to the file for the model weights (*.pth).") parser.add_argument('--n_epoch', type = int, default = 1, help="Number of epochs to train") parser.add_argument('--n_epoch_first_task', type=int, default=None, help="Number of epochs to train on the first task (may be different from n_epoch, which is used for the other tasks)") parser.add_argument('--keep_best_task1_net', default=False, dest='keep_best_task1_net', action='store_true', help="When training for multiple epochs on task 1, retrieve the network state (among those after each epoch) with best testing accuracy for learning subsequent tasks") parser.add_argument('--keep_best_net_all_tasks', default=False, dest='keep_best_net_all_tasks', action='store_true', help="When training for multiple epochs on more than one task: for each 
task, retrieve the network state (among those after each epoch) with best testing accuracy for learning subsequent tasks") # keep track of validation accuracy parser.add_argument('--validate', default = False, action = 'store_true', dest = 'validate', help = "To keep track of validation accuracy or not") # for regularization models parser.add_argument('--reg_coef', type=float, default=1, help="The coefficient for regularization. Larger means less plasilicity. ") # for replay models parser.add_argument('--memory_size', type=int, default=1200, help="Number of training examples to keep in memory") # for augmented memory model parser.add_argument('--memory_Nslots', type=int, default=100, help="Number of memory slots to keep in memory") parser.add_argument('--memory_Nfeat', type=int, default=512, help="Feature dim per memory slot to keep in memory") parser.add_argument('--visualize', default = False, action = 'store_true', dest = 'visualize', help = "To visualize memory and attentions (only valid for AugMem") # directories #parser.add_argument('--dataroot', type = str, default = 'data/core50', help = "Directory that contains the data") parser.add_argument('--dataroot', type = str, default = '/media/mengmi/KLAB15/Mengmi/proj_CL_NTM/data/core50', help = "Directory that contains the data") #parser.add_argument('--dataroot', type = str, default = '/home/mengmi/Projects/Proj_CL_NTM/data/core50', help = "Directory that contains the data") parser.add_argument('--filelist_root', type = str, default = 'dataloaders', help = "Directory that contains the filelists for each task") parser.add_argument('--output_dir', default='core50_outputs', help="Where to store accuracy table") parser.add_argument('--custom_folder', default=None, type=str, help="a custom subdirectory to store results") # gpu/cpu settings parser.add_argument('--gpuid', nargs="+", type=int, default=[-1], help="The list of gpuid, ex:--gpuid 3 1. Negative value means cpu-only") parser.add_argument('--n_workers', default=1, type = int, help="Number of cpu workers for dataloader") # return parsed arguments args = parser.parse_args(argv) return args def main(): start_time = time.time() # get command line arguments args = get_args(sys.argv[1:]) # appending path to cwd to directories args.dataroot = os.path.join(os.getcwd(),args.dataroot) args.output_dir = os.path.join(os.getcwd(),args.output_dir) # ensure that a valid scenario has been passed if args.scenario not in ['iid', 'class_iid', 'instance', 'class_instance']: print('Invalid scenario passed, must be one of: iid, class_iid, instance, class_instance') return # setting seed for reproducibility torch.manual_seed(0) np.random.seed(0) random.seed(0) test_accs = [] test_accs_1st = [] val_accs = [] test_accs_all_epochs = [] test_accs_1st_all_epochs = [] # iterate over runs for r in range(args.n_runs): print('=============Stream Learning Run ' + str(r) + '=============') test_acc_1st, test_acc, val_acc, test_acc_all_epochs, test_acc_1st_all_epochs = run(args, r) test_accs.append(test_acc) test_accs_1st.append(test_acc_1st) val_accs.append(val_acc) test_accs_all_epochs.append(test_acc_all_epochs) test_accs_1st_all_epochs.append(test_acc_1st_all_epochs) # converting list of list of testing accuracies for each run to a dataframe test_df =
pd.DataFrame(test_accs)
pandas.DataFrame
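For reference, a minimal sketch of the pandas.DataFrame completion above, using hypothetical per-run accuracy values (the variable contents are assumptions, not taken from the original training script):

import pandas as pd

# Hypothetical results: one list of per-task test accuracies per run.
test_accs = [[0.81, 0.74, 0.69],
             [0.83, 0.76, 0.71]]

# The completion converts the nested list into a runs-by-tasks DataFrame,
# which can then be written out alongside the other experiment outputs.
test_df = pd.DataFrame(test_accs)
print(test_df)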
import os import json import dpdata import numpy as np import pandas as pd import seaborn as sns from glob import glob import shutil from ase.io import read, write from matplotlib import pyplot as plt from miko.utils import logger from miko.resources.submit import JobFactory from miko.utils import LogFactory from miko.utils import canvas_style from miko.utils.lammps import * class DPTask(object): """DPTask is a class reading a DP-GEN directory, where the DP-GEN task run. """ def __init__(self, path, param_file, machine_file, record_file): """ Generate a class of tesla task. :param path: The path of the tesla task. :param param_file: The param json file name. :param machine_file: The machine json file name. :param record_file: The record file name. """ self.path = os.path.abspath(path) self.param_file = param_file self.machine_file = machine_file self.record_file = record_file self._load_task() def train_lcurve(self, iteration=None, model=0, test=True, **kwargs): if iteration is None: if self.step_code < 2: iteration = self.iteration - 1 else: iteration = self.iteration n_iter = 'iter.' + str(iteration).zfill(6) lcurve_path = os.path.join( self.path, n_iter, f'00.train/{str(model).zfill(3)}/lcurve.out') step = np.loadtxt(lcurve_path, usecols=0) if test: energy_train = np.loadtxt(lcurve_path, usecols=4) energy_test = np.loadtxt(lcurve_path, usecols=3) force_train = np.loadtxt(lcurve_path, usecols=6) force_test = np.loadtxt(lcurve_path, usecols=5) else: energy_train = np.loadtxt(lcurve_path, usecols=2) force_train = np.loadtxt(lcurve_path, usecols=3) canvas_style(**kwargs) fig, axs = plt.subplots(2, 1) fig.suptitle("DeepMD training and tests error") # energy figure axs[0].scatter(step[10:], energy_train[10:], alpha=0.4, label='train') if test: axs[0].scatter(step[10:], energy_test[10:], alpha=0.4, label='tests') axs[0].hlines(0.005, step[0], step[-1], linestyles='--', colors='red', label='5 meV') axs[0].hlines(0.01, step[0], step[-1], linestyles='--', colors='blue', label='10 meV') axs[0].hlines(0.05, step[0], step[-1], linestyles='--', label='50 meV') axs[0].set_xlabel('Number of training batch') axs[0].set_ylabel('$E$(eV)') axs[0].legend() # force figure axs[1].scatter(step[10:], force_train[10:], alpha=0.4, label='train') if test: axs[1].scatter(step[10:], force_test[10:], alpha=0.4, label='tests') axs[1].hlines(0.05, step[0], step[-1], linestyles='--', colors='red', label='50 meV/Å') axs[1].hlines(0.1, step[0], step[-1], linestyles='--', colors='blue', label='100 meV/Å') axs[1].hlines(0.2, step[0], step[-1], linestyles='--', label='200 meV/Å') axs[1].set_xlabel('Number of training batch') axs[1].set_ylabel('$F$(eV/Å)') axs[1].legend() return fig def md_make_set(self, iteration=None): location = self.path if iteration is None: if self.step_code < 6: iteration = self.iteration - 1 else: iteration = self.iteration n_iter = 'iter.' 
+ str(iteration).zfill(6) all_data = [] for task in glob(f'{location}/{n_iter}/01.model_devi/task*'): step = np.loadtxt(f'{task}/model_devi.out', usecols=0) dump_freq = step[1] - step[0] max_devi_f = np.loadtxt(f'{task}/model_devi.out', usecols=4) max_devi_e = np.loadtxt(f'{task}/model_devi.out', usecols=3) with open(f'{task}/input.lammps', 'r') as f: lines = f.readlines() temp = float(lines[3].split()[3]) start, final = 0, 0 with open(f'{task}/model_devi.log', 'r') as f: for i, line in enumerate(f): key_line = line.strip() if 'Step Temp' in key_line: start = i + 1 elif 'Loop time of' in key_line: final = i with open(f'{task}/model_devi.log', 'r') as f: if dump_freq > 10: step = int(dump_freq / 10) lines = f.readlines()[start:final:step] else: lines = f.readlines()[start:final] pot_energy = np.array( [p.split()[2] for p in lines if 'WARNING' not in p]).astype('float') try: with open(f'{task}/job.json', 'r') as f: job_dict = json.load(f) except Exception as e: print(e) job_dict = {} result_dict = { 'iter': n_iter, 'temps': temp, 'max_devi_e': max_devi_e, 'max_devi_f': max_devi_f, 'task': task, 't_freq': dump_freq, 'pot_energy': pot_energy } all_dict = {**result_dict, **job_dict} all_data.append(all_dict) return all_data def md_set_pd(self, iteration=None): if iteration is None: if self.step_code < 6: iteration = self.iteration - 1 else: iteration = self.iteration all_data = self.md_make_set(iteration=iteration) df = pd.DataFrame(all_data) return df def md_set_pkl(self, iteration=None): df = self.md_set_pd(iteration=iteration) save_path = self.path os.makedirs(name=f'{save_path}/data_pkl', exist_ok=True) df.to_pickle( f'{save_path}/data_pkl/data_{str(iteration).zfill(2)}.pkl') def md_set_load_pkl(self, iteration): pkl_path = os.path.join( self.path, f'data_pkl/data_{str(iteration).zfill(2)}.pkl') df =
pd.read_pickle(pkl_path)
pandas.read_pickle
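A small, self-contained sketch of the pandas.read_pickle call completed above; the file path and contents are placeholders, assuming the pickle was written by DataFrame.to_pickle as in md_set_pkl:

import pandas as pd

# Round-trip: to_pickle writes the DataFrame, read_pickle restores it unchanged.
df = pd.DataFrame({'iter': ['iter.000000'], 'temps': [300.0]})
df.to_pickle('data_00.pkl')

restored = pd.read_pickle('data_00.pkl')
print(restored.equals(df))  # True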
""" .. module:: projectdirectory :platform: Unix, Windows :synopsis: A module for examining collections of git repositories as a whole .. moduleauthor:: <NAME> <<EMAIL>> """ import math import sys import os import numpy as np import pandas as pd from git import GitCommandError from gitpandas.repository import Repository __author__ = 'willmcginnis' class ProjectDirectory(object): """ An object that refers to a directory full of git repositories, for bulk analysis. It contains a collection of git-pandas repository objects, created by os.walk-ing a directory to file all child .git subdirectories. :param working_dir: (optional, default=None), the working directory to search for repositories in, None for cwd, or an explicit list of directories containing git repositories :param ignore: (optional, default=None), a list of directories to ignore when searching for git repos. :param verbose: (default=True), if True, will print out verbose logging to terminal :return: """ def __init__(self, working_dir=None, ignore=None, verbose=True): if working_dir is None: self.repo_dirs = set([x[0].split('.git')[0] for x in os.walk(os.getcwd()) if '.git' in x[0]]) elif isinstance(working_dir, list): self.repo_dirs = working_dir else: self.repo_dirs = set([x[0].split('.git')[0] for x in os.walk(working_dir) if '.git' in x[0]]) self.repos = [Repository(r, verbose=verbose) for r in self.repo_dirs] if ignore is not None: self.repos = [x for x in self.repos if x._repo_name not in ignore] def _repo_name(self): """ Returns a DataFrame of the repo names present in this project directory :return: DataFrame """ ds = [[x._repo_name()] for x in self.repos] df = pd.DataFrame(ds, columns=['repository']) return df def is_bare(self): """ Returns a dataframe of repo names and whether or not they are bare. :return: DataFrame """ ds = [[x._repo_name(), x.is_bare()] for x in self.repos] df = pd.DataFrame(ds, columns=['repository', 'is_bare']) return df def has_coverage(self): """ Returns a DataFrame of repo names and whether or not they have a .coverage file that can be parsed :return: DataFrame """ ds = [[x._repo_name(), x.has_coverage()] for x in self.repos] df = pd.DataFrame(ds, columns=['repository', 'has_coverage']) return df def coverage(self): """ Will return a DataFrame with coverage information (if available) for each repo in the project). If there is a .coverage file available, this will attempt to form a DataFrame with that information in it, which will contain the columns: * repository * filename * lines_covered * total_lines * coverage If it can't be found or parsed, an empty DataFrame of that form will be returned. :return: DataFrame """ df = pd.DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage', 'repository']) for repo in self.repos: try: cov = repo.coverage() cov['repository'] = repo._repo_name() df = df.append(cov) except GitCommandError as err: print('Warning! Repo: %s seems to not have coverage' % (repo, )) pass df.reset_index() return df def file_change_rates(self, branch='master', limit=None, extensions=None, ignore_dir=None, coverage=False): """ This function will return a DataFrame containing some basic aggregations of the file change history data, and optionally test coverage data from a coverage.py .coverage file. The aim here is to identify files in the project which have abnormal edit rates, or the rate of changes without growing the files size. If a file has a high change rate and poor test coverage, then it is a great candidate for writing more tests. 
:param branch: (optional, default=master) the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param extensions: (optional, default=None) a list of file extensions to return commits for :param ignore_dir: (optional, default=None) a list of directory names to ignore :param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data. :return: DataFrame """ columns = ['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change', 'edit_rate', 'repository'] if coverage: columns += ['lines_covered', 'total_lines', 'coverage'] df = pd.DataFrame(columns=columns) for repo in self.repos: try: fcr = repo.file_change_rates(branch=branch, limit=limit, extensions=extensions, ignore_dir=ignore_dir, coverage=coverage) fcr['repository'] = repo._repo_name() df = df.append(fcr) except GitCommandError as err: print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch)) pass df.reset_index() return df def commit_history(self, branch, limit=None, extensions=None, ignore_dir=None, days=None): """ Returns a pandas DataFrame containing all of the commits for a given branch. The results from all repositories are appended to each other, resulting in one large data frame of size <limit>. If a limit is provided, it is divided by the number of repositories in the project directory to find out how many commits to pull from each project. Future implementations will use date ordering across all projects to get the true most recent N commits across the project. Included in that DataFrame will be the columns: * repository * date (index) * author * committer * message * lines * insertions * deletions * net :param branch: the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param extensions: (optional, default=None) a list of file extensions to return commits for :param ignore_dir: (optional, default=None) a list of directory names to ignore :param days: (optional, default=None) number of days to return if limit is None :return: DataFrame """ if limit is not None: limit = int(limit / len(self.repo_dirs)) df = pd.DataFrame(columns=['author', 'committer', 'date', 'message', 'lines', 'insertions', 'deletions', 'net']) for repo in self.repos: try: ch = repo.commit_history(branch, limit=limit, extensions=extensions, ignore_dir=ignore_dir, days=days) ch['repository'] = repo._repo_name() df = df.append(ch) except GitCommandError as err: print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch)) pass df.reset_index() return df def file_change_history(self, branch='master', limit=None, extensions=None, ignore_dir=None): """ Returns a DataFrame of all file changes (via the commit history) for the specified branch. This is similar to the commit history DataFrame, but is one row per file edit rather than one row per commit (which may encapsulate many file changes). 
Included in the DataFrame will be the columns: * repository * date (index) * author * committer * message * filename * insertions * deletions :param branch: the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param extensions: (optional, default=None) a list of file extensions to return commits for :param ignore_dir: (optional, default=None) a list of directory names to ignore :return: DataFrame """ if limit is not None: limit = int(limit / len(self.repo_dirs)) df =
pd.DataFrame(columns=['repository', 'date', 'author', 'committer', 'message', 'rev', 'filename', 'insertions', 'deletions'])
pandas.DataFrame
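A brief sketch of the completed pandas.DataFrame(columns=...) pattern: an empty frame with a fixed schema that per-repository results are accumulated into. The appended row is hypothetical, and note that the DataFrame.append used elsewhere in this module is deprecated in newer pandas, where pd.concat is the usual replacement:

import pandas as pd

# Empty frame with a fixed column schema, as in file_change_history.
columns = ['repository', 'date', 'author', 'committer', 'message',
           'rev', 'filename', 'insertions', 'deletions']
df = pd.DataFrame(columns=columns)
print(df.shape)  # (0, 9): no rows yet, schema fixed

# Hypothetical per-repository chunk aligned to the same schema;
# newer pandas favors pd.concat over the deprecated DataFrame.append.
chunk = pd.DataFrame([{'repository': 'example_repo', 'filename': 'setup.py',
                       'insertions': 3, 'deletions': 1}]).reindex(columns=columns)
df = pd.concat([df, chunk], ignore_index=True)
print(df.shape)  # (1, 9)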
import os import pandas as pd import numpy as np import re import logging DATA_PATH = os.getenv('DATA_PATH') if DATA_PATH is None: raise ValueError("DATA_PATH needs to be set") def changeTrade(eba, rightba, wrongba, start=None, end=None, tol=1): logger = logging.getLogger("clean") ind = [True]*len(eba.df.index) if start is not None: ind &= eba.df.index > start if end is not None: ind &= eba.df.index < end ind_diff = (( (eba.df.loc[:, eba.KEY["ID"] % (rightba, wrongba)] + eba.df.loc[ :, eba.KEY["ID"] % (wrongba, rightba)]).abs() > tol) | eba.df.loc[:, eba.KEY["ID"] % (wrongba, rightba)].isna()) ind_diff &= ind eba.df.loc[ind_diff, eba.KEY["ID"] % (wrongba, rightba)] = ( -eba.df.loc[ind_diff, eba.KEY["ID"] % (rightba, wrongba)]) nchange = sum(ind_diff) if nchange > 0: logger.debug("Picking %s over %s for %d pts" % ( rightba, wrongba, nchange)) return eba def fillNAs(eba, col, pad_limit=2, limit=3): logger = logging.getLogger("clean") ind_na = eba.df.loc[:, col].isna() nchange = ind_na.sum() if nchange > 0: logger.debug("%s: %d NA values to deal with" % ( col, nchange)) # first try pad for 2 hours eba.df.loc[:, col] = eba.df.loc[:, col].fillna( method='pad', limit=pad_limit) ind_na = eba.df.loc[:, col].isna() nchange = ind_na.sum() if nchange > 0: logger.debug("%s: replacing %d NA values with next/prev week" % ( col, nchange)) if nchange > 50: logger.warning("%s: replacing %d NA values with next/prev week" % ( col, nchange)) for ts in eba.df.index[ind_na]: try: eba.df.loc[ts, col] = eba.df.loc[ ts-pd.Timedelta("%dH" % (7*24)), col] except KeyError: eba.df.loc[ts, col] = eba.df.loc[ ts+pd.Timedelta("%dH" % (7*24)), col] # If we didn't manage to get the right value, look forward cnt = 0 while np.isnan(eba.df.loc[ts, col]): cnt += 1 if cnt > limit: logger.error("Tried to look %d times ahead for %s" % (limit, str(ts))) raise ValueError("Can't fill this NaN") eba.df.loc[ts, col] = eba.df.loc[ ts+pd.Timedelta("%dH" % (cnt*7*24)), col] return eba def removeOutliers(eba, col, start=None, end=None, thresh_u=None, thresh_l=None, remove=True, limit=4): logger = logging.getLogger("clean") if start is None: start = pd.to_datetime("2016-01-01") if end is None: end = pd.to_datetime("2017-01-02") if (thresh_u is None) and (thresh_l is None): mu = eba.df.loc[start:end, col].mean() sigma = eba.df.loc[start:end, col].std() ind_out = np.abs(eba.df.loc[:, col]-mu) > (3*sigma) else: if thresh_l is None: thresh_l = -np.inf if thresh_u is None: thresh_u = +np.inf ind_out = (eba.df.loc[:, col] < thresh_l) ind_out |= (eba.df.loc[:, col] > thresh_u) ind_out &= (eba.df.index > start) & (eba.df.index < end) nchange = sum(ind_out) logger.debug("%s: %d outliers out of [%.2g, %.2g]" % ( col, nchange, thresh_l, thresh_u)) if nchange > 10: logger.warning("%s: %d outliers out of [%.2g, %.2g]" % ( col, nchange, thresh_l, thresh_u)) if remove: eba.df.loc[ind_out, col] = np.nan return eba def applyFixes3(eba, log_level=logging.INFO): logger = logging.getLogger("clean") log_level_old = logger.level logger.setLevel(log_level) # special changes logger.debug("\tSpecial changes") eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5, start=pd.to_datetime("2016-02-12"), end=pd.to_datetime("2016-02-14")) eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5, start=pd.to_datetime("2016-08-01"), end=pd.to_datetime("2016-08-15")) eba = removeOutliers(eba, "EBA.NSB-FPL.ID.H", thresh_u=-5., start=pd.to_datetime("2016-08-01"), end=pd.to_datetime("2016-08-15")) eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5, 
start=pd.to_datetime("2016-10-07"), end=pd.to_datetime("2016-10-08 03:00")) eba = removeOutliers(eba, "EBA.NSB-FPL.ID.H", thresh_u=-5., start=pd.to_datetime("2016-10-07"), end=pd.to_datetime("2016-10-08 03:00")) for ba, ba2 in [("IID", "CISO"), ("PJM", "CPLW"), ("PJM", "DUK"), ("PJM", "TVA"), ("FPL", "SOCO"), ("SC", "SOCO"), ("SEPA", "SOCO"), ("CPLW", "TVA"), ("DUK", "TVA"), ("FMPP", "FPL"), ("FPC", "FPL"), ("JEA", "FPL"), ("SEC", "FPL"), ("CPLW", "DUK"), ("YAD", "DUK"), ("SEPA", "DUK"), ("DOPD", "BPAT"), ("LDWP", "BPAT"), ("FMPP", "FPC"), ("SEC", "FPC"), ("LDWP", "PACE"), ("LDWP", "NEVP"), ("SEPA", "SC"), ("FMPP", "TEC"), ("SEC", "JEA"), ("NSB", "FPC"), ("NSB", "FPL")]: eba = fillNAs(eba, eba.KEY["ID"] % (ba, ba2)) eba = changeTrade(eba, ba, ba2, tol=0.) for field in ["D", "NG"]: eba = removeOutliers(eba, eba.get_cols( r="FPC", field=field)[0], thresh_l=200.) eba = removeOutliers(eba, eba.get_cols( r="TVA", field=field)[0], thresh_l=3000.) eba = removeOutliers(eba, eba.get_cols(r="PSCO", field=field)[ 0], thresh_l=2000., thresh_u=10000.) eba = removeOutliers(eba, eba.get_cols( r="PACE", field=field)[0], thresh_u=10000.) eba = removeOutliers( eba, eba.get_cols(r="SRP", field=field)[0], thresh_l=1000., thresh_u=5000., start=pd.to_datetime("2016-12-01"), end=pd.to_datetime("2016-12-31")) eba = removeOutliers( eba, eba.get_cols(r="SRP", field=field)[0], thresh_u=4900., start=pd.to_datetime("2016-01-01"), end=pd.to_datetime("2016-05-01")) eba = removeOutliers(eba, eba.get_cols( r="LDWP", field=field)[0], thresh_l=100.) eba = removeOutliers( eba, eba.get_cols(r="IPCO", field=field)[0], thresh_l=800., start=pd.to_datetime("2016-08-01"), end=pd.to_datetime("2016-08-05")) eba = removeOutliers(eba, eba.get_cols( r="EPE", field=field)[0], thresh_l=100.) eba = removeOutliers(eba, eba.get_cols(r="GVL", field=field)[ 0], thresh_l=50., thresh_u=500.) eba = removeOutliers( eba, eba.get_cols(r="SCL", field="D")[0], thresh_l=500., start=pd.to_datetime("2016-12-01"), end=pd.to_datetime("2016-12-31")) # WACM outliers eba = removeOutliers(eba, eba.get_cols( r="WACM", field="NG")[0], thresh_l=2500.) eba = removeOutliers(eba, eba.get_cols( r="WACM", field="D")[0], thresh_l=2000.) eba = removeOutliers( eba, eba.get_cols(r="WACM", field="D")[0], thresh_u=3000., start=pd.to_datetime("2016-05-01"), end=pd.to_datetime("2016-05-31")) eba = removeOutliers( eba, eba.get_cols(r="WACM", field="NG")[0], thresh_l=3500., start=pd.to_datetime("2016-01-01"), end=pd.to_datetime("2016-01-31")) eba = removeOutliers( eba, eba.get_cols(r="WACM", field="D")[0], thresh_u=4000., start=pd.to_datetime("2016-01-01"), end=pd.to_datetime("2016-01-31")) for field in ["D", "NG", "TI"]: eba = fillNAs(eba, eba.get_cols(r="WACM", field=field)[0]) # WALC outliers for field in ["D", "NG"]: eba = removeOutliers( eba, eba.get_cols(r="WALC", field=field)[0], thresh_u=2000., start=pd.to_datetime("2016-01-01"), end=pd.to_datetime("2016-03-15")) eba = removeOutliers(eba, "EBA.WALC-LDWP.ID.H", thresh_l=100.) eba = fillNAs(eba, eba.KEY["ID"] % ("WALC", "LDWP")) eba = changeTrade(eba, "WALC", "LDWP", tol=0.) 
eba = removeOutliers( eba, eba.get_cols(r="WALC", field="D")[0], thresh_l=700., start=pd.to_datetime("2016-02-17"), end=pd.to_datetime("2016-02-19")) eba = removeOutliers( eba, eba.get_cols(r="WALC", field="D")[0], thresh_l=200., start=pd.to_datetime("2016-01-01"), end=pd.to_datetime("2016-05-01")) eba = removeOutliers( eba, eba.get_cols(r="WALC", field="D")[0], thresh_l=700., start=pd.to_datetime("2016-03-01"), end=pd.to_datetime("2016-03-08")) eba = removeOutliers( eba, eba.get_cols(r="TPWR", field="D")[0], thresh_l=300., start=pd.to_datetime("2016-10-15"), end=pd.to_datetime("2016-10-17")) eba = removeOutliers(eba, eba.get_cols(r="SEC", field="D")[ 0], thresh_l=40., thresh_u=300.) # TIDC outliers for field in ["D", "NG"]: eba = removeOutliers( eba, eba.get_cols(r="TIDC", field=field)[0], thresh_l=50., start=pd.to_datetime("2016-10-01")) eba = removeOutliers( eba, eba.get_cols(r="TIDC", field="D")[0], thresh_l=200., thresh_u=500, start=pd.to_datetime("2016-10-31"), end=
pd.to_datetime("2016-11-01")
pandas.to_datetime
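A minimal sketch of pandas.to_datetime as it is used above to bound the outlier windows; the timestamps mirror the completed call, but the hourly index being filtered is hypothetical:

import pandas as pd

start = pd.to_datetime("2016-10-31")
end = pd.to_datetime("2016-11-01")

# to_datetime returns Timestamps that compare directly against a DatetimeIndex,
# which is how removeOutliers restricts its mask to a date window.
idx = pd.date_range("2016-10-30", periods=72, freq="H")
mask = (idx > start) & (idx < end)
print(mask.sum())  # 23: hourly points strictly between the two bounds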
import pandas as pd
import re

interest = "introduction"
csv_location = "/Users/apple/Desktop/UBCHackathon_Local/UBCHackathon/ubc_course_calendar_data_new.csv"

data = pd.read_csv(csv_location)
data.dropna(inplace=True)

sub = interest
data["Indexes"] = data["COURSE_DESCRIPTION"].str.contains(sub, case=False)
print("data = \n", data[data["Indexes"]==True])
# disp_str = str(data.loc[data['Indexes'] == True]['COURSE_TITLE'])
df =
pd.DataFrame(data[data['Indexes'] == True]['COURSE_TITLE'])
pandas.DataFrame
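A short sketch of the completion: wrapping a filtered Series in pandas.DataFrame to get a one-column frame of matching course titles (the sample rows are made up):

import pandas as pd

data = pd.DataFrame({
    'COURSE_TITLE': ['Intro to Computing', 'Advanced Algorithms'],
    'COURSE_DESCRIPTION': ['An introduction to programming.', 'Graphs and complexity.'],
})
data["Indexes"] = data["COURSE_DESCRIPTION"].str.contains("introduction", case=False)

# Selecting a single column returns a Series; pd.DataFrame turns it back
# into a one-column DataFrame, as in the completion above.
df = pd.DataFrame(data[data['Indexes'] == True]['COURSE_TITLE'])
print(df)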
from pymethylprocess.MethylationDataTypes import MethylationArray, extract_pheno_beta_df_from_pickle_dict from methylnet.models import * from methylnet.datasets import get_methylation_dataset import torch from torch.utils.data import DataLoader from torch.nn import MSELoss, BCELoss, CrossEntropyLoss, NLLLoss import pickle import pandas as pd, numpy as np import click import os, copy from os.path import join from collections import Counter from torch.utils.data.sampler import SequentialSampler CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90) @click.group(context_settings= CONTEXT_SETTINGS) @click.version_option(version='0.1') def prediction(): pass def train_predict(train_pkl,test_pkl,input_vae_pkl,output_dir,cuda,interest_cols,categorical,disease_only,hidden_layer_topology,learning_rate_vae,learning_rate_mlp,weight_decay,dropout_p,n_epochs, scheduler='null', decay=0.5, t_max=10, eta_min=1e-6, t_mult=2, batch_size=50, val_pkl='val_methyl_array.pkl', n_workers=8, add_validation_set=False, loss_reduction='sum', add_softmax=False, no_vae=False): os.makedirs(output_dir,exist_ok=True) output_file = join(output_dir,'results.csv') training_curve_file = join(output_dir, 'training_val_curve.p') results_file = join(output_dir,'results.p') output_file_latent = join(output_dir,'latent.csv') output_model = join(output_dir,'output_model.p') output_pkl = join(output_dir, 'vae_mlp_methyl_arr.pkl') output_onehot_encoder = join(output_dir, 'one_hot_encoder.p') #input_dict = pickle.load(open(input_pkl,'rb')) if not no_vae: if cuda: vae_model = torch.load(input_vae_pkl) vae_model.cuda_on=True else: vae_model = torch.load(input_vae_pkl,map_location='cpu') vae_model.cuda_on=False train_methyl_array, val_methyl_array, test_methyl_array = MethylationArray.from_pickle(train_pkl), MethylationArray.from_pickle(val_pkl), MethylationArray.from_pickle(test_pkl)#methyl_array.split_train_test(train_p=train_percent, stratified=(True if categorical else False), disease_only=disease_only, key=interest_cols[0], subtype_delimiter=',') if not categorical: train_methyl_array.remove_na_samples(interest_cols) val_methyl_array.remove_na_samples(interest_cols) test_methyl_array.remove_na_samples(interest_cols) print(train_methyl_array.beta.shape) n_input=train_methyl_array.beta.shape[1] print(val_methyl_array.beta.shape) print(test_methyl_array.beta.shape) if len(interest_cols) == 1 and disease_only and interest_cols[0].endswith('_only')==False: print(interest_cols) interest_cols[0] += '_only' print(train_methyl_array.pheno[interest_cols[0]].unique()) print(test_methyl_array.pheno[interest_cols[0]].unique()) train_methyl_dataset = get_methylation_dataset(train_methyl_array,interest_cols,categorical=categorical, predict=True) # train, test split? Add val set? 
#print(list(train_methyl_dataset.encoder.get_feature_names())) val_methyl_dataset = get_methylation_dataset(val_methyl_array,interest_cols,categorical=categorical, predict=True, categorical_encoder=train_methyl_dataset.encoder) test_methyl_dataset = get_methylation_dataset(test_methyl_array,interest_cols,categorical=categorical, predict=True, categorical_encoder=train_methyl_dataset.encoder) if not batch_size: batch_size=len(train_methyl_dataset) train_batch_size = min(batch_size,len(train_methyl_dataset)) val_batch_size = min(batch_size,len(val_methyl_dataset)) train_loader_args=dict( dataset=train_methyl_dataset, num_workers=n_workers, batch_size=train_batch_size, shuffle=True) val_loader_args=dict(dataset=val_methyl_dataset, num_workers=n_workers, batch_size=val_batch_size, shuffle=True) test_loader_args=dict(dataset=test_methyl_dataset, num_workers=n_workers, batch_size=min(batch_size,len(test_methyl_dataset)), shuffle=False) train_methyl_dataloader = DataLoader(**train_loader_args) val_methyl_dataloader = DataLoader(**val_loader_args) # False test_methyl_dataloader = DataLoader(**test_loader_args) scaling_factors = dict(val=float(len(val_methyl_dataset))/((len(val_methyl_dataset)//val_batch_size)*val_batch_size), train_batch_size=train_batch_size,val_batch_size=val_batch_size) if no_vae: model=MLP(n_input=n_input, hidden_topology=hidden_layer_topology, dropout_p=dropout_p, n_outputs=train_methyl_dataset.outcome_col.shape[1], binary=False, softmax=add_softmax, relu_out=False) else: model=VAE_MLP(vae_model=vae_model,categorical=categorical,hidden_layer_topology=hidden_layer_topology,n_output=train_methyl_dataset.outcome_col.shape[1],dropout_p=dropout_p, add_softmax=add_softmax) print(model) if cuda: model=model.cuda() class_weights=[] if categorical: out_weight=Counter(np.argmax(train_methyl_dataset.outcome_col,axis=1)) #total_samples=sum(out_weight.values()) for k in sorted(list(out_weight.keys())): class_weights.append(1./float(out_weight[k])) # total_samples class_weights=np.array(class_weights) class_weights=(class_weights/class_weights.sum()).tolist() print(class_weights) if class_weights: class_weights = torch.FloatTensor(class_weights) if cuda: class_weights = class_weights.cuda() else: class_weights = None if not no_vae: optimizer_vae = torch.optim.Adam(model.vae.parameters(), lr = learning_rate_vae, weight_decay=weight_decay) optimizer_mlp = torch.optim.Adam(model.mlp.parameters(), lr = learning_rate_mlp, weight_decay=weight_decay) loss_fn = CrossEntropyLoss(reduction=loss_reduction,weight= class_weights) if categorical else MSELoss(reduction=loss_reduction) # 'sum' scheduler_opts=dict(scheduler=scheduler,lr_scheduler_decay=decay,T_max=t_max,eta_min=eta_min,T_mult=t_mult) if not no_vae: mlp=MLPFinetuneVAE(mlp_model=model,n_epochs=n_epochs,categorical=categorical,loss_fn=loss_fn,optimizer_vae=optimizer_vae,optimizer_mlp=optimizer_mlp,cuda=cuda, scheduler_opts=scheduler_opts) else: mlp=MLPTrainer(mlp_model=model, n_epoch=n_epochs, validation_dataloader=val_methyl_dataloader, optimizer_opts=dict(name='adam',lr=learning_rate_mlp,weight_decay=weight_decay), scheduler_opts=scheduler_opts, loss_fn=loss_fn, categorical=categorical) if add_validation_set and not no_vae: mlp.add_validation_set(val_methyl_dataloader) mlp = mlp.fit(train_methyl_dataloader) if 'encoder' in dir(train_methyl_dataset): pickle.dump(train_methyl_dataset.encoder,open(output_onehot_encoder,'wb')) results = dict(test={},train={},val={}) if no_vae: train_loader_args['shuffle']=False val_loader_args['shuffle']=False 
train_methyl_dataloader = DataLoader(**train_loader_args) val_methyl_dataloader = DataLoader(**val_loader_args) # False #train_methyl_dataloader.sampler=SequentialSampler(train_methyl_dataset) #val_methyl_dataloader.sampler=SequentialSampler(val_methyl_dataset) results['train']['y_pred'] = mlp.predict(train_methyl_dataloader) results['val']['y_pred'] = mlp.predict(val_methyl_dataloader) results['test']['y_pred'] = mlp.predict(test_methyl_dataloader) results['train']['y_true'],results['val']['y_true'],results['test']['y_true']=train_methyl_dataset.outcome_col,val_methyl_dataset.outcome_col,test_methyl_dataset.outcome_col if categorical: for k in results: results[k]['y_true']=results[k]['y_true'].argmax(1) else: results['train']['y_pred'], results['train']['y_true'], _, _ = mlp.predict(train_methyl_dataloader) results['val']['y_pred'], results['val']['y_true'], _, _ = mlp.predict(val_methyl_dataloader) del train_methyl_dataloader, train_methyl_dataset """methyl_dataset=get_methylation_dataset(methyl_array,interest_cols,predict=True) methyl_dataset_loader = DataLoader( dataset=methyl_dataset, num_workers=9, batch_size=1, shuffle=False)""" Y_pred, Y_true, latent_projection, _ = mlp.predict(test_methyl_dataloader) # FIXME change to include predictions for all classes for AUC results['test']['y_pred'], results['test']['y_true'] = copy.deepcopy(Y_pred), copy.deepcopy(Y_true) if categorical: Y_true=Y_true.argmax(axis=1)[:,np.newaxis] Y_pred=Y_pred.argmax(axis=1)[:,np.newaxis] test_methyl_array = test_methyl_dataset.to_methyl_array() """if categorical: Y_true=test_methyl_dataset.encoder.inverse_transform(Y_true)[:,np.newaxis] Y_pred=test_methyl_dataset.encoder.inverse_transform(Y_pred)[:,np.newaxis]""" #sample_names = np.array(list(test_methyl_array.beta.index)) # FIXME #outcomes = np.array([outcome[0] for outcome in outcomes]) # FIXME Y_pred=pd.DataFrame(Y_pred.flatten() if (np.array(Y_pred.shape)==1).any() else Y_pred,index=test_methyl_array.beta.index,columns=(['y_pred'] if categorical else interest_cols))#dict(zip(sample_names,outcomes)) Y_true=pd.DataFrame(Y_true.flatten() if (np.array(Y_true.shape)==1).any() else Y_true,index=test_methyl_array.beta.index,columns=(['y_true'] if categorical else interest_cols)) results_df = pd.concat([Y_pred,Y_true],axis=1) if categorical else pd.concat([Y_pred.rename(columns={name:name+'_pred' for name in list(Y_pred)}),Y_true.rename(columns={name:name+'_true' for name in list(Y_pred)})],axis=1) # FIXME latent_projection=pd.DataFrame(latent_projection,index=test_methyl_array.beta.index) test_methyl_array.beta=latent_projection test_methyl_array.write_pickle(output_pkl) pickle.dump(mlp.training_plot_data,open(training_curve_file,'wb')) latent_projection.to_csv(output_file_latent) results_df.to_csv(output_file)#pickle.dump(outcome_dict, open(outcome_dict_file,'wb')) pickle.dump(results,open(results_file,'wb')) torch.save(mlp.model if not no_vae else mlp.mlp,output_model) return latent_projection, Y_pred, Y_true, mlp, scaling_factors # ADD OUTPUT METRICS AND TRAINING PLOT CURVE @prediction.command() # FIXME finish this!! 
@click.option('-i', '--train_pkl', default='./train_val_test_sets/train_methyl_array.pkl', help='Input database for beta and phenotype data.', type=click.Path(exists=False), show_default=True) @click.option('-tp', '--test_pkl', default='./train_val_test_sets/test_methyl_array.pkl', help='Test database for beta and phenotype data.', type=click.Path(exists=False), show_default=True) @click.option('-vae', '--input_vae_pkl', default='./embeddings/output_model.p', help='Trained VAE.', type=click.Path(exists=False), show_default=True) @click.option('-o', '--output_dir', default='./predictions/', help='Output directory for predictions.', type=click.Path(exists=False), show_default=True) @click.option('-c', '--cuda', is_flag=True, help='Use GPUs.') @click.option('-ic', '--interest_cols', default=['disease'], multiple=True, help='Specify columns looking to make predictions on.', show_default=True) @click.option('-cat', '--categorical', is_flag=True, help='Multi-class prediction.', show_default=True) @click.option('-do', '--disease_only', is_flag=True, help='Only look at disease, or text before subtype_delimiter.') @click.option('-hlt', '--hidden_layer_topology', default='', help='Topology of hidden layers, comma delimited, leave empty for one layer encoder, eg. 100,100 is example of 5-hidden layer topology.', type=click.Path(exists=False), show_default=True) @click.option('-lr_vae', '--learning_rate_vae', default=1e-5, help='Learning rate VAE.', show_default=True) @click.option('-lr_mlp', '--learning_rate_mlp', default=1e-3, help='Learning rate MLP.', show_default=True) @click.option('-wd', '--weight_decay', default=1e-4, help='Weight decay of adam optimizer.', show_default=True) @click.option('-dp', '--dropout_p', default=0.2, help='Dropout Percentage.', show_default=True) @click.option('-e', '--n_epochs', default=50, help='Number of epochs to train over.', show_default=True) @click.option('-s', '--scheduler', default='null', help='Type of learning rate scheduler.', type=click.Choice(['null','exp','warm_restarts']),show_default=True) @click.option('-d', '--decay', default=0.5, help='Learning rate scheduler decay for exp selection.', show_default=True) @click.option('-t', '--t_max', default=10, help='Number of epochs before cosine learning rate restart.', show_default=True) @click.option('-eta', '--eta_min', default=1e-6, help='Minimum cosine LR.', show_default=True) @click.option('-m', '--t_mult', default=2., help='Multiply current restart period times this number given number of restarts.', show_default=True) @click.option('-bs', '--batch_size', default=50, show_default=True, help='Batch size.') @click.option('-vp', '--val_pkl', default='./train_val_test_sets/val_methyl_array.pkl', help='Validation Set Methylation Array Location.', show_default=True, type=click.Path(exists=False),) @click.option('-w', '--n_workers', default=9, show_default=True, help='Number of workers.') @click.option('-v', '--add_validation_set', is_flag=True, help='Evaluate validation set.') @click.option('-l', '--loss_reduction', default='sum', show_default=True, help='Type of reduction on loss function.', type=click.Choice(['sum','elementwise_mean','none'])) @click.option('-hl', '--hyperparameter_log', default='predictions/predict_hyperparameters_log.csv', show_default=True, help='CSV file containing prior runs.', type=click.Path(exists=False)) @click.option('-j', '--job_name', default='predict_job', show_default=True, help='Embedding job name.', type=click.Path(exists=False)) @click.option('-sft', '--add_softmax', 
is_flag=True, help='Add softmax for predicting probability distributions. Experimental.') @click.option('-nv', '--no_vae', is_flag=True, help='Predict only using MLP, no pretraining. Experimental. No hyperparameter scan. Need to add forward hook.') def make_prediction(train_pkl,test_pkl,input_vae_pkl,output_dir,cuda,interest_cols,categorical,disease_only,hidden_layer_topology,learning_rate_vae,learning_rate_mlp,weight_decay,dropout_p,n_epochs, scheduler='null', decay=0.5, t_max=10, eta_min=1e-6, t_mult=2, batch_size=50, val_pkl='val_methyl_array.pkl', n_workers=8, add_validation_set=False, loss_reduction='sum', hyperparameter_log='predictions/predict_hyperparameters_log.csv', job_name='predict_job', add_softmax=False, no_vae=False): """Train prediction model by fine-tuning VAE and appending/training MLP to make classification/regression predictions on MethylationArrays.""" hlt_list=filter(None,hidden_layer_topology.split(',')) if hlt_list: hidden_layer_topology=list(map(int,hlt_list)) else: hidden_layer_topology=[] latent_projection, Y_pred, Y_true, vae_mlp, scaling_factors = train_predict(train_pkl,test_pkl,input_vae_pkl,output_dir,cuda,list(interest_cols),categorical,disease_only,hidden_layer_topology,learning_rate_vae,learning_rate_mlp,weight_decay,dropout_p,n_epochs, scheduler, decay, t_max, eta_min, t_mult, batch_size, val_pkl, n_workers, add_validation_set, loss_reduction, add_softmax, no_vae) accuracy, precision, recall, f1 = -1,-1,-1,-1 if categorical: from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score accuracy, precision, recall, f1 = accuracy_score(Y_true,Y_pred), precision_score(Y_true,Y_pred,average='weighted'), recall_score(Y_true,Y_pred,average='weighted'), f1_score(Y_true,Y_pred,average='weighted') else: from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score, mean_absolute_error accuracy, precision, recall, f1 = mean_squared_error(Y_true,Y_pred,multioutput='raw_values'), r2_score(Y_true,Y_pred,multioutput='raw_values'), explained_variance_score(Y_true,Y_pred,multioutput='raw_values'), mean_absolute_error(Y_true,Y_pred,multioutput='raw_values') if len(interest_cols)>1: accuracy, precision, recall, f1 = ';'.join([str(v) for v in accuracy]),';'.join([str(v) for v in precision]),';'.join([str(v) for v in recall]),';'.join([str(v) for v in f1]) else: accuracy, precision, recall, f1=accuracy[0], precision[0], recall[0], f1[0] hyperparameter_row = [job_name,n_epochs, vae_mlp.best_epoch, vae_mlp.min_loss, vae_mlp.min_val_loss, vae_mlp.min_val_loss*scaling_factors['val'], accuracy, precision, recall, f1, vae_mlp.model.vae.n_input, vae_mlp.model.vae.n_latent, str(hidden_layer_topology), learning_rate_vae, learning_rate_mlp, weight_decay, scheduler, t_max, t_mult, eta_min, scaling_factors['train_batch_size'], scaling_factors['val_batch_size'], dropout_p] hyperparameter_df = pd.DataFrame(columns=['job_name','n_epochs',"best_epoch", "min_loss", "min_val_loss", "min_val_loss-batchsize_adj","test_accuracy" if categorical else 'neg_mean_squared_error', "test_precision" if categorical else 'r2_score', "test_recall" if categorical else 'explained_variance', "test_f1" if categorical else 'mean_absolute_error', "n_input", "n_latent", "hidden_layer_encoder_topology", "learning_rate_vae", "learning_rate_mlp", "weight_decay", "scheduler", "t_max", "t_mult", "eta_min","train_batch_size", "val_batch_size", "dropout_p"]) hyperparameter_df.loc[0] = hyperparameter_row if os.path.exists(hyperparameter_log): print('APPEND') 
hyperparameter_df_former = pd.read_csv(hyperparameter_log) hyperparameter_df_former=hyperparameter_df_former[[col for col in list(hyperparameter_df) if not col.startswith('Unnamed')]] hyperparameter_df=pd.concat([hyperparameter_df_former,hyperparameter_df],axis=0) hyperparameter_df.to_csv(hyperparameter_log) @prediction.command() @click.option('-tp', '--test_pkl', default='./train_val_test_sets/test_methyl_array.pkl', help='Test database for beta and phenotype data.', type=click.Path(exists=False), show_default=True) @click.option('-m', '--model_pickle', default='./predictions/output_model.p', help='Pytorch model containing forward_predict method.', type=click.Path(exists=False), show_default=True) @click.option('-bs', '--batch_size', default=50, show_default=True, help='Batch size.') @click.option('-w', '--n_workers', default=9, show_default=True, help='Number of workers.') @click.option('-ic', '--interest_cols', default=['disease'], multiple=True, help='Specify columns looking to make predictions on.', show_default=True) @click.option('-cat', '--categorical', is_flag=True, help='Multi-class prediction.', show_default=True) @click.option('-c', '--cuda', is_flag=True, help='Use GPUs.') @click.option('-e', '--categorical_encoder', default='./predictions/one_hot_encoder.p', help='One hot encoder if categorical model. If path exists, then return top positive controbutions per samples of that class. Encoded values must be of sample class as interest_col.', type=click.Path(exists=False), show_default=True) @click.option('-o', '--output_dir', default='./new_predictions/', help='Output directory for predictions.', type=click.Path(exists=False), show_default=True) def make_new_predictions(test_pkl, model_pickle, batch_size, n_workers, interest_cols, categorical, cuda, categorical_encoder, output_dir): """Run prediction model again to further assess outcome. 
Only evaluate prediction model.""" os.makedirs(output_dir,exist_ok=True) test_methyl_array = MethylationArray.from_pickle(test_pkl) # generate results pickle to run through classification/regression report if cuda: model = torch.load(model_pickle) model.vae.cuda_on=True else: model = torch.load(model_pickle,map_location='cpu') model.vae.cuda_on=False if not categorical: test_methyl_array.remove_na_samples(interest_cols if len(interest_cols)>1 else interest_cols[0]) if os.path.exists(categorical_encoder): categorical_encoder=pickle.load(open(categorical_encoder,'rb')) else: categorical_encoder=None test_methyl_dataset = get_methylation_dataset(test_methyl_array,interest_cols,categorical=categorical, predict=True, categorical_encoder=categorical_encoder) test_methyl_dataloader = DataLoader( dataset=test_methyl_dataset, num_workers=n_workers, batch_size=min(batch_size,len(test_methyl_dataset)), shuffle=False) vae_mlp=MLPFinetuneVAE(mlp_model=model,categorical=categorical,cuda=cuda) Y_pred, Y_true, latent_projection, _ = vae_mlp.predict(test_methyl_dataloader) results = dict(test={}) results['test']['y_pred'], results['test']['y_true'] = copy.deepcopy(Y_pred), copy.deepcopy(Y_true) if categorical: Y_true=Y_true.argmax(axis=1)[:,np.newaxis] Y_pred=Y_pred.argmax(axis=1)[:,np.newaxis] test_methyl_array = test_methyl_dataset.to_methyl_array() Y_pred=pd.DataFrame(Y_pred.flatten() if (np.array(Y_pred.shape)==1).any() else Y_pred,index=test_methyl_array.beta.index,columns=(['y_pred'] if categorical else interest_cols)) Y_true=pd.DataFrame(Y_true.flatten() if (np.array(Y_true.shape)==1).any() else Y_true,index=test_methyl_array.beta.index,columns=(['y_true'] if categorical else interest_cols)) results_df = pd.concat([Y_pred,Y_true],axis=1) if categorical else pd.concat([Y_pred.rename(columns={name:name+'_pred' for name in list(Y_pred)}),Y_true.rename(columns={name:name+'_true' for name in list(Y_pred)})],axis=1) # FIXME latent_projection=pd.DataFrame(latent_projection,index=test_methyl_array.beta.index) test_methyl_array.beta=latent_projection output_file = join(output_dir,'results.csv') results_file = join(output_dir,'results.p') output_file_latent = join(output_dir,'latent.csv') output_pkl = join(output_dir, 'vae_mlp_methyl_arr.pkl') test_methyl_array.write_pickle(output_pkl) pickle.dump(results,open(results_file,'wb')) latent_projection.to_csv(output_file_latent) results_df.to_csv(output_file) @prediction.command() @click.option('-hcsv', '--hyperparameter_input_csv', default='predictions/predict_hyperparameters_scan_input.csv', show_default=True, help='CSV file containing hyperparameter inputs.', type=click.Path(exists=False)) @click.option('-hl', '--hyperparameter_output_log', default='predictions/predict_hyperparameters_log.csv', show_default=True, help='CSV file containing prior runs.', type=click.Path(exists=False)) @click.option('-g', '--generate_input', is_flag=True, help='Generate hyperparameter input csv.') @click.option('-c', '--job_chunk_size', default=1, help='If not series, chunk up and run these number of commands at once..') @click.option('-ic', '--interest_cols', default=['disease_only'], multiple=True, help='Column to stratify samples on.') @click.option('-cat', '--categorical', is_flag=True, help='Whether to run categorical analysis or not.') @click.option('-r', '--reset_all', is_flag=True, help='Run all jobs again.') @click.option('-t', '--torque', is_flag=True, help='Submit jobs on torque.') @click.option('-gpu', '--gpu', default=-1, help='If torque submit, which gpu to use.', 
show_default=True) @click.option('-gn', '--gpu_node', default=-1, help='If torque submit, which gpu node to use.', show_default=True) @click.option('-nh', '--nohup', is_flag=True, help='Nohup launch jobs.') @click.option('-n', '--n_jobs_relaunch', default=0, help='Relaunch n top jobs from previous run.', show_default=True) @click.option('-cp', '--crossover_p', default=0., help='Rate of crossover between hyperparameters.', show_default=True) @click.option('-mc', '--model_complexity_factor', default=1., help='Degree of neural network model complexity for hyperparameter search. Search for less wide networks with a lower complexity value, bounded between 0 and infinity.', show_default=True) @click.option('-j', '--n_jobs', default=4, help='Number of jobs to generate.') @click.option('-v', '--val_loss_column', default="min_val_loss-batchsize_adj", help='Validation loss column.', type=click.Path(exists=False)) @click.option('-sft', '--add_softmax', is_flag=True, help='Add softmax for predicting probability distributions.') @click.option('-a', '--additional_command', default='', help='Additional command to input for torque run.', type=click.Path(exists=False)) @click.option('-cu', '--cuda', is_flag=True, help='Use GPUs.') @click.option('-grid', '--hyperparameter_yaml', default='', help='YAML file with custom subset of hyperparameter grid.', type=click.Path(exists=False)) @click.option('-rs', '--randomseed', default=42, help='Seed job creation.') @click.option('-ao', '--additional_opts', default='', help='Additional options for torque submission.', type=click.Path(exists=False)) @click.option('-me', '--max_epochs', default=-1, help='Maximum number of epochs to limit training time. -1 considers entire grid', show_default=True) def launch_hyperparameter_scan(hyperparameter_input_csv, hyperparameter_output_log, generate_input, job_chunk_size, interest_cols, categorical, reset_all, torque, gpu, gpu_node, nohup, n_jobs_relaunch, crossover_p, model_complexity_factor,n_jobs, val_loss_column, add_softmax, additional_command, cuda, hyperparameter_yaml, randomseed, additional_opts, max_epochs): """Run randomized hyperparameter scan of neural network hyperparameters.""" from methylnet.hyperparameter_scans import coarse_scan, find_top_jobs custom_jobs=[] if n_jobs_relaunch: custom_jobs=find_top_jobs(hyperparameter_input_csv, hyperparameter_output_log,n_jobs_relaunch, crossover_p, val_loss_column) if os.path.exists(hyperparameter_yaml): from ruamel.yaml import safe_load as load with open(hyperparameter_yaml) as f: new_grid = load(f.read()) else: new_grid = {} coarse_scan(hyperparameter_input_csv, hyperparameter_output_log, generate_input, job_chunk_size, interest_cols, reset_all, torque, gpu, gpu_node, nohup, mlp=True, custom_jobs=custom_jobs, model_complexity_factor=model_complexity_factor,n_jobs=n_jobs, categorical=categorical,add_softmax=add_softmax, additional_command=additional_command, cuda=cuda, new_grid=new_grid, randomseed=randomseed, additional_opts=additional_opts, max_epochs=max_epochs) @prediction.command() @click.option('-r', '--results_pickle', default='predictions/results.p', show_default=True, help='Results from training, validation, and testing.', type=click.Path(exists=False)) @click.option('-o', '--output_dir', default='results/', show_default=True, help='Output directory.', type=click.Path(exists=False)) def regression_report(results_pickle,output_dir): """Generate regression report that gives concise results from regression tasks.""" # FIXME expand functionality import matplotlib 
matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns sns.set(style="whitegrid") os.makedirs(output_dir,exist_ok=True) results_dict=pickle.load(open(results_pickle,'rb')) for k in results_dict: df_pred=pd.DataFrame(results_dict[k]['y_pred']).melt() df_pred=df_pred.rename(columns=dict(value='y_pred',variable='outcome_vars')) df_true=pd.DataFrame(results_dict[k]['y_true']).melt() df_true=df_true.rename(columns=dict(value='y_true',variable='true_vars')) df=pd.concat([df_pred,df_true],axis=1) #print(df) df=df[['outcome_vars','y_pred','y_true']] plt.figure() sns.lmplot(x="y_pred", y="y_true", col="outcome_vars", hue="outcome_vars",data=df, col_wrap=2) plt.savefig(os.path.join(output_dir,'{}_regression_results.png'.format(k)),dpi=300) @prediction.command() @click.option('-r', '--results_pickle', default='predictions/results.p', show_default=True, help='Results from training, validation, and testing.', type=click.Path(exists=False)) @click.option('-o', '--output_dir', default='results/', show_default=True, help='Output directory.', type=click.Path(exists=False)) @click.option('-e', '--categorical_encoder', default='./predictions/one_hot_encoder.p', help='One hot encoder if categorical model. If path exists, then return top positive controbutions per samples of that class. Encoded values must be of sample class as interest_col.', type=click.Path(exists=False), show_default=True) @click.option('-a', '--average_mechanism', default='micro', show_default=True, help='Output directory.', type=click.Choice(['weighted','macro','micro','binary','samples'])) def classification_report(results_pickle,output_dir, categorical_encoder, average_mechanism): """Generate classification report that gives results from classification tasks.""" from mlxtend.evaluate import bootstrap from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score, roc_curve, roc_auc_score, confusion_matrix os.makedirs(output_dir,exist_ok=True) def extract_ys(Y): return Y[:,0], Y[:,1:] def accuracy(Y): y_true, y_pred=extract_ys(Y) y_pred = np.argmax(y_pred,axis=1) return accuracy_score(y_true, y_pred) def recall(Y): y_true, y_pred=extract_ys(Y) y_pred = np.argmax(y_pred,axis=1) return recall_score(y_true, y_pred, average=average_mechanism) def precision(Y): y_true, y_pred=extract_ys(Y) y_pred = np.argmax(y_pred,axis=1) return precision_score(y_true, y_pred, average=average_mechanism) def f1(Y): y_true, y_pred=extract_ys(Y) y_pred = np.argmax(y_pred,axis=1) return f1_score(y_true, y_pred, average=average_mechanism) def auc(Y): y_true, y_pred=extract_ys(Y) y_pred_labels=np.argmax(y_pred,1) supports={i:sum((y_pred_labels[np.squeeze(y_true==i)]==i).astype(int)) for i in np.unique(y_true)} final_auc_score=0. 
for i in supports: final_auc_score+=supports[i]*roc_auc_score((y_true==i).astype(int), y_pred[:,int(i)], average='weighted') final_auc_score/=sum(list(supports.values())) return final_auc_score def to_y_probas(y_pred): return np.exp(y_pred)/np.exp(y_pred).sum(1)[:,np.newaxis] results_dict=pickle.load(open(results_pickle,'rb')) if os.path.exists(categorical_encoder): categorical_encoder=pickle.load(open(categorical_encoder,'rb')) else: categorical_encoder=None df_roc=[] final_results=[] for k in results_dict: y_true=results_dict[k]['y_true'] y_true_labels=np.argmax(y_true,axis=1)[:,np.newaxis] classes=np.unique(y_true_labels) y_pred=to_y_probas(results_dict[k]['y_pred']) y_pred_labels=np.argmax(y_pred,1).reshape((-1,1)) #print(y_true_labels, y_pred_labels) out_classes = classes.astype(int)# if categorical_encoder == None else categorical_encoder.inverse_transform(classes.astype(int).reshape((-1,1))) class_labels = out_classes if categorical_encoder == None else categorical_encoder.inverse_transform(out_classes) pd.DataFrame(confusion_matrix(y_true_labels.astype(int).flatten(), y_pred_labels.astype(int).flatten(),labels=out_classes),index=class_labels,columns=class_labels).to_csv(join(output_dir,'{}_confusion_mat.csv'.format(k))) Y=np.hstack((y_true_labels,y_pred)) supports={i:sum((y_pred_labels[np.squeeze(y_true_labels==i)]==i).astype(int)) for i in classes} fpr = dict() tpr = dict() for i in supports: fpr[i], tpr[i], _ = roc_curve(y_true[:,i], y_pred[:,i]) all_fpr = np.unique(np.concatenate([fpr[i] for i in supports])) mean_tpr = np.zeros_like(all_fpr) for i in supports: mean_tpr += supports[i]*np.interp(all_fpr, fpr[i], tpr[i]) mean_tpr/=sum(list(supports.values())) tpr,fpr=mean_tpr,all_fpr df=pd.DataFrame({'fpr' : fpr, 'tpr' : tpr}) df['Legend'] = "{} Weighted ROC, AUC={}".format(k,round(auc(Y),2)) df_roc.append(df) fns = dict(accuracy=accuracy,recall=recall,precision=precision,f1=f1,auc=auc) for fn in fns: print(k,fn) original, std_err, ci_bounds = bootstrap(Y, num_rounds=1000, func=fns[fn], ci=0.95, seed=123) low,high=ci_bounds final_results.append([k,fn,original,std_err,low,high]) final_results=pd.DataFrame(final_results,columns=['DataSet','Metric','Score','Error','95% CI Low','95% CI High']) df_roc=
pd.concat(df_roc)
pandas.concat
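A compact sketch of the pandas.concat completion: stacking the per-dataset ROC frames built in the loop above into one long frame for plotting (the curve values and AUC are placeholders):

import pandas as pd

df_roc = []
for name in ['train', 'val', 'test']:
    df = pd.DataFrame({'fpr': [0.0, 0.5, 1.0], 'tpr': [0.0, 0.7, 1.0]})
    df['Legend'] = "{} Weighted ROC, AUC=0.85".format(name)  # placeholder AUC
    df_roc.append(df)

# Row-wise concatenation of the per-dataset frames, as completed above.
df_roc = pd.concat(df_roc)
print(df_roc.shape)  # (9, 3)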
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Nov 7 11:42:15 2018 @author: akumler This script contains all the hard code that produces the solar forecast. The solar forecation application imports this module to get desired data. This is the second version, removing the requirement that a previous observation is needed in order to make the forecast (reconstruction). Other improvements are added. """ import pandas as pd import numpy as np from math import * from datetime import datetime import math from pvlib.solarposition import * from pvlib.atmosphere import * from pvlib.clearsky import * from pvlib.irradiance import * # from bird_clear_sky_model import * from sklearn.metrics import * # import seaborn as sns; # # sns.set() # import skill_metrics as sm from scipy import stats from datetime import datetime import time from time import strptime, strftime, mktime, gmtime from calendar import timegm def valid_datetime(date_time): """ Checks to make sure the datetime received from the platform is valid. Parameters ---------- date_time: 'Pandas DatetimeIndex' Current time. Usually a contains a year, month, day, hour, and minute. Returns ------- valid_time: 'datetimeindex' Current time. If 'valid_datetime' receives an invalid input, one is assumed from the previous valid time given. """ if (date_time is None): valid_time = np.array([pd.Timestamp.now()]) valid_time = pd.DatetimeIndex(valid_time).round('min') return valid_time elif (isinstance(date_time, datetime) == True): valid_time = np.array([pd.to_datetime(date_time)]) valid_time = pd.DatetimeIndex(valid_time) return valid_time elif (isinstance(date_time, pd.DatetimeIndex) == True): return date_time def get_cs_transmit(zenith, airmass_relative, aod380, aod500, precipitable_water, ozone=0.3, pressure=101325., dni_extra=1364., asymmetry=0.85, albedo=0.2): """ Calculats clear-sky transmittance to be used in the application. Parameters ---------- zenith: 'Numpy array' A fake SZA array to calaculate transmittance. airmass_relative: 'Numpy array' A fake airmass to calculate transmittance. aod380: 'Float' Aerosol optical depth @ 380 nm. aod500: 'Float' Aerosol optical depth @ 500 nm. precipitable_water: 'Float' Annual average precipitable water for SRRL. Units: cm. ozone: 'Float' Annual average ozone concentration for SRRL. Units: cm. pressure: 'Float' Avearage sea-level pressure. Units: Pa. dni_extra: 'Float' Aveage extraterrestrial @ TOA. Units: W/m^2 asymmetry: 'Float' Asymmetry parameter albedo: 'Float' Surface albedo. Returns ------- irrads: 'Ordered Dictionary' Contains clear-sky GHI, DNI, DHI, and transmittance. Really only the transmittance is used. 
""" etr = dni_extra # extraradiation ze_rad = np.deg2rad(zenith) # zenith in radians airmass = airmass_relative # Bird clear sky model am_press = atmosphere.absoluteairmass(airmass, pressure) t_rayleigh = ( np.exp(-0.0903 * am_press ** 0.84 * ( 1.0 + am_press - am_press ** 1.01 )) ) am_o3 = ozone * airmass t_ozone = ( 1.0 - 0.1611 * am_o3 * (1.0 + 139.48 * am_o3) ** -0.3034 - 0.002730 * am_o3 / (1.0 + 0.044 * am_o3 + 0.0003 * am_o3 ** 2.0) ) t_gases = np.exp(-0.0127 * am_press ** 0.26) am_h2o = airmass * precipitable_water t_water = ( 1.0 - 2.4959 * am_h2o / ( (1.0 + 79.034 * am_h2o) ** 0.6828 + 6.385 * am_h2o ) ) bird_huldstrom = atmosphere.bird_hulstrom80_aod_bb(aod380, aod500) t_aerosol = np.exp( -(bird_huldstrom ** 0.873) * (1.0 + bird_huldstrom - bird_huldstrom ** 0.7088) * airmass ** 0.9108 ) taa = 1.0 - 0.1 * (1.0 - airmass + airmass ** 1.06) * (1.0 - t_aerosol) rs = 0.0685 + (1.0 - asymmetry) * (1.0 - t_aerosol / taa) id_ = 0.9662 * etr * t_aerosol * t_water * t_gases * t_ozone * t_rayleigh ze_cos = np.where(zenith < 90, np.cos(ze_rad), 0.0) id_nh = id_ * ze_cos ias = ( etr * ze_cos * 0.79 * t_ozone * t_gases * t_water * taa * (0.5 * (1.0 - t_rayleigh) + asymmetry * (1.0 - (t_aerosol / taa))) / ( 1.0 - airmass + airmass ** 1.02 ) ) gh = (id_nh + ias) / (1.0 - albedo * rs) diffuse_horiz = gh - id_nh transmit = t_aerosol * t_water * t_gases * t_ozone * t_rayleigh # TODO: be DRY, use decorator to wrap methods that need to return either # OrderedDict or DataFrame instead of repeating this boilerplate code irrads = OrderedDict() irrads['direct_horizontal'] = id_nh irrads['ghi'] = gh irrads['dni'] = id_ irrads['dhi'] = diffuse_horiz irrads['clear_transmit'] = transmit if isinstance(irrads['dni'], pd.Series): irrads = pd.DataFrame.from_dict(irrads) return irrads def time_to_forecast(sunrise, sunset, valid_time, timezone): """ Checks to see if the sun is up so that a GHI forecast can be made. Parameters ---------- sunrise: 'Pandas DatetimeIndex' Sunrise for this particular day, created using the 'get_sun_rise_set_transit' module in PVlib. sunset: 'Pandas DatetimeIndex' Sunset for this particular day, created using the 'get_sun_rise_set_transit' module in PVlib. valid_time: 'datetimeindex' Current time. timezone: 'tz' Timezone of current location. Returns ------- to_forecast: 'Boolean' Boolean True or False. True means a forecast can be made. False means the sun is still set, and that a forecast should not be made. """ # Is it DST? dst = time.localtime().tm_isdst # current_tz = datetime.strftime("%z", gmtime()) # Adjust for timezone. if (dst == 0): adj_sunrise = pd.DatetimeIndex(sunrise).tz_localize(timezone) - pd.Timedelta(hours=1) adj_sunset = pd.DatetimeIndex(sunset).tz_localize(timezone) - pd.Timedelta(hours=1) # adj_time = pd.DatetimeIndex(valid_time).tz_localize(strftime("%z", gmtime())) adj_time = valid_time.tz_localize(timezone) - pd.Timedelta(hours=1) else: adj_sunrise = pd.DatetimeIndex(sunrise).tz_localize(timezone) adj_sunset = pd.DatetimeIndex(sunset).tz_localize(timezone) adj_time = valid_time.tz_localize(timezone) print(adj_sunrise,adj_time, adj_sunset) if (adj_sunrise <= adj_time < adj_sunset): to_forecast = True else: to_forecast = False return to_forecast def valid_ghi(ghi_obs, latest_ghi): """ Checks to make sure the GHI observation received from the platfrom is valid. Parameters ---------- ghi_obs: 'Pandas Series object' Current GHI observation. Units: W/m^2 Returns ------- valid_ghi: 'Pandas series object' Current GHI observation. 
        If 'valid_ghi' receives an invalid input, one is calculated using the
        persistence model and a valid datetimeindex.
    """
    if ghi_obs is None:
        # Assume the persistence model
        ghi_obs = persistence_model(latest_ghi)
        return ghi_obs
    elif isinstance(ghi_obs, pd.Series):
        return ghi_obs


def persistence_model(latest_ghi):
    """
    Creates a persistence forecast when the software fails to receive a valid
    GHI observation. Can also be used if one simply desires a persistence
    forecast.

    Parameters
    ----------
    latest_ghi: 'Pandas Series object'
        Latest valid GHI observation. Units: W/m^2

    Returns
    -------
    persist_ghi: 'Pandas Series object'
        Persistence forecast for the input date time.
    """
    persist_ghi = latest_ghi.copy()
    return persist_ghi


def last_valid_ghi(ghi_obs):
    """
    Saves the last valid GHI observation. Uses can vary, but the important one
    is that it can be used in case the software fails to receive a valid GHI
    observation.

    Parameters
    ----------
    ghi_obs: 'Pandas Series object'
        Current GHI observation. Units: W/m^2

    Returns
    -------
    latest_ghi: 'Pandas Series object'
        Latest GHI observation saved. Units: W/m^2
    """
    latest_ghi = ghi_obs
    return latest_ghi


def valid_sza_data(sza_data):
    """
    Checks to see if the SPA data is valid. This mainly concerns the solar
    elevation angle: if it is below 7 degrees (a zenith angle above 83
    degrees), the elevation is set to np.nan and no forecast is made.

    Parameters
    ----------
    sza_data: 'Pandas DataFrame object'
        Solar position data from PVlib's SPA implementation.

    Returns
    -------
    sza_valid: 'Pandas DataFrame object'
        Valid SPA data. If the sun is too low, the elevation entry is np.nan
        and no forecast is generated.
    """
    sza_valid = []
    if sza_data['elevation'].iloc[0] < 7:
        sza_data['elevation'].iloc[0] = np.nan
        sza_valid = sza_data.copy()
    else:
        sza_valid = sza_data.copy()

    return sza_valid


def future_data(valid_time, apparent_zenith, lat, lon, altitude, aod380,
                aod500, precipitable_water, ozone, pressure, asymmetry,
                albedo):
    """
    Calculates the necessary variables for the future time period, so that a
    GHI forecast can be made.

    Parameters
    ----------
    valid_time: 'Pandas DatetimeIndex'
        Current time.
    apparent_zenith: 'Pandas Series object'
        Apparent solar zenith angle generated by PVlib. Units: degrees
    lat: 'float'
        Latitude of the site.
    lon: 'float'
        Longitude of the site.
    altitude: 'float'
        Altitude of the site. Units: m

    Returns
    -------
    future_df: 'Pandas DataFrame object'
        Future apparent solar zenith angle (degrees), future clear-sky GHI
        (W/m^2), and the future time stamp.
    """
    # Calculate future solar zenith angle
    # Need to calculate a future SZA.
    future_time = valid_time + pd.DateOffset(minutes=30)
    sza_data_future = spa_python(future_time, lat, lon, altitude)
    # Keep the validated SPA data so the low-sun check takes effect.
    sza_valid_future = valid_sza_data(sza_data_future)
    future_apparent_sza = sza_valid_future['apparent_zenith']
    # Future extraterrestrial DNI
    future_ext = get_extra_radiation(future_time, epoch_year=future_time.year,
                                     method='nrel', solar_constant=1366.1)
    future_ext = pd.Series(future_ext)
    # Calculate relative and absolute airmass
    future_airmass = get_relative_airmass(future_apparent_sza,
                                          model='kasten1966')
    ghi_a_airmass = get_absolute_airmass(future_airmass, pressure=pressure)
    # Alternate way to calculate Linke turbidity
    bird_aod = bird_hulstrom80_aod_bb(aod380=aod380, aod500=aod500)
    kasten_linke2 = kasten96_lt(ghi_a_airmass,
                                precipitable_water=precipitable_water,
                                aod_bb=bird_aod)
    # Calculate future clear-sky GHI with the Ineichen-Perez clear-sky model
    cs_ineichen_perez = ineichen(future_apparent_sza,
                                 airmass_absolute=ghi_a_airmass,
                                 linke_turbidity=kasten_linke2,
                                 altitude=altitude, dni_extra=future_ext)
    # cs_ineichen_perez['direct_horizontal'] = (cs_ineichen_perez['dni'] *
    #                                           np.cos(np.radians(future_apparent_sza)))
    future_clearsky_ghi = cs_ineichen_perez['ghi']
    # Convert the time variables into Pandas Series objects
    future_time = pd.Series(future_time)
    future_time.index = future_clearsky_ghi.index
    # Gather all the data into one dataframe. May have to play with data
    # formats a bit to get everything to work.
    future_df = pd.concat([future_apparent_sza, future_clearsky_ghi,
                           future_time], axis=1)
    future_df.columns = ['Future_Apparent_SZA', 'Future_Clearsky_GHI',
                         'Future_Time']

    return future_df


def ghi_forecast(valid_time, ghi_obs, cs_transmit, clearsky_ghi, clearsky_dni,
                 dni, zenith, future_zenith, future_cs_ghi, future_time,
                 albedo):
    """
    Calculates a GHI forecast based on Xie and Liu 2013.

    Parameters
    ----------
    valid_time: 'Pandas DatetimeIndex'
        Current time.
    ghi_obs: 'Pandas Series object'
        Current GHI observation. Units: W/m^2
    cs_transmit: 'Pandas Series object'
        Clear-sky transmittance.
    clearsky_ghi: 'Pandas Series object'
        Clear-sky GHI generated by the Bird model. Units: W/m^2
    clearsky_dni: 'Pandas Series object'
        Clear-sky DNI generated by the Bird model. Units: W/m^2
    dni: 'Pandas Series object'
        DNI generated by the DISC model. Units: W/m^2
    zenith: 'Pandas Series object'
        Apparent solar zenith angle generated by PVlib. Units: degrees
    future_zenith: 'Pandas Series object'
        Future apparent solar zenith angle generated by PVlib. Units: degrees
    future_cs_ghi: 'Pandas Series object'
        Future clear-sky GHI. Units: W/m^2
    future_time: 'Pandas Series object'
        Future time stamp (30 minutes ahead).
    albedo: 'float'
        Surface albedo.
    """
    # Coerce the inputs to plain NumPy arrays
    ghi_obs = np.array(ghi_obs)
    cs_transmit = np.array(cs_transmit)
    clearsky_ghi = np.array(clearsky_ghi)
    clearsky_dni = np.array(clearsky_dni)
    dni = np.array(dni)
    zenith = np.array(zenith)
    future_zenith = np.array(future_zenith)
    future_cs_ghi = np.array(future_cs_ghi)

    # Finish transmittance calculation
    transmit = cs_transmit ** 2

    # Upwelling shortwave radiation
    ghi_up = albedo * ghi_obs

    # It is now possible to calculate B1 and B2
    B1 = (clearsky_ghi - ghi_obs) / (clearsky_ghi - ghi_up * transmit)
    B2 = (clearsky_dni - dni) / clearsky_dni
    b_final = B1 / B2

    # In order to continue and calculate cloud fraction and GHI, we have to
    # compute cloud albedo. These values change depending on the values of B1
    # and B2, so an if/else chain is appropriate.
    # Initial GHI is no longer computed, as it isn't needed anymore for the
    # forecast. It was initially used for the reconstructed ratios.
    if 0 <= abs(b_final) <= 0.07:
        cloud_albedo = 0
        cloud_fraction = 0
    elif 0.07 < b_final < 0.07872:
        cloud_albedo = 0
        cloud_fraction = 0
    elif 0.07872 <= b_final <= 0.11442:
        cloud_albedo = (1 - 31.1648 * b_final +
                        np.sqrt((31.1648 * b_final) ** 2 - 49.6255 * b_final))
        cloud_fraction = B1 / cloud_albedo
    elif 0.11442 < b_final <= 0.185:
        cloud_albedo = ((2.61224 * B1 - B2 +
                         np.sqrt((24.2004 * B1 ** 2) - (9.0098 * B1 * B2) +
                                 B2 ** 2)) /
                        (18.3622 * B1 - 4 * B2))
        cloud_fraction = B1 / cloud_albedo
    elif 0.185 < b_final <= 0.23792:
        cloud_albedo = 0.89412 * b_final + 0.02519
        cloud_fraction = B1 / cloud_albedo
    elif 0.23792 < b_final <= 1.0:
        cloud_albedo = b_final
        cloud_fraction = B1 / cloud_albedo
    else:
        cloud_albedo = b_final
        cloud_fraction = B1 / cloud_albedo

    # Now we can calculate cloud optical thickness for the next 30 min
    cloud_fraction_persist = cloud_fraction
    sza_thick = np.cos(np.radians(zenith))
    cloud_thick = (2 * cloud_albedo * sza_thick /
                   ((1 - cloud_albedo) * (1 - 0.86)))
    b = 0.5 - (0.5 * 0.86)

    # Need to calculate a future SZA.
    sza_valid_future = future_zenith
    sza_thick_future = np.cos(np.radians(sza_valid_future))
    cloud_albedo_future = (((b * cloud_thick) / sza_thick_future) /
                           (1 + (b * cloud_thick) / sza_thick_future))

    # Set some reasonable limits for cloud albedo and cloud fraction.
    cloud_albedo_future = np.array([cloud_albedo_future])
    cloud_albedo_future[cloud_albedo_future < 0] = 0
    cloud_albedo_future[cloud_albedo_future > 1] = 1
    cloud_fraction_persist = np.array([cloud_fraction_persist])
    cloud_fraction_persist[cloud_fraction_persist < 0] = 0
    cloud_fraction_persist[cloud_fraction_persist > 1] = 1

    # Calculate GHI under cloud for the next time step
    future_clearsky_ghi = future_cs_ghi
    future_cloudysky_ghi = (1 - cloud_albedo_future) * future_clearsky_ghi
    F1f = ((cloud_fraction_persist * future_cloudysky_ghi) +
           ((1 - cloud_fraction_persist) * future_clearsky_ghi))
    ghi_forecast_v3 = F1f * (1 - albedo * cloud_albedo_future *
                             cloud_fraction_persist * transmit) ** -1

    # Return the final forecast as a Series indexed by the future time stamp
    final_ghi_forecast = ghi_forecast_v3
    final_ghi_forecast = pd.Series(final_ghi_forecast[0], index=future_time)
    return final_ghi_forecast
pandas.Series
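

# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). A minimal,
# hypothetical example of chaining the helpers defined above for one forecast
# step. The timezone, sunrise/sunset times and GHI value are invented for
# illustration; in the real application sunrise/sunset would come from PVlib's
# sun rise/set tools (see the 'get_sun_rise_set_transit' note in the
# docstrings) and the observation would come from the platform.
if __name__ == '__main__':
    now = valid_datetime(datetime(2018, 11, 7, 12, 30))  # assumed current time
    sunrise = pd.DatetimeIndex(['2018-11-07 06:45'])     # assumed value
    sunset = pd.DatetimeIndex(['2018-11-07 16:50'])      # assumed value
    if time_to_forecast(sunrise, sunset, now, timezone='America/Denver'):
        latest_ghi = pd.Series([450.0])              # last good observation (made up)
        ghi_obs = persistence_model(latest_ghi)      # fallback when no new obs arrives
        print('Observation used for the forecast:', ghi_obs.values)
    else:
        print('Sun is down; no forecast issued.')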
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter

df = pd.read_csv("data/nebraska_tweets_0323-0327_labeled_edges.csv")


# Plot total tweet volume per hour
def plot_total_vol(df):
    df_plot = df.copy()  # work on a copy so the caller's frame is not modified
    df_plot['count'] = 1
    df_plot = (df_plot.set_index(pd.DatetimeIndex(df_plot['created_at']))
               .resample('H').sum()[['count']])
    df_plot.plot(title='Total Tweet Volume per Hour')
    plt.xlabel("Time")
    plt.ylabel("Tweet Volume")
    plt.show()


# Plot hourly tweet volume broken down by content label
def plot_content_vol(df):
    df_plot = pd.get_dummies(df[['content_label', 'created_at']],
                             prefix=[None], columns=['content_label'])
    df_plot = (df_plot.set_index(pd.DatetimeIndex(df_plot['created_at']))
               .resample('H').sum())
    df_plot.plot(title='Content Tweet Volume per Hour')
    plt.xlabel("Time")
    plt.ylabel("Tweet Volume")
    plt.show()


# Plot hourly tweet volume broken down by sentiment label
def plot_sentiment_vol(df):
    df_plot =
pd.get_dummies(df[['sentiment_label', 'created_at']], prefix=[None], columns=['sentiment_label'])
pandas.get_dummies
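

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): the one-hot + hourly
# resample pattern used above, shown on a tiny synthetic frame so it runs
# without the Nebraska CSV. The column names mirror the real data
# ('created_at', 'sentiment_label'); the timestamps and labels are invented.
import pandas as pd

toy = pd.DataFrame({
    'created_at': ['2020-03-23 10:05', '2020-03-23 10:40', '2020-03-23 11:10'],
    'sentiment_label': ['positive', 'negative', 'positive'],
})
dummies = pd.get_dummies(toy, columns=['sentiment_label'])
hourly = (dummies
          .set_index(pd.DatetimeIndex(dummies['created_at']))
          .drop(columns='created_at')
          .resample('H')
          .sum())
print(hourly)  # one indicator column per label, one row per hour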
# -*- coding: utf-8 -*-
"""
Created on Wed May 17 10:29:39 2017

@author: yinonbaron
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os

# Get the path of the script
file_path = os.path.dirname(os.path.realpath(__file__))

data = pd.DataFrame(index=['plants', 'fungi', 'protists', 'animals', 'bacteria', 'archaea'],
                    columns=['terrestrial', 'marine', 'deep subsurface'])

fig1 = pd.read_excel(file_path + '/../../results.xlsx', 'Table1 & Fig1', index_col=(0, 1))
fig2a =
pd.read_excel(file_path + '/../../results.xlsx', 'Fig2A',index_col=(0,1))
pandas.read_excel
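

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): what index_col=(0, 1)
# produces. The real results.xlsx is not reproduced here; a small frame with
# the same two-level index shape is built by hand (values are placeholders)
# to show how entries are pulled out with a (level_0, level_1) tuple.
import pandas as pd

toy = pd.DataFrame(
    {'Biomass [Gt C]': [450.0, 2.0]},
    index=pd.MultiIndex.from_tuples(
        [('Plants', 'Terrestrial'), ('Animals', 'Marine')],
        names=['Taxon', 'Environment']))
print(toy.loc[('Plants', 'Terrestrial'), 'Biomass [Gt C]'])  # -> 450.0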
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Jun 9 09:58:22 2020 @author: lenakilian """ import tabula import pandas as pd import numpy as np use_for_io = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.2', '1.1.3.1', '1.1.3.2', '1.1.4', '1.1.5', '1.1.6', '1.1.7', '1.1.8', '1.1.9', '1.1.10.1', '1.1.10.2', '1.1.10.3', '1.1.10.4', '1.1.11.1', '1.1.11.2', '1.1.11.3', '1.1.12.1', '1.1.12.2', '1.1.12.3', '1.1.13', '1.1.14', '1.1.15.1', '1.1.15.2', '1.1.16', '1.1.17', '1.1.18.1', '1.1.18.2', '1.1.19.1', '1.1.19.2', '1.1.19.3', '1.1.19.4', '1.1.19.5', '1.1.19.6', '1.1.20', '1.1.21', '1.1.22', '1.1.23.1', '1.1.23.2', '1.1.23.3', '1.1.23.4', '1.1.24', '1.1.25', '1.1.26', '1.1.27', '1.1.28.1', '1.1.28.2', '1.1.29', '1.1.30', '1.1.31', '1.1.32', '1.1.33.1', '1.1.33.2', '1.1.33.3', '1.2.1', '1.2.2', '1.2.3', '1.2.4', '1.2.5', '1.2.6', '2.1.1', '2.1.2.1', '2.1.2.2', '2.1.2.3', '2.1.3.1', '2.1.3.2', '2.1.4', '2.2.1', '2.2.2.1', '2.2.2.2', '3.1.1', '3.1.2', '3.1.3', '3.1.4', '3.1.5', '3.1.6', '3.1.7', '3.1.8', '3.1.9.1', '3.1.9.2', '3.1.9.3', '3.1.9.4', '3.1.10', '3.1.11.1', '3.1.11.2', '3.2.1', '3.2.2', '3.2.3', '3.2.4', '4.1.1', '4.1.2', '4.2.1', '4.2.2', '4.2.3', '4.2.4', '4.3.1', '4.3.2', '4.3.3', '4.4.1', '4.4.2', '4.4.3.1', '4.4.3.2', '4.4.3.3', '5.1.1.1', '5.1.1.2', '5.1.1.3', '5.1.2.1', '5.1.2.2', '5.2.1', '5.2.2', '5.3.1', '5.3.2', '5.3.3', '5.3.4', '5.3.5', '5.3.6', '5.3.7', '5.3.8', '5.3.9', '5.4.1', '5.4.2', '5.4.3', '5.4.4', '5.5.1', '5.5.2', '5.5.3', '5.5.4', '5.5.5', '5.6.1.1', '5.6.1.2', '5.6.2.1', '5.6.2.2', '5.6.2.3', '5.6.2.4', '5.6.3.1', '5.6.3.2', '5.6.3.3', '6.1.1.1', '6.1.1.2', '6.1.1.3', '6.1.1.4', '6.1.2.1', '6.1.2.2', '6.2.1.1', '6.2.1.2', '6.2.1.3', '6.2.2', '7.1.1.1', '7.1.1.2', '7.1.2.1', '7.1.2.2', '7.1.3.1', '7.1.3.2', '7.1.3.3', '7.2.1.1', '7.2.1.2', '7.2.1.3', '7.2.1.4', '7.2.2.1', '7.2.2.2', '7.2.2.3', '7.2.3.1', '7.2.3.2', '7.2.4.1', '7.2.4.2', '7.2.4.3', '7.2.4.4', '7.2.4.5', '7.3.1.1', '7.3.1.2', '7.3.2.1', '7.3.2.2', '7.3.3.1', '7.3.3.2', '7.3.4.1', '7.3.4.2', '7.3.4.3', '7.3.4.4', '7.3.4.5', '7.3.4.6', '7.3.4.7', '7.3.4.8', '8.1', '8.2.1', '8.2.2', '8.2.3', '8.3.1', '8.3.2', '8.3.3', '8.3.4', '8.4', '9.1.1.1', '9.1.1.2', '9.1.2.1', '9.1.2.2', '9.1.2.3', '9.1.2.4', '9.1.2.5', '9.1.2.6', '9.1.2.7', '9.1.2.8', '9.1.2.9', '9.1.3.1', '9.1.3.2', '9.1.3.3', '9.2.1', '9.2.2', '9.2.3', '9.2.4', '9.2.5', '9.2.6', '9.2.7', '9.2.8', '9.3.1', '9.3.2.1', '9.3.2.2', '9.3.3', '9.3.4.1', '9.3.4.2', '9.3.4.3', '9.3.4.4', '9.3.5.1', '9.3.5.2', '9.3.5.3', '9.4.1.1', '9.4.1.2', '9.4.1.3', '9.4.1.4', '9.4.1.5', '9.4.2.1', '9.4.2.2', '9.4.2.3', '9.4.3.1', '9.4.3.2', '9.4.3.3', '9.4.3.4', '9.4.3.5', '9.4.3.6', '9.4.4.1', '9.4.4.2', '9.4.4.3', '9.4.5', '9.4.6.1', '9.4.6.2', '9.4.6.3', '9.4.6.4', '9.5.1', '9.5.2', '9.5.3', '9.5.4', '9.5.5', '10.1', '10.2', '11.1.1', '11.1.2', '11.1.3', '192.168.127.12', '172.16.31.10', '192.168.3.11', '172.16.17.32', '11.1.5', '172.16.17.32', '172.16.17.32', '11.2.1', '11.2.2', '11.2.3', '12.1.1', '12.1.2', '192.168.3.11', '172.16.17.32', '172.16.17.32', '12.1.4', '172.16.17.32', '172.16.31.10', '172.16.17.32', '172.16.58.3', '172.16.58.3', '172.16.17.32', '172.16.17.32', '192.168.127.12', '192.168.3.11', '192.168.3.11', '192.168.3.11', '192.168.127.12', '192.168.3.11', '192.168.127.12', '172.16.31.10', '172.16.17.32', '12.4.2', '192.168.3.11', '172.16.58.3', '12.4.4', '172.16.58.3', '172.16.31.10', '192.168.3.11', '192.168.3.11', '192.168.127.12', '172.16.31.10', '172.16.31.10', '172.16.17.32', '172.16.17.32', '192.168.3.11', '192.168.3.11', 
'172.16.17.32', '172.16.58.3', '13.1.1', '13.1.2', '13.1.3', '13.2.1', '13.2.2', '13.2.3', '13.3.1', '13.3.2', '192.168.3.11', '172.16.17.32', '172.16.31.10', '172.16.17.32', '172.16.31.10', '172.16.31.10', '172.16.58.3', '172.16.31.10', '14.1.1', '14.1.2', '14.1.3', '14.2', '14.3.1', '14.3.2', '14.3.3', '14.3.4', '14.3.5', '14.3.6', '14.3.7', '14.4.1', '14.4.2', '14.5.1', '14.5.2', '14.5.3', '14.5.4', '14.5.5', '14.5.6', '14.5.7', '14.5.8', '14.6.1', '14.6.2', '14.6.3', '14.7', '14.8'] # import specs documentation years = ['2001-2002', '2002-2003', '2003-2004', '2004-2005', '2005-2006', '2006', '2007', '2009', '2010', '2013', '2014', '2015-2016', '2016-2017'] # save first 2 as excel --> come in PDF pages = ['261-277', '203-219']; names = ['4697userguide1.pdf', '5003userguide3.pdf'] for j in range(2): file = 'LCFS/'+ years[j] + '/mrdoc/pdf/' + names[j] temp = tabula.io.read_pdf(file, pages = pages[j], multiple_tables = True, pandas_options={'header':None}) writer = pd.ExcelWriter('LCFS/'+ years[j] + '/mrdoc/excel/specs.xlsx') for i in range(len(temp)): temp[i].to_excel(writer, 'Sheet ' + str(i)) writer.save() names = ['specs.xlsx', 'specs.xlsx', '5210spec2003-04.xls', '5375tablea1spec2004-05.xls', '5688_specification_family_spending_EFS_2005-06.xls', '5986_spec2006_userguide.xls', '6118_spec2007_userguide.xls', '6655spec2009_v2.xls', '6945spec2010.xls', '7702_2013_specification.xls', '7992_spec2014.xls', '8210_spec_2015-16.xls', '8351_spec2016-17.xls'] specs = {} for j in range(len(years)): specs[int(years[j][:4])] = pd.read_excel('LCFS/'+ years[j] + '/mrdoc/excel/' + names[j], sheet_name=None, header=None) cleaned_specs = {} for year in list(specs.keys())[2:]: cleaned_specs[year] = {} i = 0 for item in list(specs[year].keys()): cleaned_specs[year][item] = specs[year][item] if 'Family Spending' in item: pass elif 'changes' in item: pass else: if 'FS code' in cleaned_specs[year][item].iloc[:, 1].tolist(): cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, 1:] if 'FS code' in cleaned_specs[year][item].iloc[:, 0].tolist(): cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, 1:] if 'FS codes' in cleaned_specs[year][item].iloc[:, 0].tolist(): cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, 1:] cleaned_specs[year][item] = cleaned_specs[year][item].loc[cleaned_specs[year][item].iloc[:, 0] != 'FS code']\ .dropna(axis=0, how='all').dropna(axis=1, how='all') cleaned_specs[year][item] = cleaned_specs[year][item].loc[cleaned_specs[year][item].iloc[:, 0] != 'Variable'] if 'Alcohol' in item or 'Clothing' in item: if len(cleaned_specs[year][item].columns) > 6: cleaned_specs[year][item] = cleaned_specs[year][item].dropna(axis=1, how='all') if len(cleaned_specs[year][item].columns) > 6: cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, :-1] else: if len(cleaned_specs[year][item].columns) > 6: cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, :-1] if len(cleaned_specs[year][item].columns) > 6: cleaned_specs[year][item] = cleaned_specs[year][item].dropna(axis=1, how='all') cleaned_specs[year][item].columns = ['LCFS_1', 'COIPLUS_1', 'Desc_1', 'LCFS_2', 'COIPLUS_2', 'Desc_2'] cleaned_specs[year][item].loc[cleaned_specs[year][item]['LCFS_1'].str.len() > 90, 'LCFS_1'] = np.nan cleaned_specs[year][item] = cleaned_specs[year][item].dropna(how='all') for j in range(1, 3): cleaned_specs[year][item].loc[ cleaned_specs[year][item]['COIPLUS_' + str(j)].str[-1] == '.', 'COIPLUS_' + str(j)] = cleaned_specs[year][item]['COIPLUS_' + str(j)].str[:-1] if i == 0: 
cleaned_specs[year]['all'] = cleaned_specs[year][item] i += 1 else: cleaned_specs[year]['all'] = cleaned_specs[year]['all'].append(cleaned_specs[year][item]) writer = pd.ExcelWriter('LCFS/lcfs_coiplus_lookup.xlsx') check_specs = all_specs = {year:cleaned_specs[year]['all'].dropna(how='all') for year in list(specs.keys())[2:]} new_specs = {} no_rooms = ['A114', 'a114', 'a114', 'a114', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p'] room_dict = dict(zip([int(x[:4]) for x in years], no_rooms)) for year in list(check_specs.keys()): check_specs[year].index = list(range(len(check_specs[year]))) check_specs[year].loc[check_specs[year]['COIPLUS_2'].isnull() == True, 'COIPLUS_2'] = check_specs[year]['COIPLUS_1'] for i in range(1, 3): if i == 1: temp = check_specs[year][['LCFS_1', 'LCFS_2', 'COIPLUS_1', 'Desc_1']] temp.loc[temp['LCFS_1'].isnull() == True, 'LCFS_1'] = temp['LCFS_2'] else: temp = all_specs[year][['LCFS_2', 'COIPLUS_2', 'Desc_2']] temp.index = list(range(len(temp))) temp2 = temp['COIPLUS_' + str(i)].tolist(); temp3 = temp['Desc_' + str(i)].tolist() for j in range(1, len(temp2)): if pd.isnull(temp2[j]) == True: temp2[j] = temp2[j-1]; temp3[j] = temp3[j-1] temp['COIPLUS_all'] = temp2; temp['Desc_all'] = temp3 temp = temp[['LCFS_' + str(i), 'COIPLUS_all', 'Desc_all']].apply(lambda x: x.astype(str)) temp = temp.set_index(['COIPLUS_all', 'Desc_all']).groupby('COIPLUS_all')['LCFS_' + str(i)].transform(lambda x: '+'.join(x)).drop_duplicates() temp.columns = 'LCFS' if i == 1: new_specs[year] = temp else: new_specs[year] = new_specs[year].append(temp).reset_index() new_specs[year].columns = ['COIPLUS', 'Description', 'LCFS_Code'] new_specs[year]['LCFS_Code'] = [x.replace(' ', '').replace('nan+', '')\ .replace('+nan', '').replace('nan', '')\ .replace('++', '+').replace('+-', '-')\ .replace('-', '-1*') for x in new_specs[year]['LCFS_Code'].tolist()] new_specs[year]['COIPLUS'] = [x.split(' ')[0] for x in new_specs[year]['COIPLUS'].tolist()] new_specs[year] = new_specs[year].loc[new_specs[year]['COIPLUS'] != 'nan'] new_specs[year]['Level_1'] = [pd.to_numeric(x.split('.')[0], errors='coerce') for x in new_specs[year]['COIPLUS'].tolist()] for i in range(2, 5): temp = [] for x in new_specs[year]['COIPLUS'].tolist(): if len(x.split('.')) > i-1: temp.append(pd.to_numeric(x.split('.')[i-1], errors='coerce')) else: temp.append(0) new_specs[year]['Level_' + str(i)] = temp new_specs[year].loc[new_specs[year]['LCFS_Code'].str[-1] == '+', 'LCFS_Code'] = new_specs[year]['LCFS_Code'].str[:-1] new_specs[year] = new_specs[year].set_index(['Level_1', 'Level_2', 'Level_3', 'Level_4']).sort_index().drop_duplicates() new_specs[year].loc[new_specs[year]['COIPLUS'] == '4.1.2', 'Description'] = 'Imputed Rent' new_specs[year].loc[new_specs[year]['COIPLUS'] == '4.1.2', 'LCFS_Code'] = 'owned_prop*' + room_dict[year] new_specs[year] = new_specs[year].loc[new_specs[year]['Description'] != 'nan'] new_specs[year] = new_specs[year].loc[new_specs[year]['LCFS_Code'] != ''] new_specs[year]['IO_use'] = False new_specs[year].loc[new_specs[year]['COIPLUS'].isin( use_for_io) == True, 'IO_use'] = True new_specs[year].loc[new_specs[year][ 'Description'] != 'Stationery, diaries, address books, art materials'] new_specs[year]['Description'] = new_specs[year]['Description'].str.replace(' and ', ' & ') new_specs[year] = new_specs[year].drop_duplicates() new_specs[year].to_excel(writer, str(year)) writer.save() check = new_specs[2003].loc[new_specs[2003]['IO_use'] == True] 
# missing # 8.4 - Internet Subscription Fees - 9.4.3.7 in coiplus desc_anne_john = pd.read_excel('LCFS/lcfs_desc_anne&john.xlsx', header=None) desc_anne_john['COICOP'] = [x.split(' ')[0] for x in desc_anne_john[0]] desc_anne_john['Description_AJ'] = [' '.join(x.split(' ')[1:]) for x in desc_anne_john[0]] coicop_anne_john =
pd.read_excel('LCFS/lcfs_coicop_lookup_anne&john.xlsx', sheet_name=None)
pandas.read_excel
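

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): the COICOP level-splitting
# used above, isolated on a toy frame. The codes here are examples only; the
# real script applies the same idea to the 'COIPLUS' column of new_specs[year].
import pandas as pd

coiplus = pd.DataFrame({'COIPLUS': ['1.1.1.1', '8.4', '9.4.3.7']})
for i in range(1, 5):
    coiplus['Level_' + str(i)] = [
        pd.to_numeric(code.split('.')[i - 1], errors='coerce')
        if len(code.split('.')) >= i else 0
        for code in coiplus['COIPLUS']]
print(coiplus.sort_values(['Level_1', 'Level_2', 'Level_3', 'Level_4']))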
from bs4 import BeautifulSoup import nltk nltk.download('wordnet') nltk.download('stopwords') from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer import pandas as pd import pickle import string import json import random import os def load_data(raw=None): """ load data to development workspaces Parameters -------------- raw: (bool) if True, function returns cleaned dataset. return (df) data frame of data and its labels """ raw_data = [] with open('./data/cyber_data.json') as f: for line in f: raw_data.append(json.loads(line)) labels = [int(d['annotation']['label'][0]) for d in raw_data] text = [d['content'] for d in raw_data] data = {'text': text, 'label': labels} df =
pd.DataFrame(data, columns=['text', 'label'])
pandas.DataFrame
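

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): one way the frame returned
# by load_data() could feed the TfidfVectorizer imported above. A two-row toy
# frame stands in for the real './data/cyber_data.json'; texts and labels are
# invented.
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer

toy = pd.DataFrame({'text': ['you are great', 'you are awful'],
                    'label': [0, 1]})
vectorizer = TfidfVectorizer(stop_words='english')
features = vectorizer.fit_transform(toy['text'])
print(features.shape, toy['label'].values)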
# -*- coding: utf-8 -*- from datetime import timedelta import operator from string import ascii_lowercase import warnings import numpy as np import pytest from pandas.compat import lrange import pandas.util._test_decorators as td import pandas as pd from pandas import ( Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna, notna, to_datetime, to_timedelta) import pandas.core.algorithms as algorithms import pandas.core.nanops as nanops import pandas.util.testing as tm def assert_stat_op_calc(opname, alternative, frame, has_skipna=True, check_dtype=True, check_dates=False, check_less_precise=False, skipna_alternative=None): """ Check that operator opname works as advertised on frame Parameters ---------- opname : string Name of the operator to test on frame alternative : function Function that opname is tested against; i.e. "frame.opname()" should equal "alternative(frame)". frame : DataFrame The object that the tests are executed on has_skipna : bool, default True Whether the method "opname" has the kwarg "skip_na" check_dtype : bool, default True Whether the dtypes of the result of "frame.opname()" and "alternative(frame)" should be checked. check_dates : bool, default false Whether opname should be tested on a Datetime Series check_less_precise : bool, default False Whether results should only be compared approximately; passed on to tm.assert_series_equal skipna_alternative : function, default None NaN-safe version of alternative """ f = getattr(frame, opname) if check_dates: df = DataFrame({'b': date_range('1/1/2001', periods=2)}) result = getattr(df, opname)() assert isinstance(result, Series) df['a'] = lrange(len(df)) result = getattr(df, opname)() assert isinstance(result, Series) assert len(result) if has_skipna: def wrapper(x): return alternative(x.values) skipna_wrapper = tm._make_skipna_wrapper(alternative, skipna_alternative) result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) tm.assert_series_equal(result0, frame.apply(wrapper), check_dtype=check_dtype, check_less_precise=check_less_precise) # HACK: win32 tm.assert_series_equal(result1, frame.apply(wrapper, axis=1), check_dtype=False, check_less_precise=check_less_precise) else: skipna_wrapper = alternative result0 = f(axis=0) result1 = f(axis=1) tm.assert_series_equal(result0, frame.apply(skipna_wrapper), check_dtype=check_dtype, check_less_precise=check_less_precise) if opname in ['sum', 'prod']: expected = frame.apply(skipna_wrapper, axis=1) tm.assert_series_equal(result1, expected, check_dtype=False, check_less_precise=check_less_precise) # check dtypes if check_dtype: lcd_dtype = frame.values.dtype assert lcd_dtype == result0.dtype assert lcd_dtype == result1.dtype # bad axis with pytest.raises(ValueError, match='No axis named 2'): f(axis=2) # all NA case if has_skipna: all_na = frame * np.NaN r0 = getattr(all_na, opname)(axis=0) r1 = getattr(all_na, opname)(axis=1) if opname in ['sum', 'prod']: unit = 1 if opname == 'prod' else 0 # result for empty sum/prod expected = pd.Series(unit, index=r0.index, dtype=r0.dtype) tm.assert_series_equal(r0, expected) expected = pd.Series(unit, index=r1.index, dtype=r1.dtype) tm.assert_series_equal(r1, expected) def assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only=False): """ Check that API for operator opname works as advertised on frame Parameters ---------- opname : string Name of the operator to test on frame float_frame : DataFrame DataFrame with columns of type float float_string_frame : DataFrame DataFrame with both 
float and string columns has_numeric_only : bool, default False Whether the method "opname" has the kwarg "numeric_only" """ # make sure works on mixed-type frame getattr(float_string_frame, opname)(axis=0) getattr(float_string_frame, opname)(axis=1) if has_numeric_only: getattr(float_string_frame, opname)(axis=0, numeric_only=True) getattr(float_string_frame, opname)(axis=1, numeric_only=True) getattr(float_frame, opname)(axis=0, numeric_only=False) getattr(float_frame, opname)(axis=1, numeric_only=False) def assert_bool_op_calc(opname, alternative, frame, has_skipna=True): """ Check that bool operator opname works as advertised on frame Parameters ---------- opname : string Name of the operator to test on frame alternative : function Function that opname is tested against; i.e. "frame.opname()" should equal "alternative(frame)". frame : DataFrame The object that the tests are executed on has_skipna : bool, default True Whether the method "opname" has the kwarg "skip_na" """ f = getattr(frame, opname) if has_skipna: def skipna_wrapper(x): nona = x.dropna().values return alternative(nona) def wrapper(x): return alternative(x.values) result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) tm.assert_series_equal(result0, frame.apply(wrapper)) tm.assert_series_equal(result1, frame.apply(wrapper, axis=1), check_dtype=False) # HACK: win32 else: skipna_wrapper = alternative wrapper = alternative result0 = f(axis=0) result1 = f(axis=1) tm.assert_series_equal(result0, frame.apply(skipna_wrapper)) tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False) # bad axis with pytest.raises(ValueError, match='No axis named 2'): f(axis=2) # all NA case if has_skipna: all_na = frame * np.NaN r0 = getattr(all_na, opname)(axis=0) r1 = getattr(all_na, opname)(axis=1) if opname == 'any': assert not r0.any() assert not r1.any() else: assert r0.all() assert r1.all() def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, has_bool_only=False): """ Check that API for boolean operator opname works as advertised on frame Parameters ---------- opname : string Name of the operator to test on frame float_frame : DataFrame DataFrame with columns of type float float_string_frame : DataFrame DataFrame with both float and string columns has_bool_only : bool, default False Whether the method "opname" has the kwarg "bool_only" """ # make sure op works on mixed-type frame mixed = float_string_frame mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5 getattr(mixed, opname)(axis=0) getattr(mixed, opname)(axis=1) if has_bool_only: getattr(mixed, opname)(axis=0, bool_only=True) getattr(mixed, opname)(axis=1, bool_only=True) getattr(bool_frame_with_na, opname)(axis=0, bool_only=False) getattr(bool_frame_with_na, opname)(axis=1, bool_only=False) class TestDataFrameAnalytics(object): # --------------------------------------------------------------------- # Correlation and covariance @td.skip_if_no_scipy def test_corr_pearson(self, float_frame): float_frame['A'][:5] = np.nan float_frame['B'][5:10] = np.nan self._check_method(float_frame, 'pearson') @td.skip_if_no_scipy def test_corr_kendall(self, float_frame): float_frame['A'][:5] = np.nan float_frame['B'][5:10] = np.nan self._check_method(float_frame, 'kendall') @td.skip_if_no_scipy def test_corr_spearman(self, float_frame): float_frame['A'][:5] = np.nan float_frame['B'][5:10] = np.nan self._check_method(float_frame, 'spearman') def _check_method(self, frame, method='pearson'): correls = frame.corr(method=method) expected = 
frame['A'].corr(frame['C'], method=method) tm.assert_almost_equal(correls['A']['C'], expected) @td.skip_if_no_scipy def test_corr_non_numeric(self, float_frame, float_string_frame): float_frame['A'][:5] = np.nan float_frame['B'][5:10] = np.nan # exclude non-numeric types result = float_string_frame.corr() expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr() tm.assert_frame_equal(result, expected) @td.skip_if_no_scipy @pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman']) def test_corr_nooverlap(self, meth): # nothing in common df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan], 'B': [np.nan, np.nan, np.nan, 1, 1.5, 1], 'C': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}) rs = df.corr(meth) assert isna(rs.loc['A', 'B']) assert isna(rs.loc['B', 'A']) assert rs.loc['A', 'A'] == 1 assert rs.loc['B', 'B'] == 1 assert isna(rs.loc['C', 'C']) @td.skip_if_no_scipy @pytest.mark.parametrize('meth', ['pearson', 'spearman']) def test_corr_constant(self, meth): # constant --> all NA df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan], 'B': [np.nan, np.nan, np.nan, 1, 1, 1]}) rs = df.corr(meth) assert isna(rs.values).all() def test_corr_int(self): # dtypes other than float64 #1761 df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]}) df3.cov() df3.corr() @td.skip_if_no_scipy def test_corr_int_and_boolean(self): # when dtypes of pandas series are different # then ndarray will have dtype=object, # so it need to be properly handled df = DataFrame({"a": [True, False], "b": [1, 0]}) expected = DataFrame(np.ones((2, 2)), index=[ 'a', 'b'], columns=['a', 'b']) for meth in ['pearson', 'kendall', 'spearman']: with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) result = df.corr(meth) tm.assert_frame_equal(result, expected) def test_corr_cov_independent_index_column(self): # GH 14617 df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4), columns=list("abcd")) for method in ['cov', 'corr']: result = getattr(df, method)() assert result.index is not result.columns assert result.index.equals(result.columns) def test_corr_invalid_method(self): # GH 22298 df = pd.DataFrame(np.random.normal(size=(10, 2))) msg = ("method must be either 'pearson', " "'spearman', 'kendall', or a callable, ") with pytest.raises(ValueError, match=msg): df.corr(method="____") def test_cov(self, float_frame, float_string_frame): # min_periods no NAs (corner case) expected = float_frame.cov() result = float_frame.cov(min_periods=len(float_frame)) tm.assert_frame_equal(expected, result) result = float_frame.cov(min_periods=len(float_frame) + 1) assert isna(result.values).all() # with NAs frame = float_frame.copy() frame['A'][:5] = np.nan frame['B'][5:10] = np.nan result = float_frame.cov(min_periods=len(float_frame) - 8) expected = float_frame.cov() expected.loc['A', 'B'] = np.nan expected.loc['B', 'A'] = np.nan # regular float_frame['A'][:5] = np.nan float_frame['B'][:10] = np.nan cov = float_frame.cov() tm.assert_almost_equal(cov['A']['C'], float_frame['A'].cov(float_frame['C'])) # exclude non-numeric types result = float_string_frame.cov() expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov() tm.assert_frame_equal(result, expected) # Single column frame df = DataFrame(np.linspace(0.0, 1.0, 10)) result = df.cov() expected = DataFrame(np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns) tm.assert_frame_equal(result, expected) df.loc[0] = np.nan result = df.cov() expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)), 
index=df.columns, columns=df.columns) tm.assert_frame_equal(result, expected) def test_corrwith(self, datetime_frame): a = datetime_frame noise = Series(np.random.randn(len(a)), index=a.index) b = datetime_frame.add(noise, axis=0) # make sure order does not matter b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:]) del b['B'] colcorr = a.corrwith(b, axis=0) tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A'])) rowcorr = a.corrwith(b, axis=1) tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0)) dropped = a.corrwith(b, axis=0, drop=True) tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A'])) assert 'B' not in dropped dropped = a.corrwith(b, axis=1, drop=True) assert a.index[-1] not in dropped.index # non time-series data index = ['a', 'b', 'c', 'd', 'e'] columns = ['one', 'two', 'three', 'four'] df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns) df2 = DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns) correls = df1.corrwith(df2, axis=1) for row in index[:4]: tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row])) def test_corrwith_with_objects(self): df1 = tm.makeTimeDataFrame() df2 = tm.makeTimeDataFrame() cols = ['A', 'B', 'C', 'D'] df1['obj'] = 'foo' df2['obj'] = 'bar' result = df1.corrwith(df2) expected = df1.loc[:, cols].corrwith(df2.loc[:, cols]) tm.assert_series_equal(result, expected) result = df1.corrwith(df2, axis=1) expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1) tm.assert_series_equal(result, expected) def test_corrwith_series(self, datetime_frame): result = datetime_frame.corrwith(datetime_frame['A']) expected = datetime_frame.apply(datetime_frame['A'].corr) tm.assert_series_equal(result, expected) def test_corrwith_matches_corrcoef(self): df1 = DataFrame(np.arange(10000), columns=['a']) df2 = DataFrame(np.arange(10000) ** 2, columns=['a']) c1 = df1.corrwith(df2)['a'] c2 = np.corrcoef(df1['a'], df2['a'])[0][1] tm.assert_almost_equal(c1, c2) assert c1 < 1 def test_corrwith_mixed_dtypes(self): # GH 18570 df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3], 'c': ['a', 'b', 'c', 'd']}) s = pd.Series([0, 6, 7, 3]) result = df.corrwith(s) corrs = [df['a'].corr(s), df['b'].corr(s)] expected = pd.Series(data=corrs, index=['a', 'b']) tm.assert_series_equal(result, expected) def test_corrwith_index_intersection(self): df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"]) df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"]) result = df1.corrwith(df2, drop=True).index.sort_values() expected = df1.columns.intersection(df2.columns).sort_values() tm.assert_index_equal(result, expected) def test_corrwith_index_union(self): df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"]) df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"]) result = df1.corrwith(df2, drop=False).index.sort_values() expected = df1.columns.union(df2.columns).sort_values() tm.assert_index_equal(result, expected) def test_corrwith_dup_cols(self): # GH 21925 df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T) df2 = df1.copy() df2 = pd.concat((df2, df2[0]), axis=1) result = df1.corrwith(df2) expected = pd.Series(np.ones(4), index=[0, 0, 1, 2]) tm.assert_series_equal(result, expected) @td.skip_if_no_scipy def test_corrwith_spearman(self): # GH 21925 df = pd.DataFrame(np.random.random(size=(100, 3))) result = df.corrwith(df**2, method="spearman") expected = Series(np.ones(len(result))) tm.assert_series_equal(result, expected) @td.skip_if_no_scipy def 
test_corrwith_kendall(self): # GH 21925 df = pd.DataFrame(np.random.random(size=(100, 3))) result = df.corrwith(df**2, method="kendall") expected = Series(np.ones(len(result))) tm.assert_series_equal(result, expected) # --------------------------------------------------------------------- # Describe def test_bool_describe_in_mixed_frame(self): df = DataFrame({ 'string_data': ['a', 'b', 'c', 'd', 'e'], 'bool_data': [True, True, False, False, False], 'int_data': [10, 20, 30, 40, 50], }) # Integer data are included in .describe() output, # Boolean and string data are not. result = df.describe() expected = DataFrame({'int_data': [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]}, index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']) tm.assert_frame_equal(result, expected) # Top value is a boolean value that is False result = df.describe(include=['bool']) expected = DataFrame({'bool_data': [5, 2, False, 3]}, index=['count', 'unique', 'top', 'freq']) tm.assert_frame_equal(result, expected) def test_describe_bool_frame(self): # GH 13891 df = pd.DataFrame({ 'bool_data_1': [False, False, True, True], 'bool_data_2': [False, True, True, True] }) result = df.describe() expected = DataFrame({'bool_data_1': [4, 2, True, 2], 'bool_data_2': [4, 2, True, 3]}, index=['count', 'unique', 'top', 'freq']) tm.assert_frame_equal(result, expected) df = pd.DataFrame({ 'bool_data': [False, False, True, True, False], 'int_data': [0, 1, 2, 3, 4] }) result = df.describe() expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]}, index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']) tm.assert_frame_equal(result, expected) df = pd.DataFrame({ 'bool_data': [False, False, True, True], 'str_data': ['a', 'b', 'c', 'a'] }) result = df.describe() expected = DataFrame({'bool_data': [4, 2, True, 2], 'str_data': [4, 3, 'a', 2]}, index=['count', 'unique', 'top', 'freq']) tm.assert_frame_equal(result, expected) def test_describe_categorical(self): df = DataFrame({'value': np.random.randint(0, 10000, 100)}) labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)] cat_labels = Categorical(labels, labels) df = df.sort_values(by=['value'], ascending=True) df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False, labels=cat_labels) cat = df # Categoricals should not show up together with numerical columns result = cat.describe() assert len(result.columns) == 1 # In a frame, describe() for the cat should be the same as for string # arrays (count, unique, top, freq) cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'], ordered=True) s = Series(cat) result = s.describe() expected = Series([4, 2, "b", 3], index=['count', 'unique', 'top', 'freq']) tm.assert_series_equal(result, expected) cat = Series(Categorical(["a", "b", "c", "c"])) df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]}) result = df3.describe() tm.assert_numpy_array_equal(result["cat"].values, result["s"].values) def test_describe_categorical_columns(self): # GH 11558 columns = pd.CategoricalIndex(['int1', 'int2', 'obj'], ordered=True, name='XXX') df = DataFrame({'int1': [10, 20, 30, 40, 50], 'int2': [10, 20, 30, 40, 50], 'obj': ['A', 0, None, 'X', 1]}, columns=columns) result = df.describe() exp_columns = pd.CategoricalIndex(['int1', 'int2'], categories=['int1', 'int2', 'obj'], ordered=True, name='XXX') expected = DataFrame({'int1': [5, 30, df.int1.std(), 10, 20, 30, 40, 50], 'int2': [5, 30, df.int2.std(), 10, 20, 30, 40, 50]}, index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 
'max'], columns=exp_columns) tm.assert_frame_equal(result, expected) tm.assert_categorical_equal(result.columns.values, expected.columns.values) def test_describe_datetime_columns(self): columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], freq='MS', tz='US/Eastern', name='XXX') df = DataFrame({0: [10, 20, 30, 40, 50], 1: [10, 20, 30, 40, 50], 2: ['A', 0, None, 'X', 1]}) df.columns = columns result = df.describe() exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'], freq='MS', tz='US/Eastern', name='XXX') expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50], 1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50]}, index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']) expected.columns = exp_columns tm.assert_frame_equal(result, expected) assert result.columns.freq == 'MS' assert result.columns.tz == expected.columns.tz def test_describe_timedelta_values(self): # GH 6145 t1 = pd.timedelta_range('1 days', freq='D', periods=5) t2 = pd.timedelta_range('1 hours', freq='H', periods=5) df = pd.DataFrame({'t1': t1, 't2': t2}) expected = DataFrame({'t1': [5, pd.Timedelta('3 days'), df.iloc[:, 0].std(), pd.Timedelta('1 days'), pd.Timedelta('2 days'), pd.Timedelta('3 days'), pd.Timedelta('4 days'), pd.Timedelta('5 days')], 't2': [5, pd.Timedelta('3 hours'), df.iloc[:, 1].std(), pd.Timedelta('1 hours'), pd.Timedelta('2 hours'), pd.Timedelta('3 hours'), pd.Timedelta('4 hours'), pd.Timedelta('5 hours')]}, index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']) result = df.describe() tm.assert_frame_equal(result, expected) exp_repr = (" t1 t2\n" "count 5 5\n" "mean 3 days 00:00:00 0 days 03:00:00\n" "std 1 days 13:56:50.394919 0 days 01:34:52.099788\n" "min 1 days 00:00:00 0 days 01:00:00\n" "25% 2 days 00:00:00 0 days 02:00:00\n" "50% 3 days 00:00:00 0 days 03:00:00\n" "75% 4 days 00:00:00 0 days 04:00:00\n" "max 5 days 00:00:00 0 days 05:00:00") assert repr(result) == exp_repr def test_describe_tz_values(self, tz_naive_fixture): # GH 21332 tz = tz_naive_fixture s1 = Series(range(5)) start = Timestamp(2018, 1, 1) end = Timestamp(2018, 1, 5) s2 = Series(date_range(start, end, tz=tz)) df = pd.DataFrame({'s1': s1, 's2': s2}) expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan, 2, 1.581139, 0, 1, 2, 3, 4], 's2': [5, 5, s2.value_counts().index[0], 1, start.tz_localize(tz), end.tz_localize(tz), np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}, index=['count', 'unique', 'top', 'freq', 'first', 'last', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] ) result = df.describe(include='all') tm.assert_frame_equal(result, expected) # --------------------------------------------------------------------- # Reductions def test_stat_op_api(self, float_frame, float_string_frame): assert_stat_op_api('count', float_frame, float_string_frame, has_numeric_only=True) assert_stat_op_api('sum', float_frame, float_string_frame, has_numeric_only=True) assert_stat_op_api('nunique', float_frame, float_string_frame) assert_stat_op_api('mean', float_frame, float_string_frame) assert_stat_op_api('product', float_frame, float_string_frame) assert_stat_op_api('median', float_frame, float_string_frame) assert_stat_op_api('min', float_frame, float_string_frame) assert_stat_op_api('max', float_frame, float_string_frame) assert_stat_op_api('mad', float_frame, float_string_frame) assert_stat_op_api('var', float_frame, float_string_frame) assert_stat_op_api('std', float_frame, float_string_frame) assert_stat_op_api('sem', float_frame, 
float_string_frame) assert_stat_op_api('median', float_frame, float_string_frame) try: from scipy.stats import skew, kurtosis # noqa:F401 assert_stat_op_api('skew', float_frame, float_string_frame) assert_stat_op_api('kurt', float_frame, float_string_frame) except ImportError: pass def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame): def count(s): return notna(s).sum() def nunique(s): return len(algorithms.unique1d(s.dropna())) def mad(x): return np.abs(x - x.mean()).mean() def var(x): return np.var(x, ddof=1) def std(x): return np.std(x, ddof=1) def sem(x): return np.std(x, ddof=1) / np.sqrt(len(x)) def skewness(x): from scipy.stats import skew # noqa:F811 if len(x) < 3: return np.nan return skew(x, bias=False) def kurt(x): from scipy.stats import kurtosis # noqa:F811 if len(x) < 4: return np.nan return kurtosis(x, bias=False) assert_stat_op_calc('nunique', nunique, float_frame_with_na, has_skipna=False, check_dtype=False, check_dates=True) # mixed types (with upcasting happening) assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'), check_dtype=False, check_less_precise=True) assert_stat_op_calc('sum', np.sum, float_frame_with_na, skipna_alternative=np.nansum) assert_stat_op_calc('mean', np.mean, float_frame_with_na, check_dates=True) assert_stat_op_calc('product', np.prod, float_frame_with_na) assert_stat_op_calc('mad', mad, float_frame_with_na) assert_stat_op_calc('var', var, float_frame_with_na) assert_stat_op_calc('std', std, float_frame_with_na) assert_stat_op_calc('sem', sem, float_frame_with_na) assert_stat_op_calc('count', count, float_frame_with_na, has_skipna=False, check_dtype=False, check_dates=True) try: from scipy import skew, kurtosis # noqa:F401 assert_stat_op_calc('skew', skewness, float_frame_with_na) assert_stat_op_calc('kurt', kurt, float_frame_with_na) except ImportError: pass # TODO: Ensure warning isn't emitted in the first place @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") def test_median(self, float_frame_with_na, int_frame): def wrapper(x): if isna(x).any(): return np.nan return np.median(x) assert_stat_op_calc('median', wrapper, float_frame_with_na, check_dates=True) assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False, check_dates=True) @pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']) def test_stat_operators_attempt_obj_array(self, method): # GH#676 data = { 'a': [-0.00049987540199591344, -0.0016467257772919831, 0.00067695870775883013], 'b': [-0, -0, 0.0], 'c': [0.00031111847529610595, 0.0014902627951905339, -0.00094099200035979691] } df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O') df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object) for df in [df1, df2]: assert df.values.dtype == np.object_ result = getattr(df, method)(1) expected = getattr(df.astype('f8'), method)(1) if method in ['sum', 'prod']: tm.assert_series_equal(result, expected) @pytest.mark.parametrize('op', ['mean', 'std', 'var', 'skew', 'kurt', 'sem']) def test_mixed_ops(self, op): # GH#16116 df = DataFrame({'int': [1, 2, 3, 4], 'float': [1., 2., 3., 4.], 'str': ['a', 'b', 'c', 'd']}) result = getattr(df, op)() assert len(result) == 2 with pd.option_context('use_bottleneck', False): result = getattr(df, op)() assert len(result) == 2 def test_reduce_mixed_frame(self): # GH 6806 df = DataFrame({ 'bool_data': [True, True, False, False, False], 'int_data': [10, 20, 30, 40, 50], 'string_data': ['a', 'b', 'c', 'd', 'e'], }) 
df.reindex(columns=['bool_data', 'int_data', 'string_data']) test = df.sum(axis=0) tm.assert_numpy_array_equal(test.values, np.array([2, 150, 'abcde'], dtype=object)) tm.assert_series_equal(test, df.T.sum(axis=1)) def test_nunique(self): df = DataFrame({'A': [1, 1, 1], 'B': [1, 2, 3], 'C': [1, np.nan, 3]}) tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2})) tm.assert_series_equal(df.nunique(dropna=False), Series({'A': 1, 'B': 3, 'C': 3})) tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2})) tm.assert_series_equal(df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2})) @pytest.mark.parametrize('tz', [None, 'UTC']) def test_mean_mixed_datetime_numeric(self, tz): # https://github.com/pandas-dev/pandas/issues/24752 df = pd.DataFrame({"A": [1, 1], "B": [pd.Timestamp('2000', tz=tz)] * 2}) result = df.mean() expected = pd.Series([1.0], index=['A']) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('tz', [None, 'UTC']) def test_mean_excludeds_datetimes(self, tz): # https://github.com/pandas-dev/pandas/issues/24752 # Our long-term desired behavior is unclear, but the behavior in # 0.24.0rc1 was buggy. df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2}) result = df.mean() expected = pd.Series() tm.assert_series_equal(result, expected) def test_var_std(self, datetime_frame): result = datetime_frame.std(ddof=4) expected = datetime_frame.apply(lambda x: x.std(ddof=4)) tm.assert_almost_equal(result, expected) result = datetime_frame.var(ddof=4) expected = datetime_frame.apply(lambda x: x.var(ddof=4)) tm.assert_almost_equal(result, expected) arr = np.repeat(np.random.random((1, 1000)), 1000, 0) result = nanops.nanvar(arr, axis=0) assert not (result < 0).any() with pd.option_context('use_bottleneck', False): result = nanops.nanvar(arr, axis=0) assert not (result < 0).any() @pytest.mark.parametrize( "meth", ['sem', 'var', 'std']) def test_numeric_only_flag(self, meth): # GH 9201 df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz']) # set one entry to a number in str format df1.loc[0, 'foo'] = '100' df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz']) # set one entry to a non-number str df2.loc[0, 'foo'] = 'a' result = getattr(df1, meth)(axis=1, numeric_only=True) expected = getattr(df1[['bar', 'baz']], meth)(axis=1) tm.assert_series_equal(expected, result) result = getattr(df2, meth)(axis=1, numeric_only=True) expected = getattr(df2[['bar', 'baz']], meth)(axis=1) tm.assert_series_equal(expected, result) # df1 has all numbers, df2 has a letter inside msg = r"unsupported operand type\(s\) for -: 'float' and 'str'" with pytest.raises(TypeError, match=msg): getattr(df1, meth)(axis=1, numeric_only=False) msg = "could not convert string to float: 'a'" with pytest.raises(TypeError, match=msg): getattr(df2, meth)(axis=1, numeric_only=False) def test_sem(self, datetime_frame): result = datetime_frame.sem(ddof=4) expected = datetime_frame.apply( lambda x: x.std(ddof=4) / np.sqrt(len(x))) tm.assert_almost_equal(result, expected) arr = np.repeat(np.random.random((1, 1000)), 1000, 0) result = nanops.nansem(arr, axis=0) assert not (result < 0).any() with pd.option_context('use_bottleneck', False): result = nanops.nansem(arr, axis=0) assert not (result < 0).any() @td.skip_if_no_scipy def test_kurt(self): index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]]) df = DataFrame(np.random.randn(6, 3), index=index) kurt = df.kurt() kurt2 = 
df.kurt(level=0).xs('bar') tm.assert_series_equal(kurt, kurt2, check_names=False) assert kurt.name is None assert kurt2.name == 'bar' @pytest.mark.parametrize("dropna, expected", [ (True, {'A': [12], 'B': [10.0], 'C': [1.0], 'D': ['a'], 'E': Categorical(['a'], categories=['a']), 'F': to_datetime(['2000-1-2']), 'G': to_timedelta(['1 days'])}), (False, {'A': [12], 'B': [10.0], 'C': [np.nan], 'D': np.array([np.nan], dtype=object), 'E': Categorical([np.nan], categories=['a']), 'F': [pd.NaT], 'G': to_timedelta([pd.NaT])}), (True, {'H': [8, 9, np.nan, np.nan], 'I': [8, 9, np.nan, np.nan], 'J': [1, np.nan, np.nan, np.nan], 'K': Categorical(['a', np.nan, np.nan, np.nan], categories=['a']), 'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']), 'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']), 'N': [0, 1, 2, 3]}), (False, {'H': [8, 9, np.nan, np.nan], 'I': [8, 9, np.nan, np.nan], 'J': [1, np.nan, np.nan, np.nan], 'K': Categorical([np.nan, 'a', np.nan, np.nan], categories=['a']), 'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']), 'M': to_timedelta(['nan', '1 days', 'nan', 'nan']), 'N': [0, 1, 2, 3]}) ]) def test_mode_dropna(self, dropna, expected): df = DataFrame({"A": [12, 12, 19, 11], "B": [10, 10, np.nan, 3], "C": [1, np.nan, np.nan, np.nan], "D": [np.nan, np.nan, 'a', np.nan], "E": Categorical([np.nan, np.nan, 'a', np.nan]), "F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']), "G": to_timedelta(['1 days', 'nan', 'nan', 'nan']), "H": [8, 8, 9, 9], "I": [9, 9, 8, 8], "J": [1, 1, np.nan, np.nan], "K": Categorical(['a', np.nan, 'a', np.nan]), "L": to_datetime(['2000-1-2', '2000-1-2', 'NaT', 'NaT']), "M": to_timedelta(['1 days', 'nan', '1 days', 'nan']), "N": np.arange(4, dtype='int64')}) result = df[sorted(list(expected.keys()))].mode(dropna=dropna) expected = DataFrame(expected) tm.assert_frame_equal(result, expected) def test_mode_sortwarning(self): # Check for the warning that is raised when the mode # results cannot be sorted df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']}) expected = DataFrame({'A': ['a', np.nan]}) with tm.assert_produces_warning(UserWarning, check_stacklevel=False): result = df.mode(dropna=False) result = result.sort_values(by='A').reset_index(drop=True) tm.assert_frame_equal(result, expected) def test_operators_timedelta64(self): df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'), B=date_range('2012-1-2', periods=3, freq='D'), C=Timestamp('20120101') - timedelta(minutes=5, seconds=5))) diffs = DataFrame(dict(A=df['A'] - df['C'], B=df['A'] - df['B'])) # min result = diffs.min() assert result[0] == diffs.loc[0, 'A'] assert result[1] == diffs.loc[0, 'B'] result = diffs.min(axis=1) assert (result == diffs.loc[0, 'B']).all() # max result = diffs.max() assert result[0] == diffs.loc[2, 'A'] assert result[1] == diffs.loc[2, 'B'] result = diffs.max(axis=1) assert (result == diffs['A']).all() # abs result = diffs.abs() result2 = abs(diffs) expected = DataFrame(dict(A=df['A'] - df['C'], B=df['B'] - df['A'])) tm.assert_frame_equal(result, expected) tm.assert_frame_equal(result2, expected) # mixed frame mixed = diffs.copy() mixed['C'] = 'foo' mixed['D'] = 1 mixed['E'] = 1. 
mixed['F'] = Timestamp('20130101') # results in an object array result = mixed.min() expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)), pd.Timedelta(timedelta(days=-1)), 'foo', 1, 1.0, Timestamp('20130101')], index=mixed.columns) tm.assert_series_equal(result, expected) # excludes numeric result = mixed.min(axis=1) expected = Series([1, 1, 1.], index=[0, 1, 2]) tm.assert_series_equal(result, expected) # works when only those columns are selected result = mixed[['A', 'B']].min(1) expected = Series([timedelta(days=-1)] * 3) tm.assert_series_equal(result, expected) result = mixed[['A', 'B']].min() expected = Series([timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=['A', 'B']) tm.assert_series_equal(result, expected) # GH 3106 df = DataFrame({'time': date_range('20130102', periods=5), 'time2': date_range('20130105', periods=5)}) df['off1'] = df['time2'] - df['time'] assert df['off1'].dtype == 'timedelta64[ns]' df['off2'] = df['time'] - df['time2'] df._consolidate_inplace() assert df['off1'].dtype == 'timedelta64[ns]' assert df['off2'].dtype == 'timedelta64[ns]' def test_sum_corner(self): empty_frame = DataFrame() axis0 = empty_frame.sum(0) axis1 = empty_frame.sum(1) assert isinstance(axis0, Series) assert isinstance(axis1, Series) assert len(axis0) == 0 assert len(axis1) == 0 @pytest.mark.parametrize('method, unit', [ ('sum', 0), ('prod', 1), ]) def test_sum_prod_nanops(self, method, unit): idx = ['a', 'b', 'c'] df = pd.DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) # The default result = getattr(df, method) expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') # min_count=1 result = getattr(df, method)(min_count=1) expected = pd.Series([unit, unit, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count=0 result = getattr(df, method)(min_count=0) expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') tm.assert_series_equal(result, expected) result = getattr(df.iloc[1:], method)(min_count=1) expected = pd.Series([unit, np.nan, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count > 1 df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) result = getattr(df, method)(min_count=5) expected = pd.Series(result, index=['A', 'B']) tm.assert_series_equal(result, expected) result = getattr(df, method)(min_count=6) expected = pd.Series(result, index=['A', 'B']) tm.assert_series_equal(result, expected) def test_sum_nanops_timedelta(self): # prod isn't defined on timedeltas idx = ['a', 'b', 'c'] df = pd.DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]}) df2 = df.apply(pd.to_timedelta) # 0 by default result = df2.sum() expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx) tm.assert_series_equal(result, expected) # min_count=0 result = df2.sum(min_count=0) tm.assert_series_equal(result, expected) # min_count=1 result = df2.sum(min_count=1) expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx) tm.assert_series_equal(result, expected) def test_sum_object(self, float_frame): values = float_frame.values.astype(int) frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns) deltas = frame * timedelta(1) deltas.sum() def test_sum_bool(self, float_frame): # ensure this works, bug report bools = np.isnan(float_frame) bools.sum(1) bools.sum(0) def test_mean_corner(self, float_frame, float_string_frame): # unit test when have object data the_mean = float_string_frame.mean(axis=0) the_sum = float_string_frame.sum(axis=0, numeric_only=True) 
tm.assert_index_equal(the_sum.index, the_mean.index) assert len(the_mean.index) < len(float_string_frame.columns) # xs sum mixed type, just want to know it works... the_mean = float_string_frame.mean(axis=1) the_sum = float_string_frame.sum(axis=1, numeric_only=True) tm.assert_index_equal(the_sum.index, the_mean.index) # take mean of boolean column float_frame['bool'] = float_frame['A'] > 0 means = float_frame.mean(0) assert means['bool'] == float_frame['bool'].values.mean() def test_stats_mixed_type(self, float_string_frame): # don't blow up float_string_frame.std(1) float_string_frame.var(1) float_string_frame.mean(1) float_string_frame.skew(1) def test_sum_bools(self): df = DataFrame(index=lrange(1), columns=lrange(10)) bools = isna(df) assert bools.sum(axis=1)[0] == 10 # --------------------------------------------------------------------- # Cumulative Reductions - cumsum, cummax, ... def test_cumsum_corner(self): dm = DataFrame(np.arange(20).reshape(4, 5), index=lrange(4), columns=lrange(5)) # ?(wesm) result = dm.cumsum() # noqa def test_cumsum(self, datetime_frame): datetime_frame.loc[5:10, 0] = np.nan datetime_frame.loc[10:15, 1] = np.nan datetime_frame.loc[15:, 2] = np.nan # axis = 0 cumsum = datetime_frame.cumsum() expected = datetime_frame.apply(Series.cumsum) tm.assert_frame_equal(cumsum, expected) # axis = 1 cumsum = datetime_frame.cumsum(axis=1) expected = datetime_frame.apply(Series.cumsum, axis=1) tm.assert_frame_equal(cumsum, expected) # works df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) result = df.cumsum() # noqa # fix issue cumsum_xs = datetime_frame.cumsum(axis=1) assert np.shape(cumsum_xs) == np.shape(datetime_frame) def test_cumprod(self, datetime_frame): datetime_frame.loc[5:10, 0] = np.nan datetime_frame.loc[10:15, 1] = np.nan datetime_frame.loc[15:, 2] = np.nan # axis = 0 cumprod = datetime_frame.cumprod() expected = datetime_frame.apply(Series.cumprod) tm.assert_frame_equal(cumprod, expected) # axis = 1 cumprod = datetime_frame.cumprod(axis=1) expected = datetime_frame.apply(Series.cumprod, axis=1) tm.assert_frame_equal(cumprod, expected) # fix issue cumprod_xs = datetime_frame.cumprod(axis=1) assert np.shape(cumprod_xs) == np.shape(datetime_frame) # ints df = datetime_frame.fillna(0).astype(int) df.cumprod(0) df.cumprod(1) # ints32 df = datetime_frame.fillna(0).astype(np.int32) df.cumprod(0) df.cumprod(1) def test_cummin(self, datetime_frame): datetime_frame.loc[5:10, 0] = np.nan datetime_frame.loc[10:15, 1] = np.nan datetime_frame.loc[15:, 2] = np.nan # axis = 0 cummin = datetime_frame.cummin() expected = datetime_frame.apply(Series.cummin) tm.assert_frame_equal(cummin, expected) # axis = 1 cummin = datetime_frame.cummin(axis=1) expected = datetime_frame.apply(Series.cummin, axis=1) tm.assert_frame_equal(cummin, expected) # it works df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) result = df.cummin() # noqa # fix issue cummin_xs = datetime_frame.cummin(axis=1) assert np.shape(cummin_xs) == np.shape(datetime_frame) def test_cummax(self, datetime_frame): datetime_frame.loc[5:10, 0] = np.nan datetime_frame.loc[10:15, 1] = np.nan datetime_frame.loc[15:, 2] = np.nan # axis = 0 cummax = datetime_frame.cummax() expected = datetime_frame.apply(Series.cummax) tm.assert_frame_equal(cummax, expected) # axis = 1 cummax = datetime_frame.cummax(axis=1) expected = datetime_frame.apply(Series.cummax, axis=1) tm.assert_frame_equal(cummax, expected) # it works df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) result = df.cummax() # noqa # fix issue 
cummax_xs = datetime_frame.cummax(axis=1) assert np.shape(cummax_xs) == np.shape(datetime_frame) # --------------------------------------------------------------------- # Miscellanea def test_count(self): # corner case frame = DataFrame() ct1 = frame.count(1) assert isinstance(ct1, Series) ct2 = frame.count(0) assert isinstance(ct2, Series) # GH#423 df = DataFrame(index=lrange(10)) result = df.count(1) expected = Series(0, index=df.index) tm.assert_series_equal(result, expected) df = DataFrame(columns=lrange(10)) result = df.count(0) expected = Series(0, index=df.columns) tm.assert_series_equal(result, expected) df = DataFrame() result = df.count() expected = Series(0, index=[]) tm.assert_series_equal(result, expected) def test_count_objects(self, float_string_frame): dm = DataFrame(float_string_frame._series) df = DataFrame(float_string_frame._series) tm.assert_series_equal(dm.count(), df.count()) tm.assert_series_equal(dm.count(1), df.count(1)) def test_pct_change(self): # GH#11150 pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(0, 40, 10)]).astype(np.float64) pnl.iat[1, 0] = np.nan pnl.iat[1, 1] = np.nan pnl.iat[2, 3] = 60 for axis in range(2): expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift( axis=axis) - 1 result = pnl.pct_change(axis=axis, fill_method='pad') tm.assert_frame_equal(result, expected) # ---------------------------------------------------------------------- # Index of max / min def test_idxmin(self, float_frame, int_frame): frame = float_frame frame.loc[5:10] = np.nan frame.loc[15:20, -2:] = np.nan for skipna in [True, False]: for axis in [0, 1]: for df in [frame, int_frame]: result = df.idxmin(axis=axis, skipna=skipna) expected = df.apply(Series.idxmin, axis=axis, skipna=skipna) tm.assert_series_equal(result, expected) msg = ("No axis named 2 for object type" " <class 'pandas.core.frame.DataFrame'>") with pytest.raises(ValueError, match=msg): frame.idxmin(axis=2) def test_idxmax(self, float_frame, int_frame): frame = float_frame frame.loc[5:10] = np.nan frame.loc[15:20, -2:] = np.nan for skipna in [True, False]: for axis in [0, 1]: for df in [frame, int_frame]: result = df.idxmax(axis=axis, skipna=skipna) expected = df.apply(Series.idxmax, axis=axis, skipna=skipna) tm.assert_series_equal(result, expected) msg = ("No axis named 2 for object type" " <class 'pandas.core.frame.DataFrame'>") with pytest.raises(ValueError, match=msg): frame.idxmax(axis=2) # ---------------------------------------------------------------------- # Logical reductions @pytest.mark.parametrize('opname', ['any', 'all']) def test_any_all(self, opname, bool_frame_with_na, float_string_frame): assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na, has_skipna=True) assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, has_bool_only=True) def test_any_all_extra(self): df = DataFrame({ 'A': [True, False, False], 'B': [True, True, False], 'C': [True, True, True], }, index=['a', 'b', 'c']) result = df[['A', 'B']].any(1) expected = Series([True, True, False], index=['a', 'b', 'c']) tm.assert_series_equal(result, expected) result = df[['A', 'B']].any(1, bool_only=True) tm.assert_series_equal(result, expected) result = df.all(1) expected = Series([True, False, False], index=['a', 'b', 'c']) tm.assert_series_equal(result, expected) result = df.all(1, bool_only=True) tm.assert_series_equal(result, expected) # Axis is None result = df.all(axis=None).item() assert result is False result = df.any(axis=None).item() assert result is True result = 
df[['C']].all(axis=None).item() assert result is True def test_any_datetime(self): # GH 23070 float_data = [1, np.nan, 3, np.nan] datetime_data = [pd.Timestamp('1960-02-15'), pd.Timestamp('1960-02-16'), pd.NaT, pd.NaT] df = DataFrame({ "A": float_data, "B": datetime_data }) result = df.any(1) expected = Series([True, True, True, False]) tm.assert_series_equal(result, expected) def test_any_all_bool_only(self): # GH 25101 df = DataFrame({"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None]}) result = df.all(bool_only=True) expected = Series(dtype=np.bool) tm.assert_series_equal(result, expected) df = DataFrame({"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None], "col4": [False, False, True]}) result = df.all(bool_only=True) expected = Series({"col4": False}) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('func, data, expected', [ (np.any, {}, False), (np.all, {}, True), (np.any, {'A': []}, False), (np.all, {'A': []}, True), (np.any, {'A': [False, False]}, False), (np.all, {'A': [False, False]}, False), (np.any, {'A': [True, False]}, True), (np.all, {'A': [True, False]}, False), (np.any, {'A': [True, True]}, True), (np.all, {'A': [True, True]}, True), (np.any, {'A': [False], 'B': [False]}, False), (np.all, {'A': [False], 'B': [False]}, False), (np.any, {'A': [False, False], 'B': [False, True]}, True), (np.all, {'A': [False, False], 'B': [False, True]}, False), # other types (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False), (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True), (np.all, {'A': pd.Series([0, 1], dtype=int)}, False), (np.any, {'A':
pd.Series([0, 1], dtype=int)
pandas.Series
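# The record above exercises DataFrame.any/DataFrame.all (and their np.any/np.all
# counterparts) over small boolean frames; a minimal standalone sketch of those
# reductions, assuming only pandas -- the column names below are illustrative,
# not taken from the test fixtures.
import pandas as pd

df = pd.DataFrame({'A': [True, False, False], 'B': [True, True, False]})
print(df.any())                    # per-column reduction: A -> True, B -> True
print(df.all(axis=1))              # per-row reduction: True, False, False
print(df.any(axis=None).item())    # reduce over both axes to a single bool: True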
import os import pandas as pd import pytest from pandas.testing import assert_frame_equal from .. import read_sql @pytest.fixture(scope="module") # type: ignore def postgres_url() -> str: conn = os.environ["POSTGRES_URL"] return conn @pytest.mark.xfail def test_on_non_select(postgres_url: str) -> None: query = "CREATE TABLE non_select(id INTEGER NOT NULL)" df = read_sql(postgres_url, query) def test_aggregation(postgres_url: str) -> None: query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool" df = read_sql(postgres_url, query) expected = pd.DataFrame( index=range(3), data={ "test_bool": pd.Series([None, False, True], dtype="boolean"), "sum": pd.Series([10.9, 5.2, -10.0], dtype="float64") } ) assert_frame_equal(df, expected, check_names=True) def test_partition_on_aggregation(postgres_url: str) -> None: query = "SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool" df = read_sql(postgres_url, query, partition_on="test_int", partition_num=2) expected = pd.DataFrame( index=range(3), data={ "test_bool": pd.Series([None, False, True], dtype="boolean"), "test_int": pd.Series([4, 5, 1315], dtype="Int64") } ) assert_frame_equal(df, expected, check_names=True) def test_aggregation2(postgres_url: str) -> None: query = "select DISTINCT(test_bool) from test_table" df = read_sql(postgres_url, query) expected = pd.DataFrame( index=range(3), data={ "test_bool": pd.Series([None, False, True], dtype="boolean"), } )
assert_frame_equal(df, expected, check_names=True)
pandas.testing.assert_frame_equal
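# The tests above build their expected frames with pandas' nullable dtypes and
# compare them via assert_frame_equal; a self-contained sketch of that comparison
# pattern (the values below are illustrative, not the contents of test_table).
import pandas as pd
from pandas.testing import assert_frame_equal

expected = pd.DataFrame({
    "test_bool": pd.Series([None, False, True], dtype="boolean"),
    "test_int": pd.Series([4, 5, 1315], dtype="Int64"),
})
result = expected.copy()
assert_frame_equal(result, expected, check_names=True)  # passes: same values, dtypes, labels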
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 09:34:37 2018

@author: SilverDoe
"""

'''
========================== DataFrame ==========================================

 pandas.DataFrame( data, index, columns, dtype, copy)

 Parameters :
 ============
 1. data : Takes various forms: ndarray, Series, map, list, dict, constants and
    also another DataFrame.
 2. index : Row labels. Optional; defaults to np.arange(n) if no index is passed.
 3. columns : Column labels. Optional; defaults to np.arange(n) if no column
    labels are passed.
 4. dtype : Data type of each column.
 5. copy : Whether to copy the input data. Defaults to False.
'''

#=============== Empty DataFrame =================================================
import pandas as pd
df = pd.DataFrame()
print(df)

#============= DataFrame from Lists ============================================

# no index passed, no column names given
import pandas as pd
data = [1,2,3,4,5]
df = pd.DataFrame(data)
print(df)

# no index passed, column names given
import pandas as pd
data = [['Natsu',13],['Lisanna',9],['Happy',1]]
df = pd.DataFrame(data,columns=['Name','Age'])
print(df)

# no index passed, column names given, datatype passed
import pandas as pd
data = [['Natsu',13],['Lisanna',8],['Happy',1]]
df = pd.DataFrame(data,columns=['Name','Age'],dtype=float)
print(df)

#========== Dataframe from Dictionary of ndarrays/lists ============================================
'''
>> All the ndarrays must be of the same length. If an index is passed, its length
   must equal the length of the arrays.

>> To preserve the order of the columns:
   1. use an OrderedDict (on older Python versions, where plain dicts do not
      preserve insertion order).
   2. pass the columns argument while creating the dataframe.
   3. reorder the columns afterwards with
      df = df[list of column names in the desired order]
'''

# using arrays, No index given.
import pandas as pd
data = {'Name':['Lisanna', 'Natsu', 'Erza', 'Gray'],'Age':[15,20,23,20]}
df = pd.DataFrame(data)
print(df)

# using arrays, Index given.
import pandas as pd
data = {'Name':['Lisanna', 'Natsu', 'Erza', 'Gray'],'Age':[15,20,23,20]}
df = pd.DataFrame(data, index=['rank1','rank2','rank3','rank4'])
print(df)

'''
>> A list of dictionaries can be passed as input data to create a DataFrame.
   The dictionary keys are taken as column names by default.

>> NaN (Not a Number) is filled in for missing values when the dictionaries do
   not share all keys.
'''

# using lists, no index given
import pandas as pd
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
df = pd.DataFrame(data)
print(df)

# using lists, index given
import pandas as pd
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
df = pd.DataFrame(data, index=['first', 'second'])
print(df)

# using lists, index given, columns given
import pandas as pd
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]

# With two column labels, values same as dictionary keys
df1 = pd.DataFrame(data, index=['first', 'second'], columns=['a', 'b'])
print(df1)

# With two column labels, one of which is not a dictionary key (filled with NaN)
df2 = pd.DataFrame(data, index=['first', 'second'], columns=['a', 'b1'])
print(df2)

# using a dictionary of Series
import pandas as pd
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
     'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
print(df)

# using an ordered dictionary to preserve the order of the columns
import numpy as np
import pandas as pd
from collections import OrderedDict

a = np.array( [ 1, 2, 3 ] )
b = np.array( [ 4, 5, 6 ] )
c = np.array( [ 7, 8, 9 ] )

nd = { 'p': pd.Series(a), 'z': pd.Series(b), 'n': pd.Series(c) }  # normal dictionary
od = OrderedDict( { 'p': pd.Series(a), 'z': pd.Series(b), 'n': pd.Series(c) } )  # ordered dictionary

df =
pd.DataFrame(od)
pandas.DataFrame
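# The notes in the script above list several ways to control column order when
# building a frame from a dict; a brief sketch of the two non-OrderedDict options
# it mentions (passing `columns=` at construction, or reindexing by selection).
# On Python 3.7+ with recent pandas, plain dicts already preserve insertion order.
import pandas as pd

data = {'p': pd.Series([1, 2, 3]), 'z': pd.Series([4, 5, 6]), 'n': pd.Series([7, 8, 9])}
df = pd.DataFrame(data, columns=['p', 'z', 'n'])  # order fixed at construction
df = df[['n', 'z', 'p']]                          # or reorder afterwards by column selection
print(df.columns.tolist())                        # ['n', 'z', 'p']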
# Copyright (c) 2018, NVIDIA CORPORATION. from timeit import default_timer as timer import numpy as np import pandas as pd import pytest import cudf from cudf.dataframe import DataFrame from cudf.tests.utils import assert_eq def make_params(): np.random.seed(0) hows = "left,inner,outer,right".split(",") methods = "hash,sort".split(",") # Test specific cases (1) aa = [0, 0, 4, 5, 5] bb = [0, 0, 2, 3, 5] for how in hows: if how in ["left", "inner", "right"]: for method in methods: yield (aa, bb, how, method) else: yield (aa, bb, how, "sort") # Test specific cases (2) aa = [0, 0, 1, 2, 3] bb = [0, 1, 2, 2, 3] for how in hows: if how in ["left", "inner", "right"]: for method in methods: yield (aa, bb, how, method) else: yield (aa, bb, how, "sort") # Test large random integer inputs aa = np.random.randint(0, 50, 100) bb = np.random.randint(0, 50, 100) for how in hows: if how in ["left", "inner", "right"]: for method in methods: yield (aa, bb, how, method) else: yield (aa, bb, how, "sort") # Test floating point inputs aa = np.random.random(50) bb = np.random.random(50) for how in hows: if how in ["left", "inner", "right"]: for method in methods: yield (aa, bb, how, method) else: yield (aa, bb, how, "sort") @pytest.mark.parametrize("aa,bb,how,method", make_params()) def test_dataframe_join_how(aa, bb, how, method): df = DataFrame() df["a"] = aa df["b"] = bb def work_pandas(df): ts = timer() df1 = df.set_index("a") df2 = df.set_index("b") joined = df1.join(df2, how=how, sort=True) te = timer() print("timing", type(df), te - ts) return joined def work_gdf(df): ts = timer() df1 = df.set_index("a") df2 = df.set_index("b") joined = df1.join(df2, how=how, sort=True, method=method) te = timer() print("timing", type(df), te - ts) return joined expect = work_pandas(df.to_pandas()) got = work_gdf(df) expecto = expect.copy() goto = got.copy() # Type conversion to handle NoneType expectb = expect.b expecta = expect.a gotb = got.b gota = got.a del got["b"] got.add_column("b", gotb.astype(np.float64).fillna(np.nan)) del got["a"] got.add_column("a", gota.astype(np.float64).fillna(np.nan)) expect.drop(["b"], axis=1) expect["b"] = expectb.astype(np.float64).fillna(np.nan) expect.drop(["a"], axis=1) expect["a"] = expecta.astype(np.float64).fillna(np.nan) assert got.index.name is None assert list(expect.columns) == list(got.columns) # test disabled until libgdf sort join gets updated with new api if method == "hash": assert np.all(expect.index.values == got.index.values) if how != "outer": # Newly introduced ambiguous ValueError thrown when # an index and column have the same name. Rename the # index so sorts work. # TODO: What is the less hacky way? 
expect.index.name = "bob" got.index.name = "mary" pd.util.testing.assert_frame_equal( got.to_pandas().sort_values(["b", "a"]).reset_index(drop=True), expect.sort_values(["b", "a"]).reset_index(drop=True), ) # if(how=='right'): # _sorted_check_series(expect['a'], expect['b'], # got['a'], got['b']) # else: # _sorted_check_series(expect['b'], expect['a'], got['b'], # got['a']) else: _check_series(expecto["b"].fillna(-1), goto["b"].fillna(-1)) _check_series(expecto["a"].fillna(-1), goto["a"].fillna(-1)) def _check_series(expect, got): magic = 0xDEADBEAF # print("expect\n", expect) # print("got\n", got.to_string(nrows=None)) direct_equal = np.all(expect.values == got.to_array()) nanfilled_equal = np.all( expect.fillna(magic).values == got.fillna(magic).to_array() ) msg = "direct_equal={}, nanfilled_equal={}".format( direct_equal, nanfilled_equal ) assert direct_equal or nanfilled_equal, msg def test_dataframe_join_suffix(): np.random.seed(0) df = DataFrame() for k in "abc": df[k] = np.random.randint(0, 5, 5) left = df.set_index("a") right = df.set_index("c") with pytest.raises(ValueError) as raises: left.join(right) raises.match( "there are overlapping columns but lsuffix" " and rsuffix are not defined" ) got = left.join(right, lsuffix="_left", rsuffix="_right", sort=True) # Get expected value pddf = df.to_pandas() expect = pddf.set_index("a").join( pddf.set_index("c"), lsuffix="_left", rsuffix="_right" ) # Check assert list(expect.columns) == list(got.columns) assert np.all(expect.index.values == got.index.values) for k in expect.columns: _check_series(expect[k].fillna(-1), got[k].fillna(-1)) def test_dataframe_join_cats(): lhs = DataFrame() lhs["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc")) lhs["b"] = bb = np.arange(len(lhs)) lhs = lhs.set_index("a") rhs = DataFrame() rhs["a"] = pd.Categorical(list("abcac"), categories=list("abc")) rhs["c"] = cc = np.arange(len(rhs)) rhs = rhs.set_index("a") got = lhs.join(rhs) expect = lhs.to_pandas().join(rhs.to_pandas()) # Note: pandas make a object Index after joining pd.util.testing.assert_frame_equal( got.sort_values(by="b") .to_pandas() .sort_index() .reset_index(drop=True), expect.reset_index(drop=True), ) # Just do some rough checking here. assert list(got.columns) == ["b", "c"] assert len(got) > 0 assert set(got.index.values) & set("abc") assert set(got["b"]) & set(bb) assert set(got["c"]) & set(cc) @pytest.mark.parametrize("how", ["left", "right", "inner", "outer"]) def test_dataframe_join_mismatch_cats(how): pdf1 = pd.DataFrame( { "join_col": ["a", "b", "c", "d", "e"], "data_col_left": [10, 20, 30, 40, 50], } ) pdf2 = pd.DataFrame( {"join_col": ["c", "e", "f"], "data_col_right": [6, 7, 8]} ) pdf1["join_col"] = pdf1["join_col"].astype("category") pdf2["join_col"] = pdf2["join_col"].astype("category") gdf1 = DataFrame.from_pandas(pdf1) gdf2 = DataFrame.from_pandas(pdf2) gdf1 = gdf1.set_index("join_col") gdf2 = gdf2.set_index("join_col") pdf1 = pdf1.set_index("join_col") pdf2 = pdf2.set_index("join_col") join_gdf = gdf1.join(gdf2, how=how, sort=True, method="hash") join_pdf = pdf1.join(pdf2, how=how) got = join_gdf.to_pandas() expect = join_pdf.fillna(-1) # note: cudf join doesn't mask NA # cudf creates the columns in different order than pandas for right join if how == "right": got = got[["data_col_left", "data_col_right"]] expect.data_col_right = expect.data_col_right.astype(np.int64) expect.data_col_left = expect.data_col_left.astype(np.int64) # Expect has the wrong index type. 
Quick fix to get index type working # again I think this implies that CategoricalIndex.to_pandas() is not # working correctly, since the below corrects it. Remove this line for # an annoying error. TODO: Make CategoricalIndex.to_pandas() work # correctly for the below case. # Error: # AssertionError: Categorical Expected type <class # 'pandas.core.arrays.categorical.Categorical'>, found <class # 'numpy.ndarray'> instead expect.index = pd.Categorical(expect.index) pd.util.testing.assert_frame_equal( got, expect, check_names=False, check_index_type=False, # For inner joins, pandas returns # weird categories. check_categorical=how != "inner", ) assert list(got.index) == list(expect.index) @pytest.mark.parametrize("on", ["key1", ["key1", "key2"], None]) def test_dataframe_merge_on(on): np.random.seed(0) # Make cuDF df_left = DataFrame() nelem = 500 df_left["key1"] = np.random.randint(0, 40, nelem) df_left["key2"] = np.random.randint(0, 50, nelem) df_left["left_val"] = np.arange(nelem) df_right = DataFrame() nelem = 500 df_right["key1"] = np.random.randint(0, 30, nelem) df_right["key2"] = np.random.randint(0, 50, nelem) df_right["right_val"] = np.arange(nelem) # Make pandas DF pddf_left = df_left.to_pandas() pddf_right = df_right.to_pandas() # Expected result (from pandas) pddf_joined = pddf_left.merge(pddf_right, on=on, how="left") # Test (from cuDF; doesn't check for ordering) join_result = df_left.merge(df_right, on=on, how="left") join_result_cudf = cudf.merge(df_left, df_right, on=on, how="left") join_result["right_val"] = ( join_result["right_val"].astype(np.float64).fillna(np.nan) ) join_result_cudf["right_val"] = ( join_result_cudf["right_val"].astype(np.float64).fillna(np.nan) ) for col in list(pddf_joined.columns): if col.count("_y") > 0: join_result[col] = ( join_result[col].astype(np.float64).fillna(np.nan) ) join_result_cudf[col] = ( join_result_cudf[col].astype(np.float64).fillna(np.nan) ) # Test dataframe equality (ignore order of rows and columns) cdf_result = ( join_result.to_pandas() .sort_values(list(pddf_joined.columns)) .reset_index(drop=True) ) pdf_result = pddf_joined.sort_values( list(pddf_joined.columns) ).reset_index(drop=True) pd.util.testing.assert_frame_equal(cdf_result, pdf_result, check_like=True) merge_func_result_cdf = ( join_result_cudf.to_pandas() .sort_values(list(pddf_joined.columns)) .reset_index(drop=True) ) pd.util.testing.assert_frame_equal( merge_func_result_cdf, cdf_result, check_like=True ) def test_dataframe_merge_on_unknown_column(): np.random.seed(0) # Make cuDF df_left = DataFrame() nelem = 500 df_left["key1"] = np.random.randint(0, 40, nelem) df_left["key2"] = np.random.randint(0, 50, nelem) df_left["left_val"] = np.arange(nelem) df_right = DataFrame() nelem = 500 df_right["key1"] = np.random.randint(0, 30, nelem) df_right["key2"] = np.random.randint(0, 50, nelem) df_right["right_val"] = np.arange(nelem) with pytest.raises(KeyError) as raises: df_left.merge(df_right, on="bad_key", how="left") raises.match("bad_key") def test_dataframe_merge_no_common_column(): np.random.seed(0) # Make cuDF df_left = DataFrame() nelem = 500 df_left["key1"] = np.random.randint(0, 40, nelem) df_left["key2"] = np.random.randint(0, 50, nelem) df_left["left_val"] = np.arange(nelem) df_right = DataFrame() nelem = 500 df_right["key3"] = np.random.randint(0, 30, nelem) df_right["key4"] = np.random.randint(0, 50, nelem) df_right["right_val"] = np.arange(nelem) with pytest.raises(ValueError) as raises: df_left.merge(df_right, how="left") raises.match("No common columns to 
perform merge on") def test_dataframe_empty_merge(): gdf1 = DataFrame([("a", []), ("b", [])]) gdf2 = DataFrame([("a", []), ("c", [])]) expect = DataFrame([("a", []), ("b", []), ("c", [])]) got = gdf1.merge(gdf2, how="left", on=["a"]) assert_eq(expect, got) def test_dataframe_merge_order(): gdf1 = DataFrame() gdf2 = DataFrame() gdf1["id"] = [10, 11] gdf1["timestamp"] = [1, 2] gdf1["a"] = [3, 4] gdf2["id"] = [4, 5] gdf2["a"] = [7, 8] gdf = gdf1.merge(gdf2, how="left", on=["id", "a"], method="hash") df1 =
pd.DataFrame()
pandas.DataFrame
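# work_pandas above defines the reference result as an index-aligned join; a
# pandas-only sketch of that set_index + join pattern, using the a/b values from
# the test's first specific case.
import pandas as pd

df = pd.DataFrame({"a": [0, 0, 4, 5, 5], "b": [0, 0, 2, 3, 5]})
left = df.set_index("a")     # keeps column "b", indexed by "a"
right = df.set_index("b")    # keeps column "a", indexed by "b"
joined = left.join(right, how="left", sort=True)
print(joined)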
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import pandas as pd import numpy as np from enum import Enum from datasets import make_dataset def load_eval_negative_samples(eval_sessions_negative_samples_json_path): eval_sessions_neg_samples_df = pd.read_json(eval_sessions_negative_samples_json_path, lines=True, dtype={'session_id': np.int64}) eval_sessions_neg_samples = dict(eval_sessions_neg_samples_df[['session_id', 'negative_items']].values) return eval_sessions_neg_samples G1_DATASET = "gcom" ADRESSA_DATASET = "adressa" class DataLoader: def __init__(self, dataset): features_config = self.get_session_features_config(dataset) self.init_dataset_iterator_local(features_config, batch_size=1) def get_session_features_config(self, dataset): user_id_type = 'int' if dataset == G1_DATASET else 'bytes' session_features_config = { 'single_features': { ##Control features 'user_id': {'type': 'categorical', 'dtype': user_id_type}, 'session_id': {'type': 'categorical', 'dtype': 'int'}, #'session_id': {'type': 'categorical', 'dtype': 'string'}, 'session_start': {'type': 'categorical', 'dtype': 'int'}, 'session_size': {'type': 'categorical', 'dtype': 'int'}, }, 'sequence_features': { #Required sequence features 'event_timestamp': {'type': 'categorical', 'dtype': 'int'}, 'item_clicked': {'type': 'categorical', 'dtype': 'int'}, #, 'cardinality': 364047}, } } return session_features_config def init_dataset_iterator_local(self, features_config, batch_size=128, truncate_session_length=20): with tf.device('/cpu:0'): self.files_placeholder = tf.placeholder(tf.string) # Make a dataset ds = make_dataset(self.files_placeholder, features_config, batch_size=batch_size, truncate_sequence_length=truncate_session_length) # Define an abstract iterator that has the shape and type of our datasets iterator = tf.data.Iterator.from_structure(ds.output_types, ds.output_shapes) # This is an op that gets the next element from the iterator self.next_element_op = iterator.get_next() # These ops let us switch and reinitialize every time we finish an epoch self.iterator_init_op = iterator.make_initializer(ds) def load_dataframe(self, data_filenames): data = [] session_cnt = 0 repeated = 0 with tf.Session() as sess: sess.run(self.iterator_init_op, feed_dict={self.files_placeholder: data_filenames}) while True: try: #One session by batch batch_inputs, batch_labels = sess.run(self.next_element_op) item_ids_session = set() session_id = batch_inputs['session_id'][0] for item, ts in zip(batch_inputs['item_clicked'][0], batch_inputs['event_timestamp'][0]): if item in item_ids_session: repeated += 1 item_ids_session.add(item) data.append((session_id, item, ts)) #Adding last item (label) last_item = batch_labels['label_last_item'][0][0] if last_item in item_ids_session: repeated += 1 data.append((session_id, last_item, ts)) session_cnt += 1 #if cnt % 100 == 0: # print("Sessions processed: {} - Clicks: {}".format(session_cnt, len(data))) except tf.errors.OutOfRangeError as e: break if len(data) > 0: print("Sessions read: {} - Clicks: {} - Repeated Clicks: {}".format(session_cnt, len(data), repeated)) else: print('WARNING: NO DATA FOUND!') data_df =
pd.DataFrame(data, columns=['SessionId', 'ItemId', 'Time'])
pandas.DataFrame
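# load_dataframe above accumulates (session_id, item, timestamp) tuples and only
# builds the frame at the end; a minimal sketch of that final step with made-up
# click data (the column names match the function's own output).
import pandas as pd

data = [(1, 101, 1528000000), (1, 102, 1528000060), (2, 101, 1528000120)]
data_df = pd.DataFrame(data, columns=['SessionId', 'ItemId', 'Time'])
print(data_df)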
import pandas as pd period = pd.Period('2020-06', freq='M') print(period) print(period.asfreq('D', 'start')) print(period.asfreq('D', 'end')) # Can perform period arithmetic - increment month print(period + 1) # Can create period range per month in a year monthly_period_range =
pd.period_range('2020-01', '2021-12', freq='M')
pandas.period_range
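# The snippet above stops right where the monthly range is assigned; a short
# sketch of what pd.period_range returns and how a PeriodIndex converts to other
# representations (the 2020 date range is just an example).
import pandas as pd

monthly = pd.period_range('2020-01', '2020-06', freq='M')
print(monthly)                       # PeriodIndex with 6 monthly periods
print(monthly.asfreq('D', 'end'))    # last day of each month, as daily periods
print(monthly.to_timestamp())        # DatetimeIndex anchored at each period's start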
import unittest import pandas as pd import numpy as np import datetime import pytz from variable_explorer_helpers import describe_pd_dataframe class TestDataframeDescribe(unittest.TestCase): def test_dataframe(self): df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) result = describe_pd_dataframe(df) self.assertEqual(result['row_count'], 2) self.assertEqual(result['column_count'], 2) self.assertEqual(len(result['rows_top']), 2) self.assertEqual(result['rows_bottom'], None) self.assertEqual(result['columns'][0]['name'], 'col1') def test_dataframe_sort(self): df = pd.DataFrame(data={'col1': [3, 1, 2]}) result = describe_pd_dataframe(df.sort_values('col1')) self.assertEqual(result['rows_top'][0]['col1'], 1) self.assertEqual(result['rows_top'][1]['col1'], 2) self.assertEqual(result['rows_top'][2]['col1'], 3) # _deepnote_index_column is hidden on frontend. See variable_explorer_helpers for more info. self.assertEqual(result['rows_top'][0]['_deepnote_index_column'], 1) # TODO: Support non-hashable types like [] def test_categorical_columns(self): df = pd.DataFrame(data={ 'cat1': ['a', 'b', 'c', 'd'], 'cat2': ['a', 'b', None, 'd'], # 'cat3': [1, (2,3), '4', []], 'cat3': [1, (2,3), '4', 5], 'cat4': [True, True, True, False], }) result = describe_pd_dataframe(df) self.assertEqual(result['row_count'], 4) self.assertEqual(result['column_count'], 4) self.assertEqual(len(result['rows_top']), 4) self.assertEqual(result['rows_bottom'], None) self.assertDictEqual(result['columns'][0], { 'name': 'cat1', 'dtype': 'object', 'stats': { 'unique_count': 4, 'nan_count': 0, 'categories': [ {'name': 'a', 'count': 1}, {'name': 'b', 'count': 1}, {'name': '2 others', 'count': 2}, ] }, }) self.assertEqual(result['columns'][1]['stats']['categories'], [ {'name': 'a', 'count': 1}, {'name': '2 others', 'count': 2}, {'name': 'Missing', 'count': 1}, ]) # TODO: Support for big ints which can't be converted to float64 and complex numbers def test_numerical_columns(self): df = pd.DataFrame(data={ 'col1': [1, 2, 3, 4], 'col2': [1, 2, None, 4], # 'col3': [1, 2.1, complex(-1.0, 0.0), 10**1000] 'col3': [1, 2.1, 3, 4] }) result = describe_pd_dataframe(df) self.assertEqual(result['row_count'], 4) self.assertEqual(result['column_count'], 3) self.assertEqual(len(result['rows_top']), 4) self.assertEqual(result['rows_bottom'], None) self.assertEqual(result['columns'][0]['name'], 'col1') def test_big_dataframe(self): import numpy as np df = pd.DataFrame(data={ 'col1': np.arange(100000), 'col2': np.arange(100000), 'col3': np.arange(100000), }) result = describe_pd_dataframe(df) self.assertEqual(result['row_count'], 100000) self.assertEqual(result['column_count'], 3) self.assertEqual(len(result['rows_top']), 166) self.assertEqual(len(result['rows_bottom']), 167) self.assertTrue('stats' in result['columns'][0]) self.assertTrue('stats' not in result['columns'][1]) df = pd.DataFrame(data={ 'col1': np.arange(200000), 'col2': np.arange(200000), 'col3': np.arange(200000), }) result = describe_pd_dataframe(df) self.assertTrue('stats' not in result['columns'][0]) def test_no_rows(self): df = pd.DataFrame(data={ 'col1': [], 'col2': [], }) result = describe_pd_dataframe(df) self.assertEqual(result['row_count'], 0) self.assertEqual(result['column_count'], 2) def test_no_columns(self): df =
pd.DataFrame(data={})
pandas.DataFrame
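# describe_pd_dataframe is a project-local helper whose implementation is not
# shown here; purely to illustrate the kind of per-column statistics the tests
# assert on (row/column counts, unique and missing counts, top categories), a
# rough pandas-only approximation -- not the actual variable_explorer_helpers code.
import pandas as pd

df = pd.DataFrame({'cat1': ['a', 'b', 'c', 'd'], 'cat2': ['a', 'b', None, 'd']})
print(len(df), len(df.columns))           # row_count, column_count
print(df['cat2'].nunique())               # unique_count (NaN excluded): 3
print(int(df['cat2'].isna().sum()))       # nan_count: 1
print(df['cat1'].value_counts().head(2))  # most frequent categories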
# coding: utf-8 # In[1]: """Running basic code: Importing packages, setting working directory, printing out date""" from IPython.display import HTML import pandas as pd import numpy as np from matplotlib import pyplot as plt import seaborn as sns from IPython.display import YouTubeVideo from scipy.spatial.distance import pdist, squareform from scipy.cluster.hierarchy import linkage, dendrogram from matplotlib.colors import ListedColormap import networkx as nx import urllib import os as os import pandas as pd import numpy as np import itertools import networkx as nx from bokeh.io import show, output_file from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, BoxZoomTool, ResetTool, PanTool, WheelZoomTool import bokeh.models.graphs as graphs #from bokeh.model.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes from bokeh.palettes import Spectral4 plt.rcParams['figure.figsize'] = (16, 9) plt.rcParams['font.size'] = 9 plt.rcParams['font.family'] = 'Times New Roman' plt.rcParams['axes.labelsize'] = plt.rcParams['font.size'] plt.rcParams['axes.titlesize'] = 1.5*plt.rcParams['font.size'] plt.rcParams['legend.fontsize'] = plt.rcParams['font.size'] plt.rcParams['xtick.labelsize'] = plt.rcParams['font.size'] plt.rcParams['ytick.labelsize'] = plt.rcParams['font.size'] plt.rcParams['savefig.dpi'] = 600 plt.rcParams['xtick.major.size'] = 3 plt.rcParams['xtick.minor.size'] = 3 plt.rcParams['xtick.major.width'] = 1 plt.rcParams['xtick.minor.width'] = 1 plt.rcParams['ytick.major.size'] = 3 plt.rcParams['ytick.minor.size'] = 3 plt.rcParams['ytick.major.width'] = 1 plt.rcParams['ytick.minor.width'] = 1 plt.rcParams['legend.frameon'] = False plt.rcParams['legend.loc'] = 'center left' plt.rcParams['axes.linewidth'] = 1 plt.gca().spines['right'].set_color('none') plt.gca().spines['top'].set_color('none') plt.gca().xaxis.set_ticks_position('bottom') plt.gca().yaxis.set_ticks_position('left') plt.gca().spines['right'].set_color('none') plt.gca().spines['top'].set_color('none') plt.gca().xaxis.set_ticks_position('bottom') plt.gca().yaxis.set_ticks_position('left') sns.set_style('white') plt.close() ############################################################################################# ############################################################################################# def plot_unipartite_network (title,network, network_name, layout_func): """Creating positions of the nodes""" if layout_func == 'fruchterman_reingold': layout = nx.fruchterman_reingold_layout(network, scale=2 )#k = 0.05, iterations=500 elif layout_func =='spring': layout = nx.spring_layout(network, k = 0.05, scale=2) elif layout_func =='circular': layout = nx.circular_layout(network, scale=1, center=None, dim=2) elif layout_func == 'kamada': layout = nx.kamada_kawai_layout(network, scale=1, center=None, dim=2) elif layout_func == 'spectral': layout = nx.spectral_layout(network, scale=1, center=None, dim=2) else: layout = nx.fruchterman_reingold_layout(network, scale=2 )#k = 0.05, iterations=500 from bokeh.models import ColumnDataSource from bokeh.plotting import show, figure , output_file from bokeh.io import output_notebook from bokeh.models import HoverTool output_notebook() nodes, nodes_coordinates = zip(*layout.items()) nodes_xs, nodes_ys = list(zip(*nodes_coordinates)) #nodes_source = ColumnDataSource(dict(x=nodes_xs, y=nodes_ys, # name=nodes,)) node_data = dict(x=nodes_xs, y=nodes_ys, name=nodes) nd = pd.DataFrame.from_dict(node_data).dropna() #hostc = '#377eb8' 
nodes_source = ColumnDataSource(dict(x=nd.x.tolist(), y=nd.y.tolist(), name = nd.name.tolist())) """ Generate the figure 1. Create tools 2. Set plot size and tools """ #hover = HoverTool(tooltips=[('', '@name')]) #hover = HoverTool(names=["name"]) plot = figure(title=title, plot_width=800, plot_height=800, tools=['pan','wheel_zoom', 'reset','box_zoom','tap' ]) """ plot main circles 1. Plot only nodes according to their positions """ r_circles = plot.circle('x', 'y', source=nodes_source, size=10, color= '#377eb8', alpha=0.5, level = 'overlay',name='name') """ Function Get data for generation of edges """ def get_edges_specs(_network, _layout): c = dict(xs=[], ys=[], alphas=[]) #print d weights = [d['weight'] for u, v, d in _network.edges(data=True)] max_weight = max(weights) calc_alpha = lambda h: 0.1 + 0.5 * (h / max_weight) # example: { ..., ('user47', 'da_bjoerni', {'weight': 3}), ... } for u, v, data in _network.edges(data=True): c['xs'].append([_layout[u][0], _layout[v][0]]) c['ys'].append([_layout[u][1], _layout[v][1]]) c['alphas'].append(calc_alpha(data['weight'])) return c """ get the data for edges """ lines_source = ColumnDataSource(get_edges_specs(network, layout)) """ plot edge lines """ r_lines = plot.multi_line('xs', 'ys', line_width=1.5, alpha=1 , color='#b3b6b7', source=lines_source, )#name = 'edge' """Centrality """ centrality = nx.algorithms.centrality.betweenness_centrality(network) """ first element are nodes again """ _, nodes_centrality = zip(*centrality.items()) max_centraliy = max(nodes_centrality) nodes_source.add([7 + 15 * t / max_centraliy for t in nodes_centrality], 'centrality') """Communities""" from community import community_louvain partition = community_louvain.best_partition(network) p_, nodes_community = zip(*partition.items()) nodes_source.add(nodes_community, 'community') community_colors = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628', '#b3cde3','#ccebc5','#decbe4','#fed9a6','#ffffcc','#e5d8bd','#fddaec', '#1b9e77','#d95f02','#7570b3','#e7298a','#66a61e','#e6ab02','#a6761d', '#666666'] nodes_source.add([community_colors[t % len(community_colors)] for t in nodes_community],'community_color') """Host Type colour""" """Update the plot with communities and Centrality""" r_circles.glyph.size = 'centrality' r_circles.glyph.fill_color = 'community_color' hover = HoverTool(tooltips=[('', '@name')], renderers=[r_circles]) plot.add_tools(hover) output_file(network_name+"_unipartite.html") show(plot) ############################################################################################# ############################################################################################# def construct_bipartite_host_virus_network(dataframe, network_name, plot= False, filter_file= False, taxonomic_filter = None): #if data_filename: # """Importing all the data # data: """ # if ".pickle" in data_filename: # data = pd.read_pickle(data_filename,) # else: # data = pd.read_csv(data_filename, encoding='ISO-8859-1', low_memory=False) data = dataframe """ filter data according to viral family """ if taxonomic_filter: data = data[data.viral_family == taxonomic_filter] """hosttaxa: creating dataframe of unique hosts and their characteristics to generate nodes""" hosttaxa = data.groupby(['ScientificName']).size().reset_index().rename(columns={0:'count'}) """vlist: creating list of unique viruses to generate nodes""" vlist = data.virus_name.dropna().unique().tolist() """Construction of network""" from networkx.algorithms import bipartite DG=nx.Graph() 
"""Initiating host nodes""" for index, row in hosttaxa.iterrows(): DG.add_node(row['ScientificName'], type="host", speciesname = row['ScientificName'], bipartite = 0 ) """Initiating virus nodes""" for virus in vlist: DG.add_node(virus, type="virus", virusname = virus, bipartite = 1) """Iterating through the raw data to add Edges if a virus is found in a host""" """Iterating through the raw data to add Edges if a virus is found in a host""" if filter_file: for index, row in data.iterrows(): if row.ConfirmationResult == 'Positive': DG.add_edge(row['ScientificName'], row['virus_name'], AnimalID = 'AnimalID', weight = 1) else: for index, row in data.iterrows(): DG.add_edge(row['ScientificName'], row['virus_name'], weight = 1) """Creating positions of the nodes""" #layout = nx.spring_layout(DG, k = 0.05, scale=2) # layout = nx.fruchterman_reingold_layout(DG, k = 0.05, iterations=50) """write graph """ nx.write_graphml(DG, network_name + "_bipartite.graphml") """ Plotting """ if plot: from bokeh.models import ColumnDataSource nodes, nodes_coordinates = zip(*layout.items()) nodes_xs, nodes_ys = list(zip(*nodes_coordinates)) node_data = dict(x=nodes_xs, y=nodes_ys, name=nodes) nd = pd.DataFrame.from_dict(node_data) def addNodeType(c): if c.name in vlist: return 'Virus' else: return 'Host' #nd['node_type'] = nd.apply(addNodeType, axis=1) virusc = '#ef8a62' # ,'#e05354' hostc = '#67a9cf' nt = [] nodecolors = [] for i in range (nd.shape[0]): if nd.name[i] in vlist: nt.append('virus') nodecolors.append(virusc) else: nt.append('host') nodecolors.append(hostc) nd['node_type'] = nt nd['colors'] = nodecolors #nodes_source = ColumnDataSource(nd.to_dict()) nodes_source = ColumnDataSource(dict(x=nd.x.tolist(), y=nd.y.tolist(), name = nd.name.tolist(), node_type = nd.node_type.tolist(), colors = nd.colors.tolist())) from bokeh.plotting import show, figure , output_file from bokeh.io import output_notebook from bokeh.models import HoverTool output_notebook() """ Generate the figure 1. Create tools 2. Set plot size and tools """ #hover = HoverTool(tooltips=[('name', '@name'),('type', '@node_type')]) plot = figure(title=network_name+": Host virus bipartite network", plot_width=1200, plot_height=1200, tools=['pan','wheel_zoom','reset','box_zoom','tap' ]) """ plot main circles 1. Plot only nodes according to their positions """ r_circles = plot.circle('x', 'y', source=nodes_source, size=10, color= "colors", alpha=0.5, level = 'overlay',) """ Function Get data for generation of edges """ def get_edges_specs(_network, _layout): c = dict(xs=[], ys=[], alphas=[]) #print d weights = [d['weight'] for u, v, d in _network.edges(data=True)] max_weight = max(weights) calc_alpha = lambda h: 0.1 + 0.6 * (h / max_weight) # example: { ..., ('user47', 'da_bjoerni', {'weight': 3}), ... 
} for u, v, data in _network.edges(data=True): c['xs'].append([_layout[u][0], _layout[v][0]]) c['ys'].append([_layout[u][1], _layout[v][1]]) c['alphas'].append(calc_alpha(data['weight'])) return c """ get the data for edges """ lines_source = ColumnDataSource(get_edges_specs(DG, layout)) """ plot edge lines """ r_lines = plot.multi_line('xs', 'ys', line_width=1.5, alpha=1 , color='#b3b6b7', source=lines_source) """Centrality """ centrality = nx.algorithms.centrality.betweenness_centrality(DG) """ first element are nodes again """ _, nodes_centrality = zip(*centrality.items()) max_centraliy = max(nodes_centrality) nodes_source.add([7 + 15 * t / max_centraliy for t in nodes_centrality], 'centrality') """Communities""" import community partition = community.best_partition(network) #import community #python-louvain #partition = community.best_partition(DG) p_, nodes_community = zip(*partition.items()) nodes_source.add(nodes_community, 'community') community_colors = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628', '#b3cde3','#ccebc5','#decbe4','#fed9a6','#ffffcc','#e5d8bd','#fddaec', '#1b9e77','#d95f02','#7570b3','#e7298a','#66a61e','#e6ab02','#a6761d', '#666666'] nodes_source.add([community_colors[t % len(community_colors)] for t in nodes_community],'community_color') """Host Type colour""" """Update the plot with communities and Centrality""" r_circles.glyph.size = 'centrality' hover = HoverTool(tooltips=[('', '@name')], renderers=[r_circles]) plot.add_tools(hover) output_file(network_name+"_bipartite.html") show(plot) return DG ############################################################################################# ############################################################################################# def construct_unipartite_virus_virus_network(dataframe, network_name, layout_func = 'fruchterman_reingold', plot= False, filter_file= False, taxonomic_filter = None, return_df = False): """first construct bipartite network""" if filter_file: BPnx = construct_bipartite_host_virus_network(dataframe = dataframe, network_name= network_name, plot=False, filter_file= True, taxonomic_filter = taxonomic_filter) else: BPnx = construct_bipartite_host_virus_network(dataframe = dataframe, network_name= network_name, plot=False, filter_file= False, taxonomic_filter = taxonomic_filter) #if data_filename: # """Importing all the data # data: """ # if ".pickle" in data_filename: # data = pd.read_pickle(data_filename,) # else: # data = pd.read_csv(data_filename, encoding='ISO-8859-1', low_memory=False) data = dataframe data['ScientificName'] = data['ScientificName'].str.replace('[^\x00-\x7F]','') if taxonomic_filter: data = data[data.viral_family == taxonomic_filter] """hosttaxa: creating dataframe of unique hosts and their characteristics to generate nodes""" hosttaxa = data.groupby(['ScientificName']).size().reset_index().rename(columns={0:'count'}) """vlist: creating list of unique viruses to generate nodes""" virus_dataframe = data.groupby(['virus_name', 'viral_family']).size().reset_index().rename(columns={0:'count'}) vlist = data.virus_name.dropna().unique().tolist() """Here we will copllapse the Bipartite network to monopartite Nodes will be viruses Edges will be hosts they share the virus with""" df = pd.DataFrame(list(itertools.combinations(vlist, 2))) df.columns = ['Virus1', 'Virus2'] def get_n_shared_hosts(c): return len(list(nx.common_neighbors(BPnx, c['Virus1'],c['Virus2']))) df['n_shared_hosts'] = df.apply(get_n_shared_hosts, axis=1) #"""removing pairs with 0 
shared hosts""" #df.drop(df[df.n_shared_hosts == 0].index, inplace=True) def addsharedhosts (c): return sorted(nx.common_neighbors(BPnx, c['Virus1'],c['Virus2'])) df["shared_hosts"] = df.apply(addsharedhosts, axis=1) print ('we have '+str(df.shape[0])+' virus pairs in our model') """Creating the a network now using the df EDGES will be weighted according to number of shared hosts""" VS_unx = nx.Graph() """Initiating virus nodes""" for index, row in virus_dataframe.iterrows(): VS_unx.add_node(row['virus_name'], type="virus", ViralFamily = str(row['viral_family']), bipartite = 1) #for virus in pd.unique(df[['Virus1', 'Virus2']].values.ravel()).tolist(): # VS_unx.add_node(virus, type="virus", virusname = virus, bipartite = 1) """Iterating through the raw data to add Edges if a virus is found in a host""" for index, row in df.iterrows(): if row['n_shared_hosts'] > 0: VS_unx.add_edge(row['Virus1'], row['Virus2'], weight = row['n_shared_hosts'], hosts = ','.join(row['shared_hosts'])) """Creating positions of the nodes""" if layout_func == 'fruchterman_reingold': layout = nx.fruchterman_reingold_layout(VS_unx, scale=2 )#k = 0.05, iterations=500 elif layout_func =='spring': layout = nx.spring_layout(VS_unx, k = 0.05, scale=2) elif layout_func =='circular': layout = nx.circular_layout(VS_unx, scale=1, center=None, dim=2) elif layout_func == 'kamada': layout = nx.kamada_kawai_layout(VS_unx, scale=1, center=None, dim=2) elif layout_func == 'spectral': layout = nx.spectral_layout(VS_unx, scale=1, center=None, dim=2) else: layout = nx.fruchterman_reingold_layout(VS_unx, scale=2 )#k = 0.05, iterations=500 """write graph """ #nx.write_graphml(VS_unx, network_name+"unipartite.graphml") if plot: plot_unipartite_network(title = network_name,network = VS_unx, network_name = network_name, layout_func = layout_func) if return_df: return df, VS_unx ####################################################################################################### ####################################################################################################### def calculate_features(data_frame, network, Species_file_name, data_path, virus_df, long = False): print('calculate_features function is in function file 1st function') print ('calculating topographical features') ################################################################################################################################ ################################################################################################################################ ################################################################################################################################ ################################################################################################################################ print ('calculating Jaccard coefficients') def jaccard (c): return sorted(nx.jaccard_coefficient(network, [(c['Virus1'],c['Virus2'])]))[0][2] data_frame["jaccard"] = data_frame.apply(jaccard, axis=1) ################################################################################################################################ ################################################################################################################################ def hasShortestPath (c): return nx.has_path(network, c['Virus1'], c['Virus2']) data_frame["hasPath"] = data_frame.apply(hasShortestPath, axis=1) print ('calculating shortest path length') def ShortPathLen(c): if c["hasPath"]: return nx.shortest_path_length(network, c['Virus1'], c['Virus2']) else: 
return np.nan data_frame["ShortPathLen"] = data_frame.apply(ShortPathLen, axis=1) ################################################################################################################################ ################################################################################################################################ print ('calculating adamic/adar index') def adar (c): return sorted(nx.adamic_adar_index(network, [(c['Virus1'],c['Virus2'])]))[0][2] data_frame["adamic_adar"] = data_frame.apply(adar, axis=1) ################################################################################################################################ ################################################################################################################################ print ('calculating Resource coefficients') def resource (c): return sorted(nx.resource_allocation_index(network, [(c['Virus1'],c['Virus2'])]))[0][2] data_frame["resource"] = data_frame.apply(resource, axis=1) ################################################################################################################################ ################################################################################################################################ print ('calculating preferential attachment coefficients') def preferential (c): return sorted(nx.preferential_attachment(network, [(c['Virus1'],c['Virus2'])]))[0][2] data_frame["preferential_attach"] = data_frame.apply(preferential, axis=1) ################################################################################################################################ ################################################################################################################################ if long: ################################################################################################################################ ################################################################################################################################ print ('listing neighbors') def neighbors (c): l = sorted(nx.common_neighbors(network, c['Virus1'],c['Virus2'])) return str(l)[1:-1] data_frame["neighbors"] = data_frame.apply(neighbors, axis=1) ################################################################################################################################ ################################################################################################################################ print ('calculating number of neighbors') def neighbors_n (c): return len(sorted(nx.common_neighbors(network, c['Virus1'],c['Virus2']))) data_frame["neighbors_n"] = data_frame.apply(neighbors_n, axis=1) ################################################################################################################################ ################################################################################################################################ print ('calculating difference in betweenness centrality') btw = nx.betweenness_centrality(network, 25) def betweenDiff(c): return abs(btw[c['Virus1']] - btw[c['Virus2']]) data_frame["betweeness_diff"] = data_frame.apply(betweenDiff, axis=1) ################################################################################################################################ ################################################################################################################################ print ('calculating node clusters') from community import community_louvain partition = 
community_louvain.best_partition(network) ################################################################################################################################ ################################################################################################################################ def virus1_cluster(c): return partition[c['Virus1']] data_frame['VirusCluster1'] = data_frame.apply(virus1_cluster, axis=1) def virus2_cluster(c): return partition[c['Virus2']] data_frame['VirusCluster2'] = data_frame.apply(virus2_cluster, axis=1) ################################################################################################################################ ################################################################################################################################ print ('calculating if nodes are in a same cluster') def in_same_cluster(c): if(partition[c['Virus1']] == partition[c['Virus2']]): return True else: return False data_frame["in_same_cluster"] = data_frame.apply(in_same_cluster, axis=1) ################################################################################################################################ ################################################################################################################################ print ('calculating difference in degree') degree = nx.degree(network) def degreeDiff(c): return abs(degree[c['Virus1']] - degree[c['Virus2']]) data_frame["degree_diff"] = data_frame.apply(degreeDiff, axis=1) ################################################################################################################################ ################################################################################################################################ if long: IUCN = pd.read_csv(data_path+ Species_file_name) IUCN["ScientificName"] = IUCN["Genus"].map(str) +' '+IUCN["Species"] IUCN.loc[IUCN.ScientificName== 'Homo sapiens', 'Order'] = 'Humans' ################################################################################################################################ ################################################################################################################################ print ('getting Order and Family values for shared hosts') def getOrders (c): orderlist = [] if len(c.shared_hosts) > 0: for h in (c.shared_hosts): try: orderlist.append(IUCN.loc[IUCN['ScientificName'] == h, 'Order'].iloc[0]) except: orderlist.append('MatchNotFound') return orderlist data_frame['orders'] = data_frame.apply(getOrders, axis=1) ################################################################################################################################ ################################################################################################################################ def getFamily (c): orderlist = [] if len(c.shared_hosts) > 0: for h in (c.shared_hosts): try: orderlist.append(IUCN.loc[IUCN['ScientificName'] == h, 'Family'].iloc[0]) except: orderlist.append('MatchNotFound') return orderlist data_frame['families'] = data_frame.apply(getFamily, axis=1) ################################################################################################################################ ################################################################################################################################ def OrderRichness (c): return len(set(c.orders)) def FamilyRichness (c): return len(set(c.families)) data_frame['OrderRichness'] = data_frame.apply(OrderRichness, axis=1) 
data_frame['FamilyRichness'] = data_frame.apply(FamilyRichness, axis=1) print ('richness calculations complete') ################################################################################################################################ ################################################################################################################################ print ('calculating ShannonH index of diversity for shared Orders and Familes of taxa') def shannon_order(c): total = len(c.orders) counts = pd.Series(c.orders).value_counts().tolist() h = sum(map(lambda x:abs(np.log(x/float(total)))*(x/float(total)), counts)) return h data_frame['Order_H'] = data_frame.apply(shannon_order, axis=1) ################################################################################################################################ ################################################################################################################################ def shannon_family(c): total = len(c.families) counts = pd.Series(c.families).value_counts().tolist() h = sum(map(lambda x:abs(np.log(x/float(total)))*(x/float(total)), counts)) return h data_frame['Familiy_H'] = data_frame.apply(shannon_family, axis=1) ################################################################################################################################ ################################################################################################################################ print ('Matching Virus Families') data_frame = pd.merge(data_frame,virus_df[['virus_name','viral_family','PubMed_Search_ln']], left_on='Virus1', right_on='virus_name', how='left') data_frame = pd.merge(data_frame,virus_df[['virus_name','viral_family', 'PubMed_Search_ln']], left_on='Virus2', right_on='virus_name', how='left') data_frame['ViralFamily1'] = data_frame['viral_family_x'] data_frame['ViralFamily2'] = data_frame['viral_family_y'] data_frame['PubMed_Search_ln1'] = data_frame['PubMed_Search_ln_x'] data_frame['PubMed_Search_ln2'] = data_frame['PubMed_Search_ln_y'] del data_frame['viral_family_y'] del data_frame['viral_family_x'] del data_frame['PubMed_Search_ln_x'] del data_frame['PubMed_Search_ln_y'] del data_frame['virus_name_x'] del data_frame['virus_name_y'] def MatchFamily(c): if c.ViralFamily1 == c.ViralFamily2: return 'True' else: return 'False' data_frame['FamilyMatch'] = data_frame.apply(MatchFamily, axis=1) ################################################################################################################################ ################################################################################################################################ print ('difference in PubMed hits') def PubMed_hits(c): return abs(c.PubMed_Search_ln1 - c.PubMed_Search_ln2) data_frame['PubMed_diff'] = data_frame.apply(PubMed_hits, axis=1) ################################################################################################################################ ################################################################################################################################ data_frame['hasPath'] = np.where(data_frame['hasPath']== True, 1, 0) data_frame['in_same_cluster'] =np.where(data_frame['in_same_cluster']== True, 1, 0) data_frame['FamilyMatch'] =np.where(data_frame['FamilyMatch']== 'True', 1, 0) data_frame['ShortPathLen'].fillna(0, inplace = True) data_frame['Link'] =np.where(data_frame['n_shared_hosts']>= 1, 1, 0) print (data_frame.shape) return data_frame 
####################################################################################################### ####################################################################################################### def interactive_plot(network, network_name, layout_func = 'fruchterman_reingold'): plot = Plot(plot_width=800, plot_height=800, x_range=Range1d(-1.1,1.1), y_range=Range1d(-1.1,1.1)) plot.title.text = network_name plot.add_tools(HoverTool( tooltips=[('','@index')]),TapTool(), BoxSelectTool(), BoxZoomTool(), ResetTool(), PanTool(), WheelZoomTool()) if layout_func == 'fruchterman_reingold': graph_renderer = graphs.from_networkx(network, nx.fruchterman_reingold_layout, scale=1, center=(0,0)) elif layout_func =='spring': graph_renderer = graphs.from_networkx(network, nx.spring_layout, scale=1, center=(0,0)) elif layout_func =='circular': graph_renderer = graphs.from_networkx(network, nx.circular_layout, scale=1, center=(0,0)) elif layout_func == 'kamada': graph_renderer = graphs.from_networkx(network, nx.kamada_kawai_layout, scale=1, center=(0,0)) elif layout_func == 'spectral': graph_renderer = graphs.from_networkx(network, nx.spectral_layout, scale=1, center=(0,0)) else: graph_renderer = graphs.from_networkx(network, nx.fruchterman_reingold_layout, scale=1, center=(0,0)) centrality = nx.algorithms.centrality.betweenness_centrality(network) """ first element are nodes again """ _, nodes_centrality = zip(*centrality.items()) max_centraliy = max(nodes_centrality) c_centrality = [7 + 15 * t / max_centraliy for t in nodes_centrality] import community #python-louvain partition = community.best_partition(network) p_, nodes_community = zip(*partition.items()) community_colors = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628', '#b3cde3','#ccebc5','#decbe4','#fed9a6','#ffffcc','#e5d8bd','#fddaec', '#1b9e77','#d95f02','#7570b3','#e7298a','#66a61e','#e6ab02','#a6761d', '#666666'] colors = [community_colors[t % len(community_colors)] for t in nodes_community] graph_renderer.node_renderer.data_source.add(c_centrality, 'centrality') graph_renderer.node_renderer.data_source.add(colors, 'colors') graph_renderer.node_renderer.glyph = Circle(size='centrality', fill_color='colors') graph_renderer.node_renderer.selection_glyph = Circle(size='centrality', fill_color=Spectral4[2]) graph_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1]) graph_renderer.edge_renderer.glyph = MultiLine(line_color="#757474", line_alpha=0.2, line_width=2) graph_renderer.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=3) graph_renderer.edge_renderer.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=1) graph_renderer.selection_policy = graphs.NodesAndLinkedEdges() graph_inspection_policy = graphs.NodesOnly() #graph_renderer.inspection_policy = graphs.EdgesAndLinkedNodes() plot.renderers.append(graph_renderer) #output_file("interactive_graphs.html") return plot ####################################################################################################### ####################################################################################################### def get_observed_network_data(Gc, BPnx, i, data_path, virus_df, Species_file_name): IUCN = pd.read_csv(data_path+ Species_file_name,) IUCN["ScientificName"] = IUCN["Genus"].map(str) +' '+IUCN["Species"] IUCN.loc[IUCN.ScientificName== 'Homo sapiens', 'Order'] = 'Humans' print('we have ' + str(len(Gc.edges)) + ' edges in complete network') ## randomly assigning groups to all edges to 
remove ## Copying Gc to Go Go = Gc.copy() # remove group 1 ebunch = ((u, v) for u, v, d in Go.edges(data=True) if d['remove_group'] == i) Go.remove_edges_from(ebunch) print('we have ' + str(len(Go.edges)) + ' edges in observed network ' + str(i)) net_name = 'Observed network ' + str(i) print (net_name) plot_unipartite_network( title=net_name, network=Go, network_name=net_name, layout_func='fruchterman_reingold') """ Develop Dataset for the Go """ print('\nDevelop Dataset for the Go\n') """STEP 2""" vlist = list(Go.nodes()) """STEP 3""" d = pd.DataFrame(list(itertools.combinations(vlist, 2))) d.columns = ['Virus1', 'Virus2'] """STEP 4""" def get_n_shared_hosts(c): return len(list(nx.common_neighbors(BPnx, c['Virus1'], c['Virus2']))) d['n_shared_hosts_c'] = d.apply(get_n_shared_hosts, axis=1) def addsharedhosts(c): return sorted(nx.common_neighbors(BPnx, c['Virus1'], c['Virus2'])) d["shared_hosts_c"] = d.apply(addsharedhosts, axis=1) print ('getting Order and Family values for shared hosts') def getOrders (c): orderlist = [] if len(c.shared_hosts_c) > 0: for h in (c.shared_hosts_c): try: orderlist.append(IUCN.loc[IUCN['ScientificName'] == h, 'Order'].iloc[0]) except: orderlist.append('MatchNotFound') return orderlist d['orders_label'] = d.apply(getOrders, axis=1) """STEP 5""" ebunch = ((u, v) for u, v, d in Gc.edges(data=True) if d['remove_group'] == i) to_remove = pd.DataFrame(list(ebunch)) to_remove.columns = ['Virus1', 'Virus2'] to_remove['n_shared_hosts'] = 0 to_remove['shared_hosts'] = [list() for x in range(len(to_remove.index))] """STEP 6""" m =
pd.merge(d, to_remove, on=['Virus1', 'Virus2'], how='left')
pandas.merge
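The record above completes a left merge of the candidate virus pairs against the pairs whose edges were removed for this fold. A minimal, self-contained sketch of that call, with illustrative values rather than the source data:

import pandas as pd

# candidate pairs with shared-host counts from the observed network
d = pd.DataFrame({'Virus1': ['v1', 'v1'], 'Virus2': ['v2', 'v3'], 'n_shared_hosts_c': [2, 0]})
# pairs flagged for removal in this fold
to_remove = pd.DataFrame({'Virus1': ['v1'], 'Virus2': ['v2'], 'n_shared_hosts': [0]})

# how='left' keeps every candidate pair; pairs absent from to_remove get NaN in its columns
m = pd.merge(d, to_remove, on=['Virus1', 'Virus2'], how='left')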
from datetime import datetime, timedelta import numpy as np import pytest import pytz from pandas._libs.tslibs import iNaT import pandas.compat as compat from pandas import ( DatetimeIndex, Index, NaT, Period, Series, Timedelta, TimedeltaIndex, Timestamp, isna) from pandas.core.arrays import PeriodArray from pandas.util import testing as tm @pytest.mark.parametrize("nat,idx", [(Timestamp("NaT"), DatetimeIndex), (Timedelta("NaT"), TimedeltaIndex), (Period("NaT", freq="M"), PeriodArray)]) def test_nat_fields(nat, idx): for field in idx._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue result = getattr(NaT, field) assert np.isnan(result) result = getattr(nat, field) assert np.isnan(result) for field in idx._bool_ops: result = getattr(NaT, field) assert result is False result = getattr(nat, field) assert result is False def test_nat_vector_field_access(): idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"]) for field in DatetimeIndex._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue result = getattr(idx, field) expected = Index([getattr(x, field) for x in idx]) tm.assert_index_equal(result, expected) ser = Series(idx) for field in DatetimeIndex._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue result = getattr(ser.dt, field) expected = [getattr(x, field) for x in idx] tm.assert_series_equal(result, Series(expected)) for field in DatetimeIndex._bool_ops: result = getattr(ser.dt, field) expected = [getattr(x, field) for x in idx] tm.assert_series_equal(result, Series(expected)) @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) @pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat"]) def test_identity(klass, value): assert klass(value) is NaT @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) @pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan]) def test_equality(klass, value): if klass is Period and value == "": pytest.skip("Period cannot parse empty string") assert klass(value).value == iNaT @pytest.mark.parametrize("klass", [Timestamp, Timedelta]) @pytest.mark.parametrize("method", ["round", "floor", "ceil"]) @pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"]) def test_round_nat(klass, method, freq): # see gh-14940 ts = klass("nat") round_method = getattr(ts, method) assert round_method(freq) is ts @pytest.mark.parametrize("method", [ "astimezone", "combine", "ctime", "dst", "fromordinal", "fromtimestamp", "isocalendar", "strftime", "strptime", "time", "timestamp", "timetuple", "timetz", "toordinal", "tzname", "utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple", "timestamp" ]) def test_nat_methods_raise(method): # see gh-9513, gh-17329 msg = "NaTType does not support {method}".format(method=method) with pytest.raises(ValueError, match=msg): getattr(NaT, method)() @pytest.mark.parametrize("method", [ "weekday", "isoweekday" ]) def test_nat_methods_nan(method): # see gh-9513, gh-17329 assert np.isnan(getattr(NaT, method)()) @pytest.mark.parametrize("method", [ "date", "now", "replace", "today", "tz_convert", "tz_localize" ]) def test_nat_methods_nat(method): # see gh-8254, gh-9513, gh-17329 assert getattr(NaT, method)() is NaT @pytest.mark.parametrize("get_nat", [ lambda x: NaT, lambda x: Timedelta(x), lambda x:
Timestamp(x)
pandas.Timestamp
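The completion sits inside the parametrised NaT constructors; a short check of the behaviour it relies on, namely that pd.Timestamp maps NaT-like inputs to the pd.NaT singleton:

import numpy as np
import pandas as pd

# NaT-like inputs all collapse to the singleton pd.NaT
assert pd.Timestamp("NaT") is pd.NaT
assert pd.Timestamp(np.nan) is pd.NaT

# a regular string parses to a concrete Timestamp
ts = pd.Timestamp("2011-01-01 10:00")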
import os import shutil import sys from collections import namedtuple from datetime import datetime, date from sqlalchemy import create_engine import pandas as pd import numpy as np BASE_DIR = os.getenv('ETL_HOME', '/home/etl') HeaderMapping = namedtuple('HeaderMapping', ['raw_name', 'output_name']) LOCATION_ID_HEADER = 'location_id' DATE_ID_HEADER = 'date_id' CITY_HEADER = 'city' STATE_HEADER = 'state' COUNTRY_HEADER = 'country' COMBINED_HEADER = 'combined_key' FIPS_HEADER = 'FIPS' UID_HEADER = 'UID' LATITUDE_HEADER = 'latitude' LONGITUDE_HEADER = 'longitude' CASES_HEADER = 'cases' DEATHS_HEADER = 'deaths' RECOVERIES_HEADER = 'recoveries' # Tests performed per 100,000 people TESTING_RATE_HEADER = 'testing_rate' # hospitalized / number cases HOSPITALIZATION_RATE_HEADER = 'hospitalization_rate' # Cases per 100,000 people CASES_100K_HEADER = 'cases_100k' DATE_HEADER = 'date' POPULATION_HEADER = 'population' state_v1 = HeaderMapping('Province/State', STATE_HEADER) state_v2 = HeaderMapping('Province_State', STATE_HEADER) country_v1 = HeaderMapping('Country/Region', COUNTRY_HEADER) country_v2 = HeaderMapping('Country_Region', COUNTRY_HEADER) cases_v1 = HeaderMapping('Confirmed', CASES_HEADER) deaths_v1 = HeaderMapping('Deaths', DEATHS_HEADER) recoveries_v1 = HeaderMapping('Recovered', RECOVERIES_HEADER) testing_rate_v1 = HeaderMapping('Testing_Rate', TESTING_RATE_HEADER) hospitalization_rate_v1 = HeaderMapping('Hospitalization_Rate', HOSPITALIZATION_RATE_HEADER) cases_100K_v1 = HeaderMapping('Incidence_Rate', CASES_100K_HEADER) cases_100K_v2 = HeaderMapping('Incident_Rate', CASES_100K_HEADER) latitude_v1 = HeaderMapping('Latitude', LATITUDE_HEADER) latitude_v2 = HeaderMapping('Lat', LATITUDE_HEADER) longitude_v1 = HeaderMapping('Longitude', LONGITUDE_HEADER) longitude_v2 = HeaderMapping('Long_', LONGITUDE_HEADER) known_headers = [ 'Province/State,Country/Region,Last Update,Confirmed,Deaths,Recovered', 'Province/State,Country/Region,Last Update,Confirmed,Deaths,Recovered,Latitude,Longitude', 'FIPS,Admin2,Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,Combined_Key', 'FIPS,Admin2,Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,Combined_Key,Incidence_Rate,Case-Fatality_Ratio', 'FIPS,Admin2,Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,Combined_Key,Incident_Rate,Case_Fatality_Ratio', 'Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,FIPS,Incident_Rate,People_Tested,People_Hospitalized,Mortality_Rate,UID,ISO3,Testing_Rate,Hospitalization_Rate', 'Province_State,Country_Region,Last_Update,Lat,Long_,Confirmed,Deaths,Recovered,Active,FIPS,Incident_Rate,Total_Test_Results,People_Hospitalized,Case_Fatality_Ratio,UID,ISO3,Testing_Rate,Hospitalization_Rate' ] header_transformation_mappings = [ state_v1, state_v2, country_v1, country_v2, cases_v1 , deaths_v1, recoveries_v1, testing_rate_v1, hospitalization_rate_v1, cases_100K_v1, cases_100K_v2, latitude_v1, latitude_v2, longitude_v1, longitude_v2, ] required_headers = [ CITY_HEADER, STATE_HEADER, COUNTRY_HEADER, LATITUDE_HEADER, LONGITUDE_HEADER, CASES_HEADER, DEATHS_HEADER, RECOVERIES_HEADER, TESTING_RATE_HEADER, HOSPITALIZATION_RATE_HEADER, CASES_100K_HEADER, ] load_headers = [ DATE_ID_HEADER, LOCATION_ID_HEADER, CASES_HEADER, RECOVERIES_HEADER, DEATHS_HEADER, CASES_100K_HEADER, TESTING_RATE_HEADER, HOSPITALIZATION_RATE_HEADER, ] def remap_header_name(header_name): '''Given any string, match it to an 
instance of HeaderMapping and return the output_name of the match or the original header_name if no match it found. Args: header_name (str): raw input header name to transform Returns: A header name that underwent transformation ''' for hm in header_transformation_mappings: if header_name == hm.raw_name: return hm.output_name return header_name def transform_headers(df, ds): '''Takes a Pandas DataFrame, validates the headers are from an an expected list of headers the ETL pipeline knows apriori and remaps the names of the headers to a uniform set of names and order Args: df (DataFrame): input DataFrame to transform ds (str or datetime): the date stamp the DataFrame contains data for Returns: A transformed DataFrame with uniform header names and order ''' if isinstance(ds, str): ds = datetime.strptime(ds, '%m-%d-%Y') elif not isinstance(ds, datetime): raise TypeError('ds argument is expected to be either a datetime instance or str representing one') transformed_df = df.rename(columns=remap_header_name) keep_columns = [col for col in transformed_df.columns if col in required_headers] add_columns = [col for col in required_headers if col not in keep_columns] transformed_df = transformed_df[keep_columns] for col in add_columns: transformed_df[col] = np.nan transformed_df[DATE_HEADER] = ds expected_order = [DATE_HEADER] + required_headers transformed_df = transformed_df[expected_order] if 'Combined_Key' not in df.columns: combined_key_rows = [] for idx, row in transformed_df.iterrows(): combined = '' if not pd.isnull(row.city) and row.city: combined += row.city + ', ' if not pd.isnull(row.state) and row.state and row.state != row.country: combined += row.state + ', ' if not pd.isnull(row.country) and row.country: combined += row.country combined_key_rows.append(combined) transformed_df[COMBINED_HEADER] = combined_key_rows else: transformed_df[COMBINED_HEADER] = df.Combined_Key transformed_df[COMBINED_HEADER] = transformed_df[COMBINED_HEADER].str.lower() if 'FIPS' not in df.columns: transformed_df[FIPS_HEADER] = np.nan else: transformed_df[FIPS_HEADER] = df.FIPS if 'UID' not in df.columns: transformed_df[UID_HEADER] = np.nan else: transformed_df[UID_HEADER] = df.UID return transformed_df COVID_DATA_START_DATE = date(2020, 1, 22) def make_date_dims(start, end=None): date_range = pd.date_range(start=start, end=end or date.today(), freq='1D') data = { 'date_id': list(range(1, len(date_range)+1)), 'date': date_range.date, 'year': date_range.year, 'month': date_range.month, 'day_of_month': date_range.day, 'day_of_week': date_range.weekday } return pd.DataFrame(data, index=date_range) def parse_city_from_combined_key(key): parts = key.split(',') if len(parts) == 3: return parts[0] return None def transform_global(): INPUT_DATA_DIR = os.path.join(BASE_DIR, 'COVID-19', 'csse_covid_19_data', 'csse_covid_19_daily_reports') print("Input Dir: " + INPUT_DATA_DIR) TRANSFORMED_DATA_DIR = os.path.join(BASE_DIR, 'COVID-19-TRANSFORMED') if os.path.exists(TRANSFORMED_DATA_DIR): shutil.rmtree(TRANSFORMED_DATA_DIR) os.makedirs(TRANSFORMED_DATA_DIR) print("Output Dir: " + TRANSFORMED_DATA_DIR) # Fix any BOM files (there are some early on ones in Jan 2020, could be more later) input_files = [f for f in os.listdir(INPUT_DATA_DIR) if f.endswith('.csv')] for f in input_files: input_f = os.path.join(INPUT_DATA_DIR, f) output_f = os.path.join(TRANSFORMED_DATA_DIR, 'global_'+f) with open(input_f, mode='r', encoding='utf-8-sig') as fin, open(output_f, mode='w', encoding='utf-8') as fout: fout.write(fin.read()) # remap 
headers to consistent format files = [f for f in os.listdir(TRANSFORMED_DATA_DIR) if f.startswith('global_')] for f in files: fname, fext = os.path.splitext(f) date_str = fname.replace('global_', '') file_path = os.path.join(TRANSFORMED_DATA_DIR, f) with open(file_path) as fp: headers = fp.readline().strip() if headers not in known_headers: print("{} has unrecognized headers {}".format(f, headers)) sys.exit(1) print('Transforming {}'.format(f)) df = pd.read_csv(file_path) transformed_df = transform_headers(df, date_str) transformed_path = os.path.join(TRANSFORMED_DATA_DIR, 'transformed_'+date_str+'.csv') transformed_df.to_csv(transformed_path, index=False) def transform_us(): INPUT_DATA_DIR = os.path.join(BASE_DIR, 'COVID-19', 'csse_covid_19_data', 'csse_covid_19_daily_reports_us') print("Input Dir: " + INPUT_DATA_DIR) TRANSFORMED_DATA_DIR = os.path.join(BASE_DIR, 'COVID-19-TRANSFORMED') if not os.path.exists(TRANSFORMED_DATA_DIR): os.makedirs(TRANSFORMED_DATA_DIR) print("Output Dir: " + TRANSFORMED_DATA_DIR) # Fix any BOM files (there are some early on ones in Jan 2020, could be more later) input_files = [f for f in os.listdir(INPUT_DATA_DIR) if f.endswith('.csv')] for f in input_files: input_f = os.path.join(INPUT_DATA_DIR, f) output_f = os.path.join(TRANSFORMED_DATA_DIR, 'us_'+f) with open(input_f, mode='r', encoding='utf-8-sig') as fin, open(output_f, mode='w', encoding='utf-8') as fout: fout.write(fin.read()) # remap headers to consistent format files = [f for f in os.listdir(TRANSFORMED_DATA_DIR) if f.startswith('us_')] for f in files: fname, fext = os.path.splitext(f) date_str = fname.replace('us_', '') file_path = os.path.join(TRANSFORMED_DATA_DIR, f) with open(file_path) as fp: headers = fp.readline().strip() df = pd.read_csv(file_path) if headers not in known_headers: print("{} has unrecognized headers {}".format(f, headers)) df.head() sys.exit(1) print('Transforming {}'.format(f)) transformed_df = transform_headers(df, date_str) transformed_path = os.path.join(TRANSFORMED_DATA_DIR, 'transformed_'+date_str+'.csv') if os.path.exists(transformed_path): global_df = pd.read_csv(transformed_path) # for country in transformed_df.country.unique(): # global_df = global_df.loc[global_df.country != country] transformed_df = pd.concat([transformed_df, global_df]).drop_duplicates() transformed_df.to_csv(transformed_path, index=False) COVID_TMP_FACTS_TBL = 'tmp_covid_facts' COVID_DATE_DIM_TBL = 'date_dim' COVID_LOCATION_DIM_TBL = 'location_dim' def create_sql_engine(): return create_engine('postgresql://etl:etl@localhost:5432/dw') def validate_location_sql_entry(row): values = ( row.location_id, row.country, row.state if pd.notnull(row.state) else None, row.city if pd.notnull(row.city) else None, row.latitude, row.longitude, int(row.population) if pd.notnull(row.population) else None ) return values def validate_covid_facts_sql_entry(row): values = ( int(row.date_id), int(row.location_id), int(row.cases) if pd.notnull(row.cases) else None, int(row.recoveries) if pd.notnull(row.recoveries) else None, int(row.deaths) if pd.notnull(row.deaths) else None, row.cases_100k if
pd.notnull(row.cases_100k)
pandas.notnull
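The completion guards a scalar with pd.notnull before it is loaded into the warehouse. A small sketch with a made-up row:

import numpy as np
import pandas as pd

row = pd.Series({'cases_100k': np.nan, 'deaths': 3.0})
# pd.notnull accepts scalars as well as Series/arrays
cases_100k = row.cases_100k if pd.notnull(row.cases_100k) else None
deaths = int(row.deaths) if pd.notnull(row.deaths) else None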
# -*- coding: utf-8 -*- """ Created on Wed Mar 6 09:43:07 2019 Estimate Transfer Function Quality * based on simple statistics @author: jpeacock """ # ============================================================================= # Imports # ============================================================================= import os import glob import numpy as np import pandas as pd from scipy import interpolate from mtpy.core import mt # ============================================================================= # # ============================================================================= class EMTFStats(object): """ Class to estimate data quality of EM transfer functions :param tf_dir: transfer function directory :type tf_dir: string :param stat_limits: criteria for statistics based on a 0-5 rating scale :type stat_limits: dictionary :Example: :: >>> from usgs_archive import estimate_tf_quality_factor as tfq >>> edi_dir = r"/home/edi_folders/survey_01" >>> q = EMTFStats() >>> stat_df = q.compute_statistics(edi_dir) >>> q_df = q.estimate_data_quality(stat_df=stat_df) >>> s_df = q.summarize_data_quality(q_df) """ def __init__(self, tf_dir=None, *args, **kwargs): self.tf_dir = tf_dir self.stat_limits = { "std": { 5: (0, 0.5), 4: (0.5, 1.25), 3: (1.25, 2.5), 2: (2.5, 10.0), 1: (10.0, 25.0), 0: (25.0, 1e36), }, "corr": { 5: (0.975, 1.0), 4: (0.9, 0.975), 3: (0.75, 0.9), 2: (0.5, 0.75), 1: (0.25, 0.5), 0: (-1.0, 0.25), }, "diff": { 5: (0.0, 0.5), 4: (0.5, 1.0), 3: (1.0, 2.0), 2: (2.0, 5.0), 1: (5.0, 10.0), 0: (10.0, 1e36), }, "fit": { 5: (0, 5), 4: (5, 15), 3: (15, 50), 2: (50, 100), 1: (100, 200), 0: (200, 1e36), }, "bad": { 5: (0, 2), 4: (2, 4), 3: (4, 10), 2: (10, 15), 1: (15, 20), 0: (20, 1e36), }, } self.z_dict = {(0, 0): "xx", (0, 1): "xy", (1, 0): "yx", (1, 1): "yy"} self.t_dict = {(0, 0): "x", (0, 1): "y"} self.types = ( [ "{3}_{0}{1}_{2}".format(ii, jj, kk, ll) for ii in ["x", "y"] for jj in ["x", "y"] for kk in ["std", "corr", "diff", "fit"] for ll in ["res", "phase"] ] + [ "{2}_{0}_{1}".format(ii, kk, ll) for ii in ["x", "y"] for kk in ["std", "corr", "diff", "fit"] for ll in ["tipper"] ] + [ "bad_points_{2}_{0}{1}".format(ii, jj, ll) for ii in ["x", "y"] for jj in ["x", "y"] for ll in ["res", "phase"] ] + ["bad_points_tipper_{0}".format(ii) for ii in ["x", "y"]] ) def locate_bad_res_points(self, res): """ try to locate bad points to remove """ ### estimate levearge points, or outliers ### estimate the median med = np.median(res) ### locate the point closest to the median tol = np.abs(res - np.median(res)).min() m_index = np.where( (abs(res - med) >= tol * 0.95) & (abs(res - med) <= tol * 1.05) )[0][0] r_index = m_index + 1 bad_points = [] # go to the right while r_index < res.shape[0]: if abs(res[r_index] - res[r_index - 1]) > np.cos(np.pi / 4) * res[r_index]: bad_points.append(r_index) r_index += 1 # go to the left l_index = m_index - 1 while l_index > -1: if abs(res[l_index] - res[l_index - 1]) > np.cos(np.pi / 4) * res[l_index]: bad_points.append(l_index) l_index -= 1 return np.array(bad_points) def locate_bad_phase_points(self, phase, test=5): """ try to locate bad points to remove """ ### estimate levearge points, or outliers ### estimate the median med = np.median(phase) ### locate the point closest to the median ### locate the point closest to the median tol = np.abs(phase - np.median(phase)).min() m_index = np.where( (abs(phase - med) >= tol * 0.95) & (abs(phase - med) <= tol * 1.05) )[0][0] r_index = m_index + 1 bad_points = [] # go to the right while r_index < 
phase.shape[0]: if abs(phase[r_index] - phase[r_index - 1]) > test: bad_points.append(r_index) r_index += 1 # go to the left l_index = m_index - 1 while l_index > -1: if abs(phase[l_index] - phase[l_index - 1]) > test: bad_points.append(l_index) l_index -= 1 return np.array(bad_points) def locate_bad_tipper_points(self, tipper, test=0.2): """ try to locate bad points to remove """ ### estimate levearge points, or outliers ### estimate the median med = np.median(tipper) ### locate the point closest to the median tol = np.abs(tipper - np.median(tipper)).min() m_index = np.where( (abs(tipper - med) >= tol * 0.95) & (abs(tipper - med) <= tol * 1.05) )[0][0] r_index = m_index + 1 bad_points = [] # go to the right while r_index < tipper.shape[0]: if abs(tipper[r_index] - tipper[r_index - 1]) > test: bad_points.append(r_index) r_index += 1 # go to the left l_index = m_index - 1 while l_index > -1: if abs(tipper[l_index] - tipper[l_index - 1]) > test: bad_points.append(l_index) l_index -= 1 return np.array(bad_points) def compute_statistics(self, tf_dir=None): """ Compute statistics of the transfer functions in a given directory. Statistics are: * one-lag autocorrelation coefficient, estimator for smoothness * average of errors on components * fit to a least-squres smooth curve * normalized standard deviation of the first derivative, another smoothness estimator :param tf_dir: path to directory of transfer functions :type tf_dir: string :returns: data frame of all the statistics estimated :rtype: pandas.DataFrame .. note:: Writes a file to the tf_dir named tf_quality_statistics.csv """ if tf_dir is not None: self.tf_dir = tf_dir edi_list = glob.glob("{0}\*.edi".format(self.tf_dir)) stat_array = np.zeros( len(edi_list), dtype=[(key, np.float) for key in sorted(self.types)] ) station_list = [] for kk, edi in enumerate(edi_list): mt_obj = mt.MT(edi) station_list.append(mt_obj.station) for ii in range(2): for jj in range(2): flip = False comp = self.z_dict[(ii, jj)] ### locate bad points bad_points_res = self.locate_bad_res_points( mt_obj.Z.resistivity[:, ii, jj] ) stat_array[kk]["bad_points_res_{0}".format(comp)] = max( [1, len(bad_points_res)] ) bad_points_phase = self.locate_bad_phase_points( mt_obj.Z.phase[:, ii, jj] ) stat_array[kk]["bad_points_phase_{0}".format(comp)] = max( [1, len(bad_points_res)] ) ### need to get the data points that are within the reasonable range ### and not 0 nz_index = np.nonzero(mt_obj.Z.resistivity[:, ii, jj]) nz_index = np.delete(nz_index, bad_points_res) nz_index = np.delete(nz_index, bad_points_phase) f = mt_obj.Z.freq[nz_index] res = mt_obj.Z.resistivity[nz_index, ii, jj] res_err = mt_obj.Z.resistivity_err[nz_index, ii, jj] phase = mt_obj.Z.phase[nz_index, ii, jj] phase_err = mt_obj.Z.phase_err[nz_index, ii, jj] if len(f) < 2: print(mt_obj.station, comp, nz_index) continue # need to sort the array to be ordered with assending # frequency. 
Check to see if f is ascending, if not flip if f[0] > f[1]: flip = True f = f[::-1] res = res[::-1] res_err = res_err[::-1] phase = phase[::-1] phase_err = phase_err[::-1] ### make parameter for least squares fit k = 7 # order of the fit # knots, has to be at least to the bounds of f t = np.r_[(f[0],) * (k + 1), [min(1, f.mean())], (f[-1],) * (k + 1)] ### estimate a least squares fit try: ls_res = interpolate.make_lsq_spline(f, res, t, k) ls_phase = interpolate.make_lsq_spline(f, phase, t, k) ### compute a standard deviation between the ls fit and data stat_array[kk]["res_{0}_fit".format(comp)] = ( res - ls_res(f) ).std() stat_array[kk]["phase_{0}_fit".format(comp)] = ( phase - ls_phase(f) ).std() except (ValueError, np.linalg.LinAlgError) as error: stat_array[kk]["res_{0}_fit".format(comp)] = np.NaN stat_array[kk]["phase_{0}_fit".format(comp)] = np.NaN print("{0} {1} {2}".format(mt_obj.station, comp, error)) ### taking median of the error is more robust stat_array[kk]["res_{0}_std".format(comp)] = np.median(res_err) stat_array[kk]["phase_{0}_std".format(comp)] = np.median(phase_err) ### estimate smoothness stat_array[kk]["res_{0}_corr".format(comp)] = np.corrcoef( res[0:-1], res[1:] )[0, 1] stat_array[kk]["phase_{0}_corr".format(comp)] = np.corrcoef( phase[0:-1], phase[1:] )[0, 1] ### estimate smoothness with difference stat_array[kk]["res_{0}_diff".format(comp)] = np.abs( np.median(np.diff(res)) ) stat_array[kk]["phase_{0}_diff".format(comp)] = np.abs( np.median(np.diff(phase)) ) ### compute tipper if ii == 0: tcomp = self.t_dict[(0, jj)] t_index = np.nonzero(mt_obj.Tipper.amplitude[:, 0, jj]) bad_points_t = self.locate_bad_tipper_points( mt_obj.Tipper.amplitude[:, 0, jj] ) stat_array[kk]["bad_points_tipper_{0}".format(tcomp)] = max( [1, len(bad_points_t)] ) t_index = np.delete(t_index, bad_points_t) if t_index.size == 0: continue else: tmag = mt_obj.Tipper.amplitude[t_index, 0, jj] tmag_err = mt_obj.Tipper.amplitude_err[t_index, 0, jj] tip_f = mt_obj.Tipper.freq[t_index] if flip: tmag = tmag[::-1] tmag_err = tmag_err[::-1] tip_f = tip_f[::-1] tip_t = np.r_[ (tip_f[0],) * (k + 1), [min(1, tip_f.mean())], (tip_f[-1],) * (k + 1), ] try: ls_tmag = interpolate.make_lsq_spline( tip_f, tmag, tip_t, k ) stat_array[kk]["tipper_{0}_fit".format(tcomp)] = np.std( tmag - ls_tmag(tip_f) ) except (ValueError, np.linalg.LinAlgError) as error: stat_array[kk]["tipper_{0}_fit".format(tcomp)] = np.NaN print( "{0} {1} {2}".format(mt_obj.station, tcomp, error) ) stat_array[kk][ "tipper_{0}_std".format(tcomp) ] = tmag_err.mean() stat_array[kk][ "tipper_{0}_corr".format(tcomp) ] = np.corrcoef(tmag[0:-1], tmag[1:])[0, 1] stat_array[kk]["tipper_{0}_diff".format(tcomp)] = np.std( np.diff(tmag) ) / abs(np.mean(np.diff(tmag))) ### write file df =
pd.DataFrame(stat_array, index=station_list)
pandas.DataFrame
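Here the statistics table is built from a NumPy structured array, with station names as the row index. A reduced sketch with two hypothetical fields:

import numpy as np
import pandas as pd

# structured array: one row per station, one field per statistic
stat_array = np.zeros(2, dtype=[('res_xy_std', float), ('res_xy_corr', float)])
station_list = ['MT001', 'MT002']

# field names become columns; the station list becomes the index
df = pd.DataFrame(stat_array, index=station_list)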
from faultclass import Fault from faultclass import python_worker import pandas as pd from calculate_trigger import calculate_trigger_addresses from multiprocessing import Queue import logging logger = logging.getLogger(__name__) def run_goldenrun( config_qemu, qemu_output, data_queue, faultconfig, qemu_pre=None, qemu_post=None ): dummyfaultlist = [Fault(0, 0, 0, 0, 0, 0, 100, 0)] queue_output = Queue() goldenrun_config = {} goldenrun_config["qemu"] = config_qemu["qemu"] goldenrun_config["kernel"] = config_qemu["kernel"] goldenrun_config["plugin"] = config_qemu["plugin"] goldenrun_config["machine"] = config_qemu["machine"] if "max_instruction_count" in config_qemu: goldenrun_config["max_instruction_count"] = config_qemu["max_instruction_count"] if "memorydump" in config_qemu: goldenrun_config["memorydump"] = config_qemu["memorydump"] experiments = [] if "start" in config_qemu: pre_goldenrun = {"type": "pre_goldenrun", "index": -2, "data": {}} experiments.append(pre_goldenrun) goldenrun = {"type": "goldenrun", "index": -1, "data": {}} experiments.append(goldenrun) for experiment in experiments: if experiment["type"] == "pre_goldenrun": goldenrun_config["end"] = config_qemu["start"] # Set max_insn_count to ridiculous high number to never reach it goldenrun_config["max_instruction_count"] = 10000000000000 elif experiment["type"] == "goldenrun": if "start" in config_qemu: goldenrun_config["start"] = config_qemu["start"] if "end" in config_qemu: goldenrun_config["end"] = config_qemu["end"] if "start" in config_qemu and "end" in config_qemu: # Set max_insn_count to ridiculous high number to never reach it goldenrun_config["max_instruction_count"] = 10000000000000 logger.info(f"{experiment['type']} started...") python_worker( dummyfaultlist, goldenrun_config, experiment["index"], queue_output, qemu_output, None, False, None, qemu_pre, qemu_post, ) experiment["data"] = queue_output.get() if experiment["data"]["endpoint"] == 1: logger.info(f"{experiment['type']} successfully finished.") else: logger.critical( f"{experiment['type']} not finished after " f"{goldenrun_config['max_instruction_count']} tb counts." ) raise ValueError( f"{experiment['type']} not finished. Probably no valid instruction! 
" f"If valid increase tb max for golden run" ) data_queue.put(experiment["data"]) if experiment["type"] != "goldenrun": continue tbexec = pd.DataFrame(experiment["data"]["tbexec"]) tbinfo = pd.DataFrame(experiment["data"]["tbinfo"]) calculate_trigger_addresses(faultconfig, tbexec, tbinfo) faultconfig = checktriggers_in_tb(faultconfig, experiment["data"]) if "end" in config_qemu: for tb in experiment["data"]["tbinfo"]: config_qemu["max_instruction_count"] += tb["num_exec"] * tb["ins_count"] logger.info( "Max instruction count is {}".format( config_qemu["max_instruction_count"] ) ) return [config_qemu["max_instruction_count"], experiment["data"], faultconfig] def find_insn_addresses_in_tb(insn_address, data): tb_list_found = [] tbinfolist = data["tbinfo"] for tbinfo in tbinfolist: if (insn_address >= tbinfo["id"]) and ( insn_address < tbinfo["id"] + tbinfo["size"] ): tb_list_found.append(tbinfo) if len(tb_list_found) == 0: return False else: return True def checktriggers_in_tb(faultconfig, data): valid_triggers = [] invalid_triggers = [] for faultdescription in faultconfig: logger.info( "Check Fault {}/{} for valid trigger".format( faultdescription["index"] + 1, len(faultconfig) ) ) for fault in faultdescription["faultlist"]: if fault.trigger.address in valid_triggers: continue if fault.trigger.address in invalid_triggers: faultdescription["delete"] = True continue if find_insn_addresses_in_tb(fault.trigger.address, data): valid_triggers.append(fault.trigger.address) continue invalid_triggers.append(fault.trigger.address) faultdescription["delete"] = True error_message = ( f"Trigger address {fault.trigger.address} not found in tbs " f"executed in golden run! \nInvalid fault description: " f"{faultdescription}" ) for fault in faultdescription["faultlist"]: error_message += ( f"\nfault: {fault}, " f"triggeraddress: {fault.trigger.address}, " f"faultaddress: {fault.address}" ) logger.critical(error_message) logger.info("Filtering faultlist ...") len_faultlist = len(faultconfig) tmp =
pd.DataFrame(faultconfig)
pandas.DataFrame
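The fault configuration, a list of dictionaries, is wrapped in a DataFrame so that entries marked for deletion can be filtered out. A reduced sketch; the keys are illustrative:

import pandas as pd

faultconfig = [
    {'index': 0, 'delete': False},
    {'index': 1, 'delete': True},
]

# one row per fault description; dict keys become columns
tmp = pd.DataFrame(faultconfig)
kept = tmp[~tmp['delete']]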
# pylint: disable-msg=E1101,W0612 from datetime import datetime, timedelta import nose import numpy as np import pandas as pd from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull, bdate_range, date_range, _np_version_under1p7) import pandas.core.common as com from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long from pandas import compat, to_timedelta, tslib from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct from pandas.util.testing import (assert_series_equal, assert_frame_equal, assert_almost_equal, ensure_clean) import pandas.util.testing as tm def _skip_if_numpy_not_friendly(): # not friendly for < 1.7 if _np_version_under1p7: raise nose.SkipTest("numpy < 1.7") class TestTimedeltas(tm.TestCase): _multiprocess_can_split_ = True def setUp(self): pass def test_numeric_conversions(self): _skip_if_numpy_not_friendly() self.assertEqual(ct(0), np.timedelta64(0,'ns')) self.assertEqual(ct(10), np.timedelta64(10,'ns')) self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]')) self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]')) self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]')) self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]')) self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]')) def test_timedelta_conversions(self): _skip_if_numpy_not_friendly() self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]')) self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]')) self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]')) def test_short_format_converters(self): _skip_if_numpy_not_friendly() def conv(v): return v.astype('m8[ns]') self.assertEqual(ct('10'), np.timedelta64(10,'ns')) self.assertEqual(ct('10ns'), np.timedelta64(10,'ns')) self.assertEqual(ct('100'), np.timedelta64(100,'ns')) self.assertEqual(ct('100ns'), np.timedelta64(100,'ns')) self.assertEqual(ct('1000'), np.timedelta64(1000,'ns')) self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns')) self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns')) self.assertEqual(ct('10us'), np.timedelta64(10000,'ns')) self.assertEqual(ct('100us'), np.timedelta64(100000,'ns')) self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns')) self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns')) self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns')) self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns')) self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns')) self.assertEqual(ct('100ms'), np.timedelta64(100000000,'ns')) self.assertEqual(ct('1000ms'), np.timedelta64(1000000000,'ns')) self.assertEqual(ct('-1s'), -np.timedelta64(1000000000,'ns')) self.assertEqual(ct('1s'), np.timedelta64(1000000000,'ns')) self.assertEqual(ct('10s'), np.timedelta64(10000000000,'ns')) self.assertEqual(ct('100s'), np.timedelta64(100000000000,'ns')) self.assertEqual(ct('1000s'), np.timedelta64(1000000000000,'ns')) self.assertEqual(ct('1d'), conv(np.timedelta64(1,'D'))) self.assertEqual(ct('-1d'), -conv(np.timedelta64(1,'D'))) self.assertEqual(ct('1D'), conv(np.timedelta64(1,'D'))) self.assertEqual(ct('10D'), conv(np.timedelta64(10,'D'))) self.assertEqual(ct('100D'), conv(np.timedelta64(100,'D'))) self.assertEqual(ct('1000D'), conv(np.timedelta64(1000,'D'))) self.assertEqual(ct('10000D'), conv(np.timedelta64(10000,'D'))) # space self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000,'D'))) 
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000,'D'))) # invalid self.assertRaises(ValueError, ct, '1foo') self.assertRaises(ValueError, ct, 'foo') def test_full_format_converters(self): _skip_if_numpy_not_friendly() def conv(v): return v.astype('m8[ns]') d1 = np.timedelta64(1,'D') self.assertEqual(ct('1days'), conv(d1)) self.assertEqual(ct('1days,'), conv(d1)) self.assertEqual(ct('- 1days,'), -conv(d1)) self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1,'s'))) self.assertEqual(ct('06:00:01'), conv(np.timedelta64(6*3600+1,'s'))) self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s'))) self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms'))) self.assertEqual(ct('- 1days, 00:00:01'), -conv(d1+np.timedelta64(1,'s'))) self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s'))) self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms'))) # invalid self.assertRaises(ValueError, ct, '- 1days, 00') def test_nat_converters(self): _skip_if_numpy_not_friendly() self.assertEqual(to_timedelta('nat',box=False), tslib.iNaT) self.assertEqual(to_timedelta('nan',box=False), tslib.iNaT) def test_to_timedelta(self): _skip_if_numpy_not_friendly() def conv(v): return v.astype('m8[ns]') d1 = np.timedelta64(1,'D') self.assertEqual(to_timedelta('1 days 06:05:01.00003',box=False), conv(d1+np.timedelta64(6*3600+5*60+1,'s')+np.timedelta64(30,'us'))) self.assertEqual(to_timedelta('15.5us',box=False), conv(np.timedelta64(15500,'ns'))) # empty string result = to_timedelta('',box=False) self.assertEqual(result, tslib.iNaT) result = to_timedelta(['', '']) self.assert_(isnull(result).all()) # pass thru result = to_timedelta(np.array([np.timedelta64(1,'s')])) expected = np.array([np.timedelta64(1,'s')]) tm.assert_almost_equal(result,expected) # ints result = np.timedelta64(0,'ns') expected = to_timedelta(0,box=False) self.assertEqual(result, expected) # Series expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)]) result = to_timedelta(Series(['1d','1days 00:00:01'])) tm.assert_series_equal(result, expected) # with units result = Series([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ],dtype='m8[ns]') expected = to_timedelta([0,10],unit='s') tm.assert_series_equal(result, expected) # single element conversion v = timedelta(seconds=1) result = to_timedelta(v,box=False) expected = np.timedelta64(timedelta(seconds=1)) self.assertEqual(result, expected) v = np.timedelta64(timedelta(seconds=1)) result = to_timedelta(v,box=False) expected = np.timedelta64(timedelta(seconds=1)) self.assertEqual(result, expected) def test_to_timedelta_via_apply(self): _skip_if_numpy_not_friendly() # GH 5458 expected = Series([np.timedelta64(1,'s')]) result = Series(['00:00:01']).apply(to_timedelta) tm.assert_series_equal(result, expected) result = Series([to_timedelta('00:00:01')]) tm.assert_series_equal(result, expected) def test_timedelta_ops(self): _skip_if_numpy_not_friendly() # GH4984 # make sure ops return timedeltas s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ]) td = s.diff() result = td.mean()[0] # TODO This should have returned a scalar to begin with. Hack for now. expected = to_timedelta(timedelta(seconds=9)) tm.assert_almost_equal(result, expected) result = td.quantile(.1) # This properly returned a scalar. expected = to_timedelta('00:00:02.6') tm.assert_almost_equal(result, expected) result = td.median()[0] # TODO This should have returned a scalar to begin with. 
Hack for now. expected = to_timedelta('00:00:08') tm.assert_almost_equal(result, expected) # GH 6462 # consistency in returned values for sum result = td.sum()[0] expected = to_timedelta('00:01:21') tm.assert_almost_equal(result, expected) def test_to_timedelta_on_missing_values(self): _skip_if_numpy_not_friendly() # GH5438 timedelta_NaT = np.timedelta64('NaT') actual = pd.to_timedelta(Series(['00:00:01', np.nan])) expected = Series([np.timedelta64(1000000000, 'ns'), timedelta_NaT], dtype='<m8[ns]') assert_series_equal(actual, expected) actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
pandas.util.testing.assert_series_equal
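A self-contained version of the assertion the record completes. Note that pandas.util.testing was the import path of that era; newer pandas exposes the same helper as pandas.testing.assert_series_equal.

import numpy as np
import pandas as pd
import pandas.util.testing as tm  # pandas.testing in newer releases

actual = pd.to_timedelta(pd.Series(['00:00:01', pd.NaT]))
expected = pd.Series([np.timedelta64(1000000000, 'ns'),
                      np.timedelta64('NaT')], dtype='<m8[ns]')
tm.assert_series_equal(actual, expected)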
from gams import * import pandas as pd import DataBase import COE import regex_gms from DB2Gams import * class am_base(gams_model_py): def __init__(self,tree,gams_settings=None): self.tree = tree super().__init__(self.tree.database,gsettings=gams_settings, blocks_text = None, functions = None, groups = {}, exceptions = [], exceptions_load = [], components = {}, export_files = None) def apply_type(self,type_): return eval(f"COE.{type_}()") def run_abatement_model(self,repo=os.getcwd(),type_='CES',export_settings=False): self.define_groups() self.define_blocks(type_=type_) self.run_default(repo,export_settings=export_settings) def define_groups(self,p='p',q='q',mu='mu',sigma='sigma'): self.p = p self.q = q self.mu = mu self.sigma = sigma if p not in self.database: self.database[p] = pd.Series(1,index=self.database[self.tree.setname],name=p) if q not in self.database: self.database[q] = pd.Series(1,index=self.database[self.tree.setname],name=q) if mu not in self.database: self.database[mu] = pd.Series(0.5,index=self.database[self.tree.mapname],name=mu) if sigma not in self.database: self.database[sigma] = pd.Series(0.5, index = self.database[self.tree.aggname],name=sigma) self.group_tech = {sigma: {'conditions': self.database.get(self.tree.aggname).to_str}, mu : {'conditions': self.database.get(self.tree.mapname).to_str}} self.group_exo = {p: {'conditions': self.database.get(self.tree.inpname).to_str}, q: {'conditions': self.database.get(self.tree.outname).to_str}} self.group_endo= {p: {'conditions': self.database.get(self.tree.aggname).to_str}, q: {'conditions': self.database.get(self.tree.sector).to_str+' and not '+self.database.get(self.tree.outname).to_str}} self.add_group_to_groups(self.group_tech,self.model.name+'_tech') self.add_group_to_groups(self.group_exo ,self.model.name+'_exo') self.add_group_to_groups(self.group_endo,self.model.name+'_endo') self.model.g_endo = [self.model.name+'_endo'] self.model.g_exo = [self.model.name+'_tech', self.model.name+'_exo'] # Arrange variables in types, with alias' etc. 
that are used to write equations: n2nn = {self.tree.setname: self.tree.alias} n2nnn = {self.tree.setname: self.tree.alias2} nn2n = {self.tree.alias : self.tree.setname} self.write_vars = { 'q': {'base' : self.database.get(self.q).to_str, 'alias' : self.database.get(self.q,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.q,alias_domains=n2nnn).to_str}, 'p': {'base' : self.database.get(self.p).to_str, 'alias' : self.database.get(self.p,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.p,alias_domains=n2nnn).to_str}, 'mu':{'base' : self.database.get(self.mu).to_str, 'alias' : self.database.get(self.mu,alias_domains={**n2nn,**nn2n}).to_str, 'alias2': self.database.get(self.mu,alias_domains=n2nnn).to_str}, 'sigma':{'base' : self.database.get(self.sigma).to_str, 'alias' : self.database.get(self.sigma,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.sigma,alias_domains=n2nnn).to_str, 'level': self.database.get(self.sigma,level='.l').to_str}, 'inputs': { 'base' : self.tree.setname, 'alias' : self.tree.alias, 'alias2': self.tree.alias2}, 'in2aggs': {'base' : self.database.get(self.tree.mapname).to_str, 'alias' : self.database.get(self.tree.mapname,alias_domains={**n2nn,**nn2n}).to_str, 'alias2': self.database.get(self.tree.mapname,alias_domains=n2nnn).to_str} } def define_blocks(self,type_): functype = self.apply_type(type_) self.blocks = """ $BLOCK M_{mname} {demand_equation} {price_equation} $ENDBLOCK """.format( mname = self.model.name, demand_equation = functype.equation('demand',f"E_{self.model.name}_q", self.groups[self.model.name+'_endo'][self.q]['domains'], self.groups[self.model.name+'_endo'][self.q]['conditions'], self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'], self.write_vars['sigma'],self.write_vars['inputs'],self.write_vars['in2aggs']), price_equation = functype.equation('price_index',f"E_{self.model.name}_p", self.groups[self.model.name+'_endo'][self.p]['domains'], self.groups[self.model.name+'_endo'][self.p]['conditions'], self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'], self.write_vars['sigma'],self.write_vars['inputs'],self.write_vars['in2aggs'])) self.model.blocks = ['M_'+self.model.name] class am_base_v2(gams_model_py): """ abatement model with quantities/prices defined over different sets. 
""" def __init__(self,tree,gams_settings=None): self.tree = tree super().__init__(self.tree.database,gsettings=gams_settings, blocks_text = None, functions = None, groups = {}, exceptions = [], exceptions_load = [], components = {}, export_files = None) def apply_type(self,type_): return eval(f"COE.{type_}()") def run_abatement_model(self,repo=os.getcwd(),type_='CES',export_settings=False,add_aggregates=False): self.define_groups() self.define_blocks(type_=type_) if add_aggregates is True: self.add_aggregates() self.run_default(repo,export_settings=export_settings) def define_groups(self,p='p',q='q',mu='mu',sigma='sigma',eta='eta'): self.p = p self.q = q self.mu = mu self.sigma = sigma self.eta = eta if p not in self.database: self.database[p] = pd.Series(1,index=self.database[self.tree.p_all],name=p) if q not in self.database: self.database[q] = pd.Series(1,index=self.database[self.tree.q_all],name=q) if mu not in self.database: self.database[mu] = pd.Series(0.5,index=self.database[self.tree.mapname],name=mu) if sigma not in self.database: self.database[sigma] = pd.Series(0.5, index = self.database[self.tree.aggname],name=sigma) self.group_tech = {sigma: {'conditions': self.database.get(self.tree.aggname).to_str}, mu : {'conditions': self.database.get(self.tree.mapname).to_str}} self.group_exo = {p: {'conditions': self.database.get(self.tree.inpname).to_str}, q: {'conditions': self.database.get(self.tree.outname).to_str}} self.group_endo= {p: {'conditions': self.database.get(self.tree.aggname).to_str}, q: {'conditions': self.database.get(self.tree.sector).to_str+' and not '+self.database.get(self.tree.outname).to_str}} self.add_group_to_groups(self.group_tech,self.model.name+'_tech') self.add_group_to_groups(self.group_exo ,self.model.name+'_exo') self.add_group_to_groups(self.group_endo,self.model.name+'_endo') self.model.g_endo = [self.model.name+'_endo'] self.model.g_exo = [self.model.name+'_tech', self.model.name+'_exo'] n2nn = {self.tree.setname: self.tree.alias} n2nnn = {self.tree.setname: self.tree.alias2} nn2n = {self.tree.alias : self.tree.setname} nn2nnn = {self.tree.alias : self.tree.alias2} self.write_vars = { 'q': {'base' : self.database.get(self.q).to_str, 'alias' : self.database.get(self.q,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.q,alias_domains=n2nnn).to_str}, 'p': {'base' : self.database.get(self.p).to_str, 'alias' : self.database.get(self.p,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.p,alias_domains=n2nnn).to_str}, 'mu':{'base' : self.database.get(self.mu).to_str, 'alias' : self.database.get(self.mu,alias_domains={**n2nn,**nn2n}).to_str, 'alias2': self.database.get(self.mu,alias_domains=n2nnn).to_str}, 'sigma':{'base' : self.database.get(self.sigma).to_str, 'alias' : self.database.get(self.sigma,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.sigma,alias_domains=n2nnn).to_str, 'level': self.database.get(self.sigma,level='.l').to_str}, 'inputs': { 'base' : self.tree.setname, 'alias' : self.tree.alias, 'alias2': self.tree.alias2}, 'in2aggs': {'base' : self.database.get(self.tree.mapname).to_str, 'alias' : self.database.get(self.tree.mapname,alias_domains={**n2nn,**nn2n}).to_str, 'alias2': self.database.get(self.tree.mapname,alias_domains=n2nnn).to_str}, 'q2p': {'base': self.database.get(self.tree.q2p,alias_domains=nn2nnn).to_str, 'alias': self.database.get(self.tree.q2p,alias_domains={**n2nn,**nn2nnn}).to_str}} def define_blocks(self,type_): """ Equation blocks for CES-input-like part of model: """ functype = 
self.apply_type(type_) self.blocks = """ $BLOCK M_{mname} {demand_equation} {price_equation} $ENDBLOCK """.format( mname = self.model.name, demand_equation = functype.equation('demand',f"E_{self.model.name}_q", self.database.get(self.q).to_string('dom'), self.groups[self.model.name+'_endo'][self.q]['conditions'], self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'], self.write_vars['sigma'],self.write_vars['inputs'],self.write_vars['in2aggs'], self.write_vars['q2p']), price_equation = functype.equation('price_index',f"E_{self.model.name}_p", self.database.get(self.p).to_string('dom'), self.database.get(self.tree.aggname).to_str, self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'], self.write_vars['sigma'],self.write_vars['inputs'],self.write_vars['in2aggs'], self.write_vars['q2p'])) self.model.blocks = ['M_'+self.model.name] def add_aggregates(self): self.group_endo_aggs = {self.q: {'conditions': self.database.get(self.tree.n2nn_agg).to_str}} self.add_group_to_groups(self.group_endo_aggs,self.model.name+'_aggs') self.model.g_endo += [self.model.name+'_aggs'] self.write_vars['n2nn'] = {'alias': self.database.get(self.tree.n2nn,alias_domains={**{self.tree.setname: self.tree.alias},**{self.tree.alias: self.tree.setname}}).to_str} self.blocks += """ $BLOCK M_{mname}_agg {sum_eq} $ENDBLOCK """.format( mname =self.model.name, sum_eq=COE.sums().equation('simple_sum',f"E_{self.model.name}_agg", self.database.get(self.q).to_string('dom'), self.groups[self.model.name+'_aggs'][self.q]['conditions'], self.write_vars['q']['base'],self.write_vars['q']['alias'], self.write_vars['inputs']['alias'], self.write_vars['n2nn']['alias'] )) self.model.blocks += ['M_'+self.model.name+'_agg'] class am_cet(gams_model_py): """ Includes an arbitrary combination of input/output nests (e.g. 
CES-input/CET-outputsplit) """ def __init__(self,tree,gams_settings=None): self.tree=tree self.block_components = {} super().__init__(self.tree.database,gsettings=gams_settings,blocks_text=None,functions=None,groups={},exceptions=[],exceptions_load=[],components = {},export_files = None) def apply_type(self,type_): return eval(f"COE.{type_}()") def run_abatement_model(self,repo=os.getcwd(),type_in='CES',type_out='CES',export_settings=False): self.define_groups() self.define_blocks_in(type_=type_in) self.define_blocks_out(type_=type_out) self.agg_block_components() self.run_default(repo,export_settings=export_settings) def agg_block_components(self): self.model.blocks = list(self.block_components.keys()) self.blocks = "" for component in self.block_components: self.blocks += self.block_components[component] # Define groups: def define_groups(self,p='p',q='q',mu='mu',sigma='sigma',eta='eta'): self.p = p self.q = q self.mu = mu self.sigma = sigma self.eta = eta if p not in self.database: self.database[p] = pd.Series(1,index=self.database[self.tree.setname],name=p) if q not in self.database: self.database[q] = pd.Series(1,index=self.database[self.tree.setname],name=q) if mu not in self.database: self.database[mu] = pd.Series(0.5,index=self.database[self.tree.all_map],name=mu) if sigma not in self.database: self.database[sigma] = pd.Series(0.5, index = self.database[self.tree.in_agg],name=sigma) if eta not in self.database: self.database[eta] = pd.Series(-0.5, index = self.database[self.tree.out_agg], name=eta) self.group_tech = {sigma: {'conditions': self.database.get(self.tree.in_agg).to_str}, eta : {'conditions': self.database.get(self.tree.out_agg).to_str}, mu : {'conditions': self.database.get(self.tree.all_map).to_str}} self.group_exo = {p: {'conditions': self.database.get(self.tree.inpname).to_str}, q: {'conditions': self.database.get(self.tree.outname).to_str}} self.group_endo= {p: {'conditions': self.database.get(self.tree.out_endo).to_str+' or '+self.database.get(self.tree.in_agg).to_str}, q: {'conditions': self.database.get(self.tree.in_endo).to_str +' or '+self.database.get(self.tree.out_agg).to_str}} self.add_group_to_groups(self.group_tech,self.model.name+'_tech') self.add_group_to_groups(self.group_exo ,self.model.name+'_exo') self.add_group_to_groups(self.group_endo,self.model.name+'_endo') self.model.g_endo = [self.model.name+'_endo'] self.model.g_exo = [self.model.name+'_tech', self.model.name+'_exo'] n2nn = {self.tree.setname: self.tree.alias} n2nnn = {self.tree.setname: self.tree.alias2} nn2n = {self.tree.alias : self.tree.setname} self.write_vars = { 'q': {'base' : self.database.get(self.q).to_str, 'alias' : self.database.get(self.q,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.q,alias_domains=n2nnn).to_str}, 'p': {'base' : self.database.get(self.p).to_str, 'alias' : self.database.get(self.p,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.p,alias_domains=n2nnn).to_str}, 'mu':{'base' : self.database.get(self.mu).to_str, 'alias' : self.database.get(self.mu,alias_domains={**n2nn,**nn2n}).to_str, 'alias2': self.database.get(self.mu,alias_domains=n2nnn).to_str}, 'sigma':{'base' : self.database.get(self.sigma).to_str, 'alias' : self.database.get(self.sigma,alias_domains=n2nn).to_str, 'alias2': self.database.get(self.sigma,alias_domains=n2nnn).to_str, 'level': self.database.get(self.sigma,level='.l').to_str}, 'eta' : {'base' : self.database.get(self.eta).to_str, 'alias' : self.database.get(self.eta,alias_domains=n2nn).to_str, 'alias2': 
self.database.get(self.eta,alias_domains=n2nnn).to_str}, 'inputs': { 'base' : self.tree.setname, 'alias' : self.tree.alias, 'alias2': self.tree.alias2}, 'in2aggs_in': {'base' : self.database.get(self.tree.in_map).to_str, 'alias' : self.database.get(self.tree.in_map,alias_domains={**n2nn,**nn2n}).to_str, 'alias2': self.database.get(self.tree.in_map,alias_domains=n2nnn).to_str}, 'in2aggs_out':{'base' : self.database.get(self.tree.out_map).to_str, 'alias' : self.database.get(self.tree.out_map,alias_domains={**n2nn,**nn2n}).to_str, 'alias2': self.database.get(self.tree.out_map,alias_domains=n2nnn).to_str}} def define_blocks_in(self,type_): """ Equation blocks for CES-input-like part of model: """ functype = self.apply_type(type_) self.block_components['M_'+self.model.name+'_in'] = """ $BLOCK M_{mname}_in {demand_equation} {price_equation} $ENDBLOCK """.format( mname = self.model.name, demand_equation = functype.equation('demand',f"E_{self.model.name}_in_q", self.database.get(self.q).to_string('dom'), self.database.get(self.tree.in_endo).to_str, self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'], self.write_vars['sigma'],self.write_vars['inputs'],self.write_vars['in2aggs_in']), price_equation = functype.equation('price_index',f"E_{self.model.name}_in_p", self.database.get(self.p).to_string('dom'), self.database.get(self.tree.in_agg).to_str, self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'], self.write_vars['sigma'],self.write_vars['inputs'],self.write_vars['in2aggs_in'])) def define_blocks_out(self,type_): """ Equation blocks for CET-output-split-like part of model: """ functype = self.apply_type(type_) self.block_components['M_'+self.model.name+'_out'] = """ $BLOCK M_{mname}_out {demand_equation} {price_equation} $ENDBLOCK """.format( mname = self.model.name, demand_equation = functype.equation('demand',f"E_{self.model.name}_out_q", self.database.get(self.q).to_string('dom'), self.database.get(self.tree.out_endo).to_str, self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'], self.write_vars['eta'],self.write_vars['inputs'],self.write_vars['in2aggs_out']), price_equation = functype.equation('price_index',f"E_{self.model.name}_out_p", self.database.get(self.p).to_string('dom'), self.database.get(self.tree.out_agg).to_str, self.write_vars['p'],self.write_vars['q'],self.write_vars['mu'], self.write_vars['eta'],self.write_vars['inputs'],self.write_vars['in2aggs_out'])) class am_CET_v2(gams_model_py): """ abatement model with CTE outputs and quantities/prices defined over different sets. 
""" def __init__(self,tree,gams_settings=None): self.tree = tree self.block_components = {} super().__init__(self.tree.database,gsettings=gams_settings,blocks_text=None,functions=None,groups={},exceptions=[],exceptions_load=[],components = {},export_files = None) def apply_type(self,type_): return eval(f"COE.{type_}()") def run_abatement_model(self,repo=os.getcwd(),type_in='CES_v2',type_out='CES',export_settings=False,add_aggregates=False): self.define_groups() self.define_blocks_in(type_=type_in) self.define_blocks_out(type_=type_out) self.agg_block_components() if add_aggregates is True: self.add_aggregates() self.run_default(repo,export_settings=export_settings) def agg_block_components(self): self.model.blocks = list(self.block_components.keys()) self.blocks = "" for component in self.block_components: self.blocks += self.block_components[component] def define_groups(self,p='p',q='q',mu='mu',sigma='sigma',eta='eta'): self.p = p self.q = q self.mu = mu self.sigma = sigma self.eta = eta if p not in self.database: self.database[p] = pd.Series(1,index=self.database[self.tree.p_all],name=p) if q not in self.database: self.database[q] = pd.Series(1,index=self.database[self.tree.q_all],name=q) if mu not in self.database: self.database[mu] =
pd.Series(0.5,index=self.database[self.tree.all_map],name=mu)
pandas.Series
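The completion initialises the share parameter mu as a constant Series over the tree's mapping set. A minimal sketch with a hypothetical index standing in for self.database[self.tree.all_map]:

import pandas as pd

# hypothetical input-to-aggregate mapping set
all_map = pd.MultiIndex.from_tuples([('coal', 'energy'), ('gas', 'energy')],
                                    names=['n', 'nn'])

# a scalar first argument is broadcast over the supplied index
mu = pd.Series(0.5, index=all_map, name='mu')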
"""Generate example parameter sets for Agtor""" import SALib from SALib.sample.latin import sample as latin_sampler import pandas as pd params =
pd.read_csv("test_params.csv")
pandas.read_csv
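The record reads the sampling bounds from test_params.csv. A self-contained sketch that uses an in-memory buffer instead of the file; the column names are invented for illustration:

import io
import pandas as pd

csv_text = "name,min,max\nirrigation_efficiency,0.5,0.9\ncrop_price,100,300\n"
params = pd.read_csv(io.StringIO(csv_text))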
#Script for Importing data from Stats.cricinfo website
import pandas as pd

for var in range(1971,2018):
    newVar = str(var)
    DF = pd.read_html('http://stats.espncricinfo.com/ci/engine/records/team/match_results.html?class=2;id='+newVar+';type=year')
    newDF =
pd.DataFrame(DF[0])
pandas.DataFrame
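pd.read_html returns a list of DataFrames, so DF[0] is already a DataFrame; the completion re-wraps the first parsed table. A sketch with a stand-in table in place of the scraped page:

import pandas as pd

# stand-in for the list pd.read_html would return for one season
DF = [pd.DataFrame({'Team 1': ['Australia'], 'Team 2': ['England'], 'Winner': ['Australia']})]

newDF = pd.DataFrame(DF[0])  # re-wraps the first parsed table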
'''Python script to generate Dashboard'''
'''Authors - <NAME> '''
import numpy as np
import pandas as pd
from datetime import datetime
import collections
from .helpers import *

class Dashboard:
    def __init__(self, mrr, cohorts, is_dict, bs_dict, cf_dict):
        print("INIT DASHBOARD")
        self.mrr =
pd.DataFrame(mrr)
pandas.DataFrame
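The dashboard constructor turns the incoming mrr payload into a DataFrame. A sketch with a hypothetical monthly-recurring-revenue structure (the real shape of mrr is not shown in the record):

import pandas as pd

# hypothetical shape of the mrr argument: one column per month
mrr = {'2020-01': [1000.0, 250.0], '2020-02': [1200.0, 300.0]}
mrr_df = pd.DataFrame(mrr)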
import streamlit as st import pandas as pd import base64 import matplotlib.pyplot as plt import seaborn as sns import numpy as np import SessionState import altair as alt # Title Text st.title('NFLfastR Explorer') st.markdown(""" This app performs simple filtering of NFLfastR data. * **Upcoming features:** filter by for weather Add a page for team data *compare totals for selected players * **Python libraries:** base64, pandas, streamlit, numpy, matplotlib, seaborn * **Data source:** [NFLfastR](https://github.com/guga31bb/nflfastR-data/). """) #Sidebar st.sidebar.header('User Input Features') #Sidebar-Select Year selected_year = st.sidebar.multiselect('Year', list(reversed(range(1990,2021))), default=2020) # get data for year(s) @st.cache def load_data(year): data =
pd.DataFrame()
pandas.DataFrame
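load_data starts from an empty DataFrame, presumably appending one frame per selected season. A reduced sketch of that accumulator pattern; the per-season frame here is fabricated:

import pandas as pd

data = pd.DataFrame()
for year in [2019, 2020]:
    season = pd.DataFrame({'season': [year], 'play_id': [1]})  # stand-in for the season CSV
    data = pd.concat([data, season], ignore_index=True)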
""" Point to Probabilistic """ import pandas as pd import numpy as np from autots.tools.impute import fake_date_fill from scipy.stats import percentileofscore def percentileofscore_appliable(x, a, kind='rank'): return percentileofscore(a, score=x, kind=kind) def historic_quantile(df_train, prediction_interval: float = 0.9): """ Computes the difference between the median and the prediction interval range in historic data. Args: df_train (pd.DataFrame): a dataframe of training data prediction_interval (float): the desired forecast interval range Returns: lower, upper (np.array): two 1D arrays """ quantiles = [0, 1 - prediction_interval, 0.5, prediction_interval, 1] bins = np.nanquantile(df_train.astype(float), quantiles, axis=0, keepdims=False) upper = bins[3] - bins[2] if 0 in upper: np.where(upper != 0, upper, (bins[4] - bins[2]) / 4) lower = bins[2] - bins[1] if 0 in lower: np.where(lower != 0, lower, (bins[2] - bins[0]) / 4) return lower, upper def inferred_normal(train, forecast, n: int = 5, prediction_interval: float = 0.9): """A corruption of Bayes theorem. It will be sensitive to the transformations of the data.""" prior_mu = train.mean() prior_sigma = train.std() from scipy.stats import norm p_int = 1 - ((1 - prediction_interval) / 2) adj = norm.ppf(p_int) upper_forecast, lower_forecast = pd.DataFrame(), pd.DataFrame() for index, row in forecast.iterrows(): data_mu = row post_mu = ( (prior_mu / prior_sigma ** 2) + ((n * data_mu) / prior_sigma ** 2) ) / ((1 / prior_sigma ** 2) + (n / prior_sigma ** 2)) lower = pd.DataFrame(post_mu - adj * prior_sigma).transpose() lower = lower.where(lower <= data_mu, data_mu, axis=1) upper = pd.DataFrame(post_mu + adj * prior_sigma).transpose() upper = upper.where(upper >= data_mu, data_mu, axis=1) lower_forecast = pd.concat([lower_forecast, lower], axis=0) upper_forecast = pd.concat([upper_forecast, upper], axis=0) lower_forecast.index = forecast.index upper_forecast.index = forecast.index return upper_forecast, lower_forecast """ post_mu = ((prior_mu/prior_sigma ** 2) + ((n * data_mu)/data_sigma ** 2))/ ((1/prior_sigma ** 2) + (n/data_sigma ** 2)) post_sigma = sqrt(1/((1/prior_sigma ** 2) + (n/data_sigma ** 2))) """ def Variable_Point_to_Probability(train, forecast, alpha=0.3, beta=1): """Data driven placeholder for model error estimation. ErrorRange = beta * (En + alpha * En-1 [cum sum of En]) En = abs(0.5 - QTP) * D D = abs(Xn - ((Avg % Change of Train * Xn-1) + Xn-1)) Xn = Forecast Value QTP = Percentile of Score in All Percent Changes of Train Score = Percent Change (from Xn-1 to Xn) Args: train (pandas.DataFrame): DataFrame of time series where index is DatetimeIndex forecast (pandas.DataFrame): DataFrame of forecast time series in which the index is a DatetimeIndex and columns/series aligned with train. Forecast must be > 1 in length. alpha (float): parameter which effects the broadening of error range over time Usually 0 < alpha < 1 (although it can be larger than 1) beta (float): parameter which effects the general width of the error bar Usually 0 < beta < 1 (although it can be larger than 1) Returns: ErrorRange (pandas.DataFrame): error width for each value of forecast. 
""" column_order = train.columns.intersection(forecast.columns) intial_length = len(forecast.columns) forecast = forecast[column_order] # align columns aligned_length = len(forecast.columns) train = train[column_order] if aligned_length != intial_length: print("Forecast columns do not match train, some series may be lost") train = train.replace(0, np.nan) train = fake_date_fill(train, back_method='keepNA') percent_changes = train.pct_change() median_change = percent_changes.median() # median_change = (1 + median_change) # median_change[median_change <= 0 ] = 0.01 # HANDLE GOING BELOW ZERO diffs = abs( forecast - (forecast + forecast * median_change).fillna(method='ffill').shift(1) ) forecast_percent_changes = forecast.replace(0, np.nan).pct_change() quantile_differences =
pd.DataFrame()
pandas.DataFrame
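The historic_quantile function in this record returns per-series lower and upper offsets (median minus the lower quantile, upper quantile minus the median). A small usage sketch follows, showing how those offsets are typically wrapped around a point forecast to form a prediction interval; the toy data are made up, and the sketch assumes historic_quantile from this record is available in the session.

import numpy as np
import pandas as pd

# Made-up training history and point forecast with matching columns
df_train = pd.DataFrame(np.random.randn(100, 2).cumsum(axis=0), columns=["a", "b"])
forecast = pd.DataFrame(np.random.randn(10, 2).cumsum(axis=0), columns=["a", "b"])

lower, upper = historic_quantile(df_train, prediction_interval=0.9)
# The offsets are 1D arrays (one value per series) and broadcast across columns
lower_forecast = forecast - lower
upper_forecast = forecast + upper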
#realtor_graph.py #from neo4j_connect_2 import NeoSandboxApp #import neo4j_connect_2 as neo #import GoogleServices as google #from pyspark.sql import SparkSession #from pyspark.sql.functions import struct from cgitb import lookup import code from dbm import dumb from doctest import master from hmac import trans_36 import mimetypes from platform import node from pprint import pprint from pty import master_open from re import sub from unittest.util import unorderable_list_difference from urllib.parse import non_hierarchical from neomodel import (config, StructuredNode, StringProperty, IntegerProperty, UniqueIdProperty, RelationshipTo, BooleanProperty, EmailProperty, Relationship,db) import pandas as pd #import NeoNodes as nn #import GoogleServices import neo4jClasses #import sparkAPI as spark import neoModelAPI as neo import glob import os import json import numpy as np import re #from neoModelAPI import NeoNodes as nn def get_cwd(): cwd = os.getcwd() return cwd def get_files(cwd =os.getcwd(), input_directory = 'extras'): path = os.sep.join([cwd,input_directory]) file_list= [f for f in glob.glob(path + "**/*.json", recursive=True)] return file_list def instantiate_neo_model_api(): uri = "7a92f171.databases.neo4j.io" user = "neo4j" psw = 'RF4Gr2IJTNhHlW6HOrLDqz_I2E2Upyh7o8paTwfnCxg' return neo.neoAPI.instantiate_neo_model_session(uri=uri,user=user,psw=psw) def load_json_data(file): f = open (file, "r") # Reading from file data = json.loads(f.read()) return data def json_pipeline(file_list, master_subject_table): case_counter = 0 for file in file_list: data = load_json_data(file=file) data = data['results'] #pprint(data) #pprint(data[0]) #filtered_data = filter_json_data(json_data = data, filter = filter) # Creating the case nodes transaction nodes and df data = clean_json_data(data) case_data = stringify_json_values(data) case_data = pandify_case_data(case_data) case_data = nodify_case_data(case_data = case_data) # Creating the subject nodes transaction nodes and df subject_list = slice_subject_data(data) subject_list = identify_unique_subjects(subject_list) subject_lookup_table = create_subject_lookup_table(subject_list) master_subject_table = integrate_to_master_table(subject_lookup_table,master_subject_table) #pprint(master_subject_table.duplicated()) case_counter = case_counter + len(case_data) master_subject_table = nodify_subjects(master_subject_table) #pprint(case_data) #pprint(master_subject_table['transaction']) #lets save data to the database #master_subject_table = submit_subjects_to_db(master_subject_table) #case_data = submit_cases_to_db(case_data = case_data) # Create Relationships #relationship_list= create_relationship_table(case_data=case_data, master_subject_table=master_subject_table) def submit_cases_to_db(case_data): #unsubmitted = master_subject_table[master_subject_table.notna()] ### in theory none of the cases wouldhave been submitted becasue i am pulling them from file. There is no need to check.. Just submit #non_submitted_nodes = case_data[case_data['submitted'].isna()].copy() #pprint(non_submitted_nodes) ##pprint(non_submitted_nodes) #if non_submitted_nodes.empty: # return case_data #else: case_data['transaction'] = case_data['transaction'].apply(lambda x: neo.neoAPI.update(x)) #Assume all are submitted.. 
case_data['submitted'] = True #test = non_submitted_nodes.iloc[32]['transaction'] #return_obj = neo.neoAPI.update(test) #case_data.update(non_submitted_nodes) return case_data #Relationships must need to be created following saving to the df #relationships = create_relationship_table(case_data, master_subject_table) def submit_subjects_to_db(master_subject_table): #unsubmitted = master_subject_table[master_subject_table.notna()] #pprint(master_subject_table) #non_submitted_nodes=master_subject_table[[master_subject_table['submitted'] == np.nan]] non_submitted_nodes = master_subject_table[master_subject_table['submitted'].isna()].copy() #pprint(non_submitted_nodes) if non_submitted_nodes.empty: return master_subject_table else: #pprint(non_submitted_nodes) non_submitted_nodes['transaction'] = non_submitted_nodes['transaction'].apply(lambda x: neo.neoAPI.update(x)) non_submitted_nodes['submitted'] = True #test = non_submitted_nodes.iloc[32]['transaction'] #return_obj = neo.neoAPI.update(test) master_subject_table.update(non_submitted_nodes) #pprint(master_subject_table) return master_subject_table def tester(): return "Hello Dolly" def create_relationship_table(case_data, master_subject_table): #pprint(case_data[]) #test = master_subject_table['subject'] # select relationship_list = [] for row in range(len(case_data)): unique_dataframe = (master_subject_table[master_subject_table['subject'].isin(case_data['subject_list'][row])]) #pprint(unique_dataframe) for subject_row in range(len(unique_dataframe)): case = case_data.iloc[row]['transaction'] subject = unique_dataframe.iloc[subject_row]['transaction'] #create relationship #pprint(case) #pprint(subject) relationship = neo.neoAPI.create_relationship(case.subject_relationship,subject) #pprint(relationship) relationship_list.append(relationship) return relationship_list #create relationship between the case and each uid in the unique_data_frame_transaction_list pprint(unique_dataframe) ## Creating the realation table # Thoughts # pass subject and case table # case_subject list collumn # where that list is in the master table #return the subjects # make a connection to between each subject and the case in the returned tableuid in the table # return a transaction list # with the list commit a transaction for eachn # #case_data= filter_case_data(data) def nodify_case_data(case_data): #non_submitted_nodes = case_data[case_data.notna()] non_submitted_nodes = case_data[case_data.notna().any(axis=1)] #pprint(non_submitted_nodes) case_nodes = non_submitted_nodes.apply(lambda x :neo.neoAPI.create_case_node(date = x['date'], dates= x['dates'],group = x['group'], name=x['id'], pdf= x['pdf'], shelf_id = x['shelf_id'], subject= x['subject'], title = x['title'], url = x['url'], subject_relationship=True), axis=1) case_data['transaction'] = case_nodes return case_data def filter_case_data(data): pprint(data[0]) def nodify_subjects(master_subject_table): non_submitted_nodes = master_subject_table[master_subject_table.isna().any(axis=1)].copy() #df[df.isna().any(axis=1)] #pprint(non_submitted_nodes) non_submitted_nodes['transaction'] = non_submitted_nodes['subject'].apply(lambda x :neo.neoAPI.create_subject_node(name = x)) master_subject_table.update(non_submitted_nodes) return master_subject_table def integrate_to_master_table(subject_lookup_table, master_subject_table): #check_if subject in list is in subject of the table # if so drop it from the temp table # append what is left to the master table #pprint(subject_lookup_table) test = 
master_subject_table['subject'] unique_dataframe = (subject_lookup_table[~subject_lookup_table['subject'].isin(test)]) #pprint(unique_dataframe) #duplicate_list = (master_subject_table[~master_subject_table['subject'].isin(subject_lookup_table['subject'])]) master_subject_table = pd.concat([master_subject_table,unique_dataframe]) #master_subject_table.update(unique_dataframe) master_subject_table.reset_index(inplace=True, drop=True) #pprint(master_subject_table) #pprint(master_subject_table.duplicated()) return master_subject_table def create_subject_lookup_table(subject_list): lookup_table = pd.DataFrame(subject_list, columns=['subject']) lookup_table['transaction'] = np.nan lookup_table['submitted'] = np.nan return lookup_table def identify_unique_subjects(subject_list): # insert the list to the set list_set = set(subject_list) # convert the set to the list unique_list = (list(list_set)) return unique_list def slice_subject_data(data): subject_list = [] for case in data: subject_list = subject_list + case['subject_list'] #pprint(subject_list) return subject_list def pandify_case_data(data): #case_df = pd.concat(data, sort=False) df= pd.DataFrame(data) df['submitted'] = np.nan return df def stringify_json_values(data): for dict in data: subject_list = dict['subject'] for key in dict: if type(dict[key]) == list: tmp_list = [] for item in (dict[key]): item = item.replace(" ", "-") tmp_list.append(item) dict[key] = tmp_list dict[key] = ",".join(dict[key]) dict['subject_list'] = subject_list return data #pprint(data) def clean_json_data(filtered_data): # Select the keys that I want from the dictionary # filter appropriatly into a df # write df to file #print(type(filtered_data)) #pprint(filtered_data) for data in filtered_data: #pprint(data) #creat a dictionary of columns and values for each row. Combine them all into a df when we are done # each dictionary must be a row.... which makes perfect sense, but they can not be nested... item = data.pop('item', None) resources = data.pop('resources', None) index = data.pop('index', None) language = data.pop('language', None) online_format= data.pop('online_format', None) original_format = data.pop('original_format', None) kind = data.pop('type', None) image_url = data.pop('image_url', None) hassegments = data.pop('hassegments', None) extract_timestamp = data.pop('extract_timestamp', None) timestampe = data.pop('timestamp', None) mimetype=data.pop('mime_type', None) try: pdf = resources[0]['pdf'] except: pdf = "noPdf" data["pdf"] = pdf data['search_index'] = index # convert to strings maybe move into another function to be called. Actually will definitely move to a nother function return filtered_data #uid = UniqueIdProperty() ##date = date #dates = dates #group = group #id = id #pdf = pdf #shelf_id = shelf_id #subject = subject #primary_topic = primary_topic #title = title #url = url #description = description #source_collection = source_collection def filter_json_data(json_data, filter): # Using dict() # Extracting specific keys from dictionary filter = ['contributor','date', 'dates', 'digitized'] res = dict((k, json_data[k]) for k in filter if k in json_data) return res def create_master_subject_table(): table =
pd.DataFrame()
pandas.DataFrame
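create_master_subject_table() is cut off right at its pd.DataFrame() call. A plausible completion, inferred from how create_subject_lookup_table() and integrate_to_master_table() use the frame (they expect 'subject', 'transaction' and 'submitted' columns), is sketched below; treat the column list as an assumption rather than the author's exact code.

import pandas as pd

def create_master_subject_table():
    # Empty master table with the columns the rest of the pipeline reads and updates
    table = pd.DataFrame(columns=["subject", "transaction", "submitted"])
    return table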
""" Author: <NAME> Python Version: 3.6, 3.7, 3.8 This script will take in pre-windowed multi-alignment files of any format specified at (https://biopython.org/wiki/AlignIO) and will calculate the p-distance of each sample from the indicated reference sample provided. *** Make sure to install dependencies Pandas and Biopython in your python enviornment. """ import glob import sys import os from pathlib import Path import argparse import time import datetime import logging import pathlib import numpy as np import pandas as pd from Bio import AlignIO # ------------------- Helper functions ------------------- def _get_chromosome_dirs(input_directory): """Collect chromosome directories""" dirs = [] for d in input_directory.iterdir(): if not d.is_dir(): continue # Just in case user re-runs and # does not delete output files elif d.name == 'logs': continue elif d.name == 'p_distance_output': continue else: dirs.append(d) return dirs def base_check(base): """Check if current base is A, T, C, or G. All other symbols will be concidered as N """ bases = ['A', 'T', 'C', 'G'] if base not in bases: return False else: return True def p_distance_calc(current_sample_seq, reference_sample_seq, missing_data_threshold): """This function takes in the current sample sequence and the reference sample sequence and calculates the p-distance per window""" snp_count = 0 same_base = 0 nan_count = 0 logging.debug(f"Current Sample: {current_sample_seq[:10]}--{current_sample_seq[-10:]}") logging.debug(f"Reference Sample: {reference_sample_seq[:10]}--{reference_sample_seq[-10:]}\n") for curr_samp_base, ref_base in zip(current_sample_seq, reference_sample_seq): curr_samp_check = base_check(curr_samp_base) ref_base_check = base_check(ref_base) if not curr_samp_check or not ref_base_check: nan_count += 1 elif curr_samp_base != ref_base: snp_count += 1 else: same_base += 1 continue assert snp_count + same_base + nan_count == len(current_sample_seq) if nan_count / (snp_count + same_base + nan_count) >= missing_data_threshold: p_distance = np.nan else: p_distance = float(snp_count / len(current_sample_seq)) return p_distance def per_sample_calc( reference_sample, alignment_df, file_name, file_df, missing_data_threshold, outfile_index ): # ["Chromosome", "Start", "Stop"] split_file_name = file_name.split("_") chromosome = str(split_file_name[0]) start = int(split_file_name[1]) stop = int(split_file_name[2]) # Set Chromosome, Start, and Stop in file_df file_df.at[outfile_index, "Chromosome"] = chromosome file_df.at[outfile_index, "Start"] = start file_df.at[outfile_index, "Stop"] = stop reference_sample_sequence = list(alignment_df[reference_sample]) logging.debug(f"Samples: {alignment_df.columns.to_list()}") for current_sample in alignment_df.columns: if current_sample == reference_sample: file_df.at[outfile_index, str(reference_sample)] = 0 continue else: current_sample_seq = list(alignment_df[current_sample]) p_distance = p_distance_calc( current_sample_seq, reference_sample_sequence, missing_data_threshold, ) file_df.at[outfile_index, str(current_sample)] = p_distance continue return file_df # ------------------------- BODY ------------------------- def p_distance_calculator( input_directory=None, output_directory=None, reference_sample=None, project_id=None, missing_data_threshold=None, window_size=None, ): # set Paths() input_directory = Path(input_directory) if not output_directory: output_directory = Path(input_directory) / "p_distance_output" else: output_directory = Path(output_directory) # Make output directory 
output_directory.mkdir(parents=True, exist_ok=True) # Set Date DATE = str(datetime.date.today()).replace('-', '_') # Initialize log file log_path = output_directory / "logs/" log_path.mkdir(parents=True, exist_ok=True) log_file = log_path / f"{project_id}_{DATE}.log" logging.basicConfig( filename=log_file, level=logging.DEBUG, filemode='w', format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) """Collect chromosome directories into list""" chromosome_dirs = _get_chromosome_dirs(input_directory) """Iterate through each chromosome directory in reverse""" """and calculate p-distance per-file""" for chrom_dir in reversed(chromosome_dirs): print(f"---- Running {chrom_dir.name} ----") logging.debug(f"---- Running {chrom_dir.name} ----") windowed_files = [f for f in chrom_dir.iterdir() if f.is_file()] file_df = None outfile_index = 0 columns_flag = True file_tracker = 1 """ Iterate through each file in the chrom directory""" for f in windowed_files: if file_tracker % 10 == 0: print(f"-- Completed {file_tracker} of {len(windowed_files)} files --") try: read_alignment = AlignIO.read(f, 'fasta') except ValueError: raise ValueError("Faulty input file -- exit") alignment_dict = {str(read.id): list(read.seq) for read in read_alignment} sample_names = [sample_name.name for sample_name in read_alignment] alignment_df = pd.DataFrame( data=alignment_dict, columns=sample_names) # Set up output_df if columns_flag: column_names = list(alignment_df) column_names = ["Chromosome", "Start", "Stop"] + column_names file_df =
pd.DataFrame(columns=column_names)
pandas.DataFrame
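To make the p-distance rule in p_distance_calc concrete, here is a toy call with made-up sequences: non-ACGT symbols are counted as missing, the window is set to NaN only when the missing fraction reaches the threshold, and otherwise the distance is the SNP count divided by the window length.

# Assumes p_distance_calc from this script is importable
current = list("ATCGNATCGA")      # one N (missing), one mismatch vs the reference
reference = list("ATCGAATGGA")
p = p_distance_calc(current, reference, missing_data_threshold=0.5)
# 1 mismatch over 10 positions, 1 missing base (10% < 50% threshold) -> p == 0.1
print(p)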
#!/usr/bin/env python3 # intro and code (parse gpx, plot) from # https://towardsdatascience.com/how-tracking-apps-analyse-your-gps-data-a-hands-on-tutorial-in-python-756d4db6715d import gpxpy import numpy as np import pandas as pd import matplotlib.pyplot as plt from simplification.cutil import simplify_coords from pykalman import KalmanFilter # parse GPX gpx_file = open('test/RunnerUp_01.gpx', 'r') gpx = gpxpy.parse(gpx_file) data = gpx.tracks[0].segments[0].points # GPX to pandas data frame df = pd.DataFrame(columns=['lon', 'lat', 'alt', 'time']) for segment in gpx.tracks[0].segments: for point in segment.points: df = df.append({'lon': point.longitude, 'lat': point.latitude, 'alt': point.elevation, 'time': point.time}, ignore_index=True) start = df.iloc[0] finish = df.iloc[-1] # post-processing stages = [] stages.append(df) def moving_averaging(df, window=10): return df[['lat', 'lon', 'alt']] \ .rolling(window, win_type='hamming').mean().dropna() def kalman_filter_add_v(df): # calculate v_lat and v_lon for each coordinate v_lat = [0] v_lon = [0] for i in range(len(df)-1): lon1 = df['lon'].iloc[i] lat1 = df['lat'].iloc[i] lon2 = df['lon'].iloc[i+1] lat2 = df['lat'].iloc[i+1] dt = (df['time'].iloc[i+1] - df['time'].iloc[i]).seconds if dt == 0: dt = 0.1 vlat = (lat2 - lat1) / dt vlon = (lon2 - lon1) / dt v_lat.append(vlat) v_lon.append(vlon) df.loc[:, 'v_lon'] = pd.Series(v_lon, index=df.index) df.loc[:, 'v_lat'] =
pd.Series(v_lat, index=df.index)
pandas.Series
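The velocity columns built in kalman_filter_add_v are normally fed into pykalman next. The sketch below shows one common setup, a constant-velocity Kalman smoother over [lat, lon, v_lat, v_lon]; the transition/observation matrices and the em/smooth calls follow pykalman's standard API, but the concrete configuration is an assumption rather than the original script's code, and it presumes kalman_filter_add_v(df) has already filled in v_lat and v_lon.

import numpy as np
import pandas as pd
from pykalman import KalmanFilter

measurements = df[['lat', 'lon', 'v_lat', 'v_lon']].to_numpy()
F = np.array([[1, 0, 1, 0],      # constant-velocity transition model
              [0, 1, 0, 1],
              [0, 0, 1, 0],
              [0, 0, 0, 1]])
H = np.eye(4)                    # all four state components are observed
kf = KalmanFilter(transition_matrices=F,
                  observation_matrices=H,
                  initial_state_mean=measurements[0])
kf = kf.em(measurements, n_iter=5)        # estimate noise covariances from the track
smoothed, _ = kf.smooth(measurements)
df_smoothed = pd.DataFrame(smoothed[:, :2], columns=['lat', 'lon'], index=df.index)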
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import logging import collections import functools import re import sys import warnings from contextlib import contextmanager from datetime import datetime, timedelta from distutils.version import LooseVersion import pandas as pd import numpy as np import scipy from scipy import stats import yaml import trackpy # Set is_pandas_since_016 for use elsewhere. # Pandas >= 0.16.0 lets us check if a DataFrame is a view. try: is_pandas_since_016 = (LooseVersion(pd.__version__) >= LooseVersion('0.16.0')) except ValueError: # Probably a development version is_pandas_since_016 = True try: is_pandas_since_017 = (LooseVersion(pd.__version__) >= LooseVersion('0.17.0')) except ValueError: # Probably a development version is_pandas_since_017 = True try: is_pandas_since_018 = (LooseVersion(pd.__version__) >= LooseVersion('0.18.0')) except ValueError: # Probably a development version is_pandas_since_018 = True try: is_pandas_since_023 = (LooseVersion(pd.__version__) >= LooseVersion('0.23.0')) except ValueError: # Probably a development version is_pandas_since_023 = True # Wrap the scipy cKDTree to work around a bug in scipy 0.18.0 try: is_scipy_018 = LooseVersion(scipy.__version__) == LooseVersion('0.18.0') except ValueError: # Probably a development version is_scipy_018 = False if is_scipy_018: from scipy.spatial import KDTree as cKDTree warnings.warn("Due to a bug in Scipy 0.18.0, the (faster) cKDTree cannot " "be used. For better linking performance, upgrade or " "downgrade scipy.") else: from scipy.spatial import cKDTree try: is_scipy_since_100 = LooseVersion(scipy.__version__) >= LooseVersion('1.0.0') except ValueError: # Probably a development version is_scipy_since_100 = True def fit_powerlaw(data, plot=True, **kwargs): """Fit a powerlaw by doing a linear regression in log space.""" ys = pd.DataFrame(data) x = pd.Series(data.index.values, index=data.index, dtype=np.float64) values = pd.DataFrame(index=['n', 'A']) fits = {} for col in ys: y = ys[col].dropna() slope, intercept, r, p, stderr = \ stats.linregress(np.log(x), np.log(y)) values[col] = [slope, np.exp(intercept)] fits[col] = x.apply(lambda x: np.exp(intercept)*x**slope) values = values.T fits = pandas_concat(fits, axis=1) if plot: from trackpy import plots plots.fit(data, fits, logx=True, logy=True, legend=False, **kwargs) return values class memo(object): """Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize """ def __init__(self, func): self.func = func self.cache = {} functools.update_wrapper(self, func) def __call__(self, *args): if not isinstance(args, collections.Hashable): # uncacheable. a list, for instance. warnings.warn("A memoization cache is being used on an uncacheable " + "object. Proceeding by bypassing the cache.", UserWarning) return self.func(*args) if args in self.cache: return self.cache[args] else: value = self.func(*args) self.cache[args] = value return value # This code trips up numba. It's nice for development # but it shouldn't matter for users. # def __repr__(self): # '''Return the function's docstring.''' # return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) def extract(pattern, string, group, convert=None): """Extract a pattern from a string. 
Optionally, convert it to a desired type (float, timestamp, etc.) by specifying a function. When the pattern is not found, gracefully return None.""" # group may be 1, (1,) or (1, 2). if type(group) is int: grp = (group,) elif type(group) is tuple: grp = group assert type(grp) is tuple, "The arg 'group' should be an int or a tuple." try: result = re.search(pattern, string, re.DOTALL).group(*grp) except AttributeError: # For easy unpacking, when a tuple is expected, return a tuple of Nones. return None if type(group) is int else (None,)*len(group) return convert(result) if convert else result def timestamp(ts_string): "Convert a timestamp string to a datetime type." if ts_string is None: return None return datetime.strptime(ts_string, '%Y-%m-%d %H:%M:%S') def time_interval(raw): "Convert a time interval string into a timedelta type." if raw is None: return None m = re.match('([0-9][0-9]):([0-5][0-9]):([0-5][0-9])', raw) h, m, s = map(int, m.group(1, 2, 3)) return timedelta(hours=h, minutes=m, seconds=s) def suppress_plotting(): import matplotlib.pyplot as plt plt.switch_backend('Agg') # does not plot to screen # HH:MM:SS, H:MM:SS, MM:SS, M:SS all OK lazy_timestamp_pat = r'\d?\d?:?\d?\d:\d\d' # a time stamp followed by any text comment ltp = lazy_timestamp_pat video_log_pattern = r'(' + ltp + r')-?(' + ltp + r')? ?(RF)?(.+)?' def lazy_timestamp(partial_timestamp): """Regularize a lazy timestamp like '0:37' -> '00:00:37'. HH:MM:SS, H:MM:SS, MM:SS, and M:SS all OK. Parameters ---------- partial_timestamp : string or other object Returns ------- regularized string """ if not isinstance(partial_timestamp, str): # might be NaN or other unprocessable entry return partial_timestamp input_format = '\d?\d?:?\d?\d:\d\d' if not re.match(input_format, partial_timestamp): raise ValueError("Input string cannot be regularized.") partial_digits = list(partial_timestamp) digits = ['0', '0', ':', '0', '0', ':', '0', '0'] digits[-len(partial_digits):] = partial_digits return ''.join(digits) def timedelta_to_frame(timedeltas, fps): """Convert timedelta times into frame numbers. Parameters ---------- timedelta : DataFrame or Series of timedelta64 datatype fps : frames per second (integer) Result ------ DataFrame Note ---- This sounds like a stupidly easy operation, but handling missing data and multiplication is tricky with timedeltas. """ ns = timedeltas.values seconds = ns * 1e-9 frame_numbers = seconds*fps result = pd.DataFrame(frame_numbers, dtype=np.int64, index=timedeltas.index, columns=timedeltas.columns) result = result.where(timedeltas.notnull(), np.nan) return result def random_walk(N): return np.cumsum(np.random.randn(N), 1) def record_meta(meta_data, file_obj): file_obj.write(yaml.dump(meta_data, default_flow_style=False)) def validate_tuple(value, ndim): if not hasattr(value, '__iter__'): return (value,) * ndim if len(value) == ndim: return tuple(value) raise ValueError("List length should have same length as image dimensions.") try: from IPython.core.display import clear_output except ImportError: pass def make_pandas_strict(): """Configure Pandas to raise an exception for "chained assignments." This is useful during tests. See http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy Does nothing for Pandas versions before 0.13.0. """ if LooseVersion(pd.__version__) >= LooseVersion('0.13.0'): pd.set_option('mode.chained_assignment', 'raise') class IPythonStreamHandler(logging.StreamHandler): "A StreamHandler for logging that clears output between entries." 
def emit(self, s): clear_output(wait=True) print(s.getMessage()) def flush(self): sys.stdout.flush() FORMAT = "%(name)s.%(funcName)s: %(message)s" formatter = logging.Formatter(FORMAT) # Check for IPython and use a special logger use_ipython_handler = False try: import IPython except ImportError: pass else: if IPython.get_ipython() is not None: use_ipython_handler = True if use_ipython_handler: default_handler = IPythonStreamHandler() else: default_handler = logging.StreamHandler(sys.stdout) default_handler.setLevel(logging.INFO) default_handler.setFormatter(formatter) def handle_logging(): "Send INFO-level log messages to stdout. Do not propagate." if use_ipython_handler: # Avoid double-printing messages to IPython stderr. trackpy.logger.propagate = False trackpy.logger.addHandler(default_handler) trackpy.logger.setLevel(logging.INFO) def ignore_logging(): "Reset to factory default logging configuration; remove trackpy's handler." trackpy.logger.removeHandler(default_handler) trackpy.logger.setLevel(logging.NOTSET) trackpy.logger.propagate = True def quiet(suppress=True): """Suppress trackpy information log messages. Parameters ---------- suppress : boolean If True, set the logging level to WARN, hiding INFO-level messages. If False, set level to INFO, showing informational messages. """ if suppress: trackpy.logger.setLevel(logging.WARN) else: trackpy.logger.setLevel(logging.INFO) def _pandas_sort_pre_017(df, by, *args, **kwargs): """Use sort() to sort a DataFrame""" return df.sort(*args, columns=by, **kwargs) def _pandas_sort_post_017(df, by, *args, **kwargs): """Use sort_values() to sort a DataFrame""" return df.sort_values(*args, by=by, **kwargs) if is_pandas_since_017: pandas_sort = _pandas_sort_post_017 else: pandas_sort = _pandas_sort_pre_017 def _pandas_iloc_pre_016(df, inds): """Workaround for bug, iloc with empty list, in pandas < 0.16""" if len(inds) > 0: return df.iloc[inds] else: return df.iloc[:0] def _pandas_iloc_since_016(df, inds): return df.iloc[inds] if is_pandas_since_016: pandas_iloc = _pandas_iloc_since_016 else: pandas_iloc = _pandas_iloc_pre_016 def _pandas_rolling_pre_018(df, window, *args, **kwargs): """Use rolling_mean() to compute a rolling average""" return
pd.rolling_mean(df, window, *args, **kwargs)
pandas.rolling_mean
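This record stops inside _pandas_rolling_pre_018, whose recorded completion is pd.rolling_mean(...). That top-level function was removed in pandas 0.18, so the matching modern wrapper, sketched below with hypothetical names (_pandas_rolling_since_018 and pandas_rolling are not shown in the excerpt), would dispatch to the DataFrame.rolling accessor using the is_pandas_since_018 flag defined earlier in the module.

def _pandas_rolling_since_018(df, window, *args, **kwargs):
    """Use DataFrame.rolling(...).mean() to compute a rolling average."""
    return df.rolling(window, *args, **kwargs).mean()

if is_pandas_since_018:
    pandas_rolling = _pandas_rolling_since_018
else:
    pandas_rolling = _pandas_rolling_pre_018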
"""figures of merit is a collection of financial calculations for energy. This module contains financial calculations based on solar power and batteries in a given network. The networks used are defined as network objects (see evolve parsers). TODO: Add inverters: Inverters are not considered at the moment and Improve Nan Handeling """ import numpy import pandas as pd from c3x.data_cleaning import unit_conversion #Todo: #Add inverters: Inverters are not considered at the moment #Improve Nan Handeling def meter_power(meas_dict: dict, meter: int, axis: int = 0, column: int = 0) -> pd.Series: """ calculates the power for a meter of individual measurement points by summing load, solar and battery power Args: meas_dict (dict): dict with measurement for one or multiple nodes. meter(int): Id for a meter axis (int): how data is concatenated for results column (int): column index to be used return: meter_p (pd.Series): combined power (solar, battery, load) """ meter_p = pd.DataFrame() if meas_dict[meter]: meter_p = pd.DataFrame() for meas in meas_dict[meter]: if 'load' in meas: meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis) elif 'solar' in meas: meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis) elif 'batteries' in meas: meter_p = pd.concat([meter_p, meas_dict[meter][meas].iloc[:,column]], axis=axis) meter_p = meter_p.sum(axis=1) return meter_p def financial(meter_p: pd.Series, import_tariff: pd.Series, export_tariff: pd.Series) -> pd.Series: """ Evaluate the financial outcome for a customer. A conversion from kW to kWh is handled internally Note: assumes constant step size in timestamps (use forth index beforehand) Args: meter_p (pd.Series ): Power of a node import_tariff (pd.Series): Expects this to be in $/kWh. export_tariff (pd.Series): Expects this to be in $/kWh. Returns: cost (pd.Series): cost per measurement point, using import and export tariffs """ # Note: need to ensure meter data is converted to kWh timestep = numpy.timedelta64(meter_p.index[1] - meter_p.index[0]) meter = unit_conversion.convert_watt_to_watt_hour(meter_p, timedelta=timestep) import_power_cost = meter.where(meter >= 0).fillna(value=0.0) export_power_revenue = meter.where(meter < 0).fillna(value=0.0) cost = import_power_cost * import_tariff + export_power_revenue*export_tariff return cost def customer_financial(meas_dict: dict, node_keys: list = None, tariff: dict = None) -> dict: """ Evaluate the financial outcome for a selected customer or for all customers. Note: not currently setup to handle missing data (eg NANs) #TODO: consider inverters and how to avoid double counting with solar, batteries Args: meas_dict (dict): dict with measurement for one or multiple nodes. node_keys (list): list nodes for which financials are calculated. tariff (dict): nodes tariff data. Expects this to be in $/kWh. 
Returns: results_dict: cost per node and the average cost over all nodes """ results_dict = {} average = [] nodes = node_keys if node_keys else meas_dict.keys() for key in nodes: if type(key) == int: key = str(key) if meas_dict[key]: if key in tariff: meter_p = meter_power(meas_dict, key, axis=1) meter_p_cost = financial(meter_p, tariff[key]['import_tariff'], tariff[key]['export_tariff']) results_dict[key] = meter_p_cost initiate = 0 for node in results_dict.values(): average = node if initiate == 0 else average.append(node) initiate = 1 average = numpy.nanmean(average) results_dict["average"] = average return results_dict def customer_cost_financial(tariff: dict, energy_grid_load: pd.Series, energy_solar_grid: pd.Series, energy_battery_load: pd.Series, energy_solar_battery: pd.Series, energy_solar_load: pd.Series) -> pd.Series: """ evaluates the customers cost Args: tariff: specifies tariffs to be applied to aggregation of customers. energy_grid_load: specifies the energy flow between grid and load energy_solar_grid: specifies the energy flow between solar and gird energy_battery_load: specifies the energy flow between battery and load energy_solar_battery: specifies the energy flow between solar and battery energy_solar_load: specifies the energy flow between solar and load Returns: customer_cost (pd.Series): """ customer_cost = financial(energy_grid_load, tariff['re_import_tariff'], 0) customer_cost += financial(energy_grid_load, tariff['rt_import_tariff'], 0) customer_cost += financial(energy_battery_load, tariff['le_import_tariff'], 0) customer_cost += financial(energy_battery_load, tariff['lt_import_tariff'], 0) customer_cost -= financial(energy_solar_grid, tariff['re_export_tariff'], 0) customer_cost += financial(energy_solar_grid, tariff['rt_export_tariff'], 0) customer_cost -= financial(energy_solar_battery, tariff['le_export_tariff'], 0) customer_cost += financial(energy_solar_battery, tariff['lt_export_tariff'], 0) customer_cost -= financial(energy_solar_battery, tariff['le_export_tariff'], 0) customer_cost += financial(energy_solar_load, tariff['lt_import_tariff'], 0) customer_cost += financial(energy_solar_load, tariff['lt_export_tariff'], 0) return customer_cost def battery_cost_financial(tariff: dict, energy_grid_battery: pd.Series, energy_battery_grid: pd.Series, energy_battery_load: pd.Series, energy_solar_battery: pd.Series) -> pd.Series: """ evaluates the battery cost Args: tariff (dict): specifies tariffs to be applied to aggregation of customers. 
energy_grid_battery (pd.Series): specifies the energy flow between grid and battery energy_battery_grid (pd.Series): specifies the energy flow between battery and gird energy_battery_load (pd.Series): specifies the energy flow between battery and load energy_solar_battery (pd.Series): specifies the energy flow between solar and battery Returns: battery_cost (pd.Series): """ battery_cost = financial(energy_solar_battery, tariff['le_import_tariff'], 0) battery_cost += financial(energy_solar_battery, tariff['lt_import_tariff'], 0) battery_cost -= financial(energy_battery_load, tariff['le_export_tariff'], 0) battery_cost += financial(energy_battery_load, tariff['lt_export_tariff'], 0) battery_cost += financial(energy_grid_battery, tariff['re_import_tariff'], 0) battery_cost += financial(energy_grid_battery, tariff['rt_import_tariff'], 0) battery_cost -= financial(energy_battery_grid, tariff['re_export_tariff'], 0) battery_cost += financial(energy_battery_grid, tariff['rt_export_tariff'], 0) return battery_cost def network_cost_financial(tariff: dict, energy_grid_load: pd.Series, energy_grid_battery: pd.Series, energy_battery_grid: pd.Series, energy_battery_load: pd.Series, energy_solar_battery: pd.Series, energy_solar_load: pd.Series) -> pd.Series: """ evaluates the network cost Args: tariff (dict): specifies tariffs to be applied to aggregation of customers. energy_grid_load (pd.Series): specifies the energy flow between grid and load energy_grid_battery (pd.Series): specifies the energy flow between grid and battery energy_battery_grid (pd.Series): specifies the energy flow between battery and grid energy_battery_load (pd.Series): specifies the energy flow between battery and solar energy_solar_battery (pd.Series) : specifies the energy flow between solar and battery energy_solar_load (pd.Series): specifies the energy flow between solar and load Returns: network_cost(pd.Series) """ network_cost = -financial(energy_grid_load, tariff['rt_import_tariff'], 0) network_cost -= financial(energy_battery_load, tariff['lt_import_tariff'], 0) network_cost -= financial(energy_battery_load, tariff['lt_export_tariff'], 0) network_cost -= financial(energy_solar_battery, tariff['lt_import_tariff'], 0) network_cost -= financial(energy_solar_battery, tariff['lt_export_tariff'], 0) network_cost -= financial(energy_grid_battery, tariff['rt_import_tariff'], 0) network_cost -= financial(energy_battery_grid, tariff['rt_export_tariff'], 0) network_cost -= financial(energy_solar_load, tariff['lt_import_tariff'], 0) network_cost -= financial(energy_solar_load, tariff['lt_export_tariff'], 0) return network_cost def lem_financial(customer_tariffs, energy_grid_load, energy_grid_battery, energy_solar_grid, energy_battery_grid, energy_battery_load, energy_solar_battery, energy_solar_load, battery_tariffs=None): """ evaluate the cost for the local energy model Args: customer_tariffs: specifies tariffs to be applied to aggregation of customers. energy_grid_load (pd.series): specifies the energy flow between grid and load energy_grid_battery: specifies the energy flow between grid and battery energy_solar_grid: specifies the energy flow between solar and grid energy_battery_grid: specifies the energy flow between battery and grid energy_battery_load: specifies the energy flow between battery and solar energy_solar_battery: specifies the energy flow between solar and battery energy_solar_load: specifies the energy flow between solar and load battery_tariffs: specifies tariffs to be applied to aggregation of battery. 
(if none given customer_tariffs ware used) Returns: customer_cost, battery_cost, network_cost """ customer_cost = customer_cost_financial(customer_tariffs, energy_grid_load, energy_solar_grid, energy_battery_load, energy_solar_battery, energy_solar_load) bt_choice = battery_tariffs if battery_tariffs else customer_tariffs battery_cost = battery_cost_financial(bt_choice, energy_grid_battery, energy_battery_grid, energy_battery_load, energy_solar_battery) network_cost = network_cost_financial(customer_tariffs, energy_grid_load, energy_grid_battery, energy_battery_grid, energy_battery_load, energy_solar_battery, energy_solar_load) return customer_cost, battery_cost, network_cost def peak_powers(meas_dict: dict, node_keys: list = None) -> dict: """ Calculate the peak power flows into and out of the network. #TODO: consider selecting peak powers per phase #TODO: consider inverters and how to avoid double counting with solar, batteries Args: meas_dict (dict): dict with measurement for one or multiple nodes. node_keys (list): list of Node.names in Network.nodes. Returns: results_dict (dict): dictionary of peak power into and out of network in kW, and in kW/connection point. """ nodes = node_keys if node_keys else meas_dict.keys() sum_meter_power = pd.DataFrame([]) for key in nodes: if type(key) == int: key = str(key) if meas_dict[key]: meter_p = meter_power(meas_dict, key, axis=1) if sum_meter_power.empty: sum_meter_power = meter_p.copy() else: sum_meter_power = pd.concat([sum_meter_power, meter_p], axis=1, sort=True) sum_power = sum_meter_power.sum(axis=1) aver_power = numpy.nanmean(sum_meter_power, axis=1) return {"peak_power_import": numpy.max(sum_power), "peak_power_export": numpy.min(sum_power), "peak_power_import_av": numpy.max(aver_power), "peak_power_export_av": numpy.min(aver_power), "peak_power_import_index": sum_power.idxmax(), "peak_power_export_index": sum_power.idxmax()} def self_sufficiency(load_p: pd.DataFrame, solar_p: pd.DataFrame, battery_p: pd.DataFrame): """ Self-sufficiency = 1 - imports / consumption Note: the function expects a full index #TODO: consider inverters and how to avoid double counting with solar, batteries Args: load_p (pd.dataframe): measurement data for load of a s single node. solar_p (pd.dataframe): measurement data for solar of a s single node. battery_p(pd.dataframe): measurement data for battery of a s single node. 
Returns: results_dict: self_consumption_solar, self_consumption_batteries """ self_sufficiency_solar = numpy.nan self_sufficiency_battery = numpy.nan if not load_p.empty: net_load_solar = pd.concat((load_p, solar_p), axis=1).sum(axis=1) net_load_solar_battery = pd.concat((load_p, solar_p, battery_p), axis=1).sum(axis=1) #create an array that contains which entries are import and which are export mask_import_solar = (net_load_solar >= 0) mask_import_solar_battery = (net_load_solar_battery >= 0) net_import_solar = net_load_solar * mask_import_solar net_import_solar_battery = net_load_solar_battery * mask_import_solar_battery sum_load = numpy.nansum(load_p) sum_solar = numpy.nansum(solar_p) # it doesn't make sense to calculate this if there is no solar or the load date is missing (0.0) if sum_solar < 0 and sum_load != 0: self_sufficiency_solar = 1 - (numpy.nansum(net_import_solar) / sum_load) self_sufficiency_battery = 1 - (numpy.nansum(net_import_solar_battery) / sum_load) else: print("Warning: not enough data to calculate") return {"self_sufficiency_solar": self_sufficiency_solar, "self_sufficiency_batteries": self_sufficiency_battery} def self_consumption(load_p: pd.DataFrame, solar_p: pd.DataFrame, battery_p: pd.DataFrame) -> dict: """ Self-consumption = 1 - exports / generation Note: the function expects a full index #TODO: consider inverters and how to avoid double counting with solar, batteries Args: load_p (pd.dataframe): measurement data for load of a s single node. solar_p (pd.dataframe): measurement data for solar of a s single node. battery_p(pd.dataframe): measurement data for battery of a s single node. Retruns: results_dict: self_consumption_solar, self_consumption_batteries """ net_load_solar = pd.concat((load_p, solar_p), axis=1).sum(axis=1) net_load_solar_battery = pd.concat((load_p, solar_p, battery_p), axis=1).sum(axis=1) # create an array that contains which entries are import and which are export mask_export_solar = (net_load_solar < 0) mask_export_solar_battery = (net_load_solar_battery < 0) net_export_solar = net_load_solar * mask_export_solar net_import_solar_battery = net_load_solar_battery * mask_export_solar_battery sum_solar = numpy.nansum(solar_p) self_consumption_solar = numpy.nan self_consumption_battery = numpy.nan if sum_solar < 0: self_consumption_solar = 1 - (numpy.nansum(net_export_solar) / sum_solar) self_consumption_battery = 1 - (numpy.nansum(net_import_solar_battery) / sum_solar) return {"self_consumption_solar": self_consumption_solar, "self_consumption_batteries": self_consumption_battery} def self_sufficiency_self_consumption_average(self_consumption_self_sufficiency_dict: dict) -> dict: """ calculates the average for self sufficiency and consumption over a given measurement. 
#TODO: consider inverters and how to avoid double counting with solar, batteries Args: self_consumption_self_sufficiency_dict: The dictionary has a node Id as Key and 4 values per node Returns: results_dict: dictionary with averages for the given network """ self_sufficiency_solar = [] self_sufficiency_batteries = [] self_consumption_solar = [] self_consumption_batteries = [] for node in self_consumption_self_sufficiency_dict.values(): self_sufficiency_solar.append(node["self_sufficiency_solar"]) self_sufficiency_batteries.append(node["self_sufficiency_batteries"]) self_consumption_solar.append(node["self_consumption_solar"]) self_consumption_batteries.append(node["self_consumption_batteries"]) av_self_sufficiency_solar = numpy.nanmean(self_sufficiency_solar) av_self_sufficiency_batteries = numpy.nanmean(self_sufficiency_batteries) av_self_consumption_solar = numpy.nanmean(self_consumption_solar) av_self_consumption_batteries = numpy.nanmean(self_consumption_batteries) return {"av_self_sufficiency_solar": av_self_sufficiency_solar, "av_self_sufficiency_batteries": av_self_sufficiency_batteries, "av_self_consumption_solar": av_self_consumption_solar, "av_self_consumption_batteries": av_self_consumption_batteries} def self_sufficiency_self_consumption(meas_dict: dict, node_keys: list = None, column: int = 0) -> dict: """ Self-sufficiency = 1 - imports / consumption Self-consumption = 1 - exports / generation And average over those #TODO: consider inverters and how to avoid double counting with solar, batteries Args: meas_dict (dict): dict with measurement for one or multiple nodes. node_keys (list): list of Node.names in Network.nodes. column (int): Column index used for calculation Returns: results_dict: self_sufficiency_solar, self_sufficiency_batteries, self_consumption_solar, self_consumption_batteries """ results_dict = {} nodes = node_keys if node_keys else meas_dict.keys() for key in nodes: if type(key) == int: key = str(key) if meas_dict[key]: load_p = pd.DataFrame() solar_p = pd.DataFrame() battery_p = pd.DataFrame() for meas in meas_dict[key]: data_df = meas_dict[key][meas] if not data_df.empty: if 'loads' in meas: load_p = pd.concat([load_p, meas_dict[key][meas].iloc[:,column]]) elif 'solar' in meas: solar_p = pd.concat([solar_p, meas_dict[key][meas].iloc[:,column]]) elif 'batteries' in meas: battery_p = pd.concat([battery_p, meas_dict[key][meas].iloc[:,column]]) self_sufficiency_dict = self_sufficiency(load_p, solar_p, battery_p) self_consumption_dict = self_consumption(load_p, solar_p, battery_p) results_dict[key] = self_sufficiency_dict.copy() results_dict[key].update(self_consumption_dict) averages_dict = self_sufficiency_self_consumption_average(results_dict) results_dict.update(averages_dict) return results_dict def network_net_power(meas_dict: dict, node_keys: list = None, column: int = 0) -> dict: """ Calculate the net power (kW) of the network on the point of common coupling (ignoring network structure and losses etc). Import and Export are the net_load with all values set to zero, which are not matching. Note: net_load is calculated by using load, solar and battery values for each node at each time. If your load already has solar factored into it, then you should not pass the solar data on as a separate column in your measurement dict #TODO: consider inverters and how to avoid double counting with solar, batteries Args: meas_dict (dict): dict with measurement for one or multiple nodes. node_keys (list): list of Node.names in Network.nodes. 
column (int): Column index used for calculation Returns: dictionary of net_load, net_import, net_export """ nodes = node_keys if node_keys else meas_dict.keys() load_p = pd.DataFrame() solar_p = pd.DataFrame() battery_p = pd.DataFrame() for key in nodes: if type(key) == int: key = str(key) if meas_dict[key]: for meas in meas_dict[key]: if 'load' in meas: load_p = pd.concat([load_p, meas_dict[key][meas].iloc[:,column]], axis=1) elif 'solar' in meas: solar_p = pd.concat([solar_p, meas_dict[key][meas].iloc[:,column]], axis=1) elif 'batteries' in meas: battery_p =
pd.concat([battery_p, meas_dict[key][meas].iloc[:,column]],axis=1)
pandas.concat
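The cost rule inside financial() in this record is easy to miss in the flattened source: imported energy (>= 0) is charged at the import tariff and exported energy (< 0) is credited at the export tariff. A tiny worked example with made-up numbers follows; it starts from energy in kWh directly, skipping the convert_watt_to_watt_hour step, to stay self-contained.

import pandas as pd

idx = pd.date_range("2020-01-01", periods=4, freq="30T")
energy_kwh = pd.Series([0.5, -0.2, 0.0, 1.0], index=idx)   # positive = import, negative = export
import_tariff = pd.Series(0.30, index=idx)                 # $/kWh
export_tariff = pd.Series(0.10, index=idx)                 # $/kWh

cost = (energy_kwh.where(energy_kwh >= 0, 0.0) * import_tariff
        + energy_kwh.where(energy_kwh < 0, 0.0) * export_tariff)
# -> 0.15, -0.02, 0.00, 0.30 ; negative entries are export revenue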
""" Tests for DatetimeIndex timezone-related methods """ from datetime import date, datetime, time, timedelta, tzinfo import dateutil from dateutil.tz import gettz, tzlocal import numpy as np import pytest import pytz from pandas._libs.tslibs import conversion, timezones import pandas.util._test_decorators as td import pandas as pd from pandas import ( DatetimeIndex, Index, Timestamp, bdate_range, date_range, isna, to_datetime, ) import pandas._testing as tm class FixedOffset(tzinfo): """Fixed offset in minutes east from UTC.""" def __init__(self, offset, name): self.__offset = timedelta(minutes=offset) self.__name = name def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return timedelta(0) fixed_off = FixedOffset(-420, "-07:00") fixed_off_no_name = FixedOffset(-330, None) class TestDatetimeIndexTimezones: # ------------------------------------------------------------- # DatetimeIndex.tz_convert def test_tz_convert_nat(self): # GH#5546 dates = [pd.NaT] idx = DatetimeIndex(dates) idx = idx.tz_localize("US/Pacific") tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) idx = idx.tz_convert("US/Eastern") tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern")) idx = idx.tz_convert("UTC") tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC")) dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT] idx = DatetimeIndex(dates) idx = idx.tz_localize("US/Pacific") tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) idx = idx.tz_convert("US/Eastern") expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) idx = idx + pd.offsets.Hour(5) expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) idx = idx.tz_convert("US/Pacific") expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) idx = idx + np.timedelta64(3, "h") expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) idx = idx.tz_convert("US/Eastern") expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT] tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) @pytest.mark.parametrize("prefix", ["", "dateutil/"]) def test_dti_tz_convert_compat_timestamp(self, prefix): strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern") conv = idx[0].tz_convert(prefix + "US/Pacific") expected = idx.tz_convert(prefix + "US/Pacific")[0] assert conv == expected def test_dti_tz_convert_hour_overflow_dst(self): # Regression test for: # https://github.com/pandas-dev/pandas/issues/13306 # sorted case US/Eastern -> UTC ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"] tt = DatetimeIndex(ts).tz_localize("US/Eastern") ut = tt.tz_convert("UTC") expected = Index([13, 14, 13]) tm.assert_index_equal(ut.hour, expected) # sorted case UTC -> US/Eastern ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"] tt = DatetimeIndex(ts).tz_localize("UTC") ut = tt.tz_convert("US/Eastern") expected = Index([9, 9, 9]) tm.assert_index_equal(ut.hour, expected) # unsorted case US/Eastern -> UTC ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"] tt = DatetimeIndex(ts).tz_localize("US/Eastern") ut = tt.tz_convert("UTC") expected = Index([13, 14, 13]) tm.assert_index_equal(ut.hour, 
expected) # unsorted case UTC -> US/Eastern ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"] tt = DatetimeIndex(ts).tz_localize("UTC") ut = tt.tz_convert("US/Eastern") expected = Index([9, 9, 9]) tm.assert_index_equal(ut.hour, expected) @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz): # Regression test for GH#13306 # sorted case US/Eastern -> UTC ts = [ Timestamp("2008-05-12 09:50:00", tz=tz), Timestamp("2008-12-12 09:50:35", tz=tz), Timestamp("2009-05-12 09:50:32", tz=tz), ] tt = DatetimeIndex(ts) ut = tt.tz_convert("UTC") expected = Index([13, 14, 13]) tm.assert_index_equal(ut.hour, expected) # sorted case UTC -> US/Eastern ts = [ Timestamp("2008-05-12 13:50:00", tz="UTC"), Timestamp("2008-12-12 14:50:35", tz="UTC"), Timestamp("2009-05-12 13:50:32", tz="UTC"), ] tt = DatetimeIndex(ts) ut = tt.tz_convert("US/Eastern") expected = Index([9, 9, 9]) tm.assert_index_equal(ut.hour, expected) # unsorted case US/Eastern -> UTC ts = [ Timestamp("2008-05-12 09:50:00", tz=tz), Timestamp("2008-12-12 09:50:35", tz=tz), Timestamp("2008-05-12 09:50:32", tz=tz), ] tt = DatetimeIndex(ts) ut = tt.tz_convert("UTC") expected = Index([13, 14, 13]) tm.assert_index_equal(ut.hour, expected) # unsorted case UTC -> US/Eastern ts = [ Timestamp("2008-05-12 13:50:00", tz="UTC"), Timestamp("2008-12-12 14:50:35", tz="UTC"), Timestamp("2008-05-12 13:50:32", tz="UTC"), ] tt = DatetimeIndex(ts) ut = tt.tz_convert("US/Eastern") expected = Index([9, 9, 9]) tm.assert_index_equal(ut.hour, expected) @pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)]) def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n): # Regression test for tslib.tz_convert(vals, tz1, tz2). # See https://github.com/pandas-dev/pandas/issues/4496 for details. 
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq) idx = idx.tz_localize("UTC") idx = idx.tz_convert("Europe/Moscow") expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) tm.assert_index_equal(idx.hour, Index(expected)) def test_dti_tz_convert_dst(self): for freq, n in [("H", 1), ("T", 60), ("S", 3600)]: # Start DST idx = date_range( "2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC" ) idx = idx.tz_convert("US/Eastern") expected = np.repeat( np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]), np.array([n, n, n, n, n, n, n, n, n, n, 1]), ) tm.assert_index_equal(idx.hour, Index(expected)) idx = date_range( "2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern" ) idx = idx.tz_convert("UTC") expected = np.repeat( np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), np.array([n, n, n, n, n, n, n, n, n, n, 1]), ) tm.assert_index_equal(idx.hour, Index(expected)) # End DST idx = date_range( "2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC" ) idx = idx.tz_convert("US/Eastern") expected = np.repeat( np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]), np.array([n, n, n, n, n, n, n, n, n, n, 1]), ) tm.assert_index_equal(idx.hour, Index(expected)) idx = date_range( "2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern" ) idx = idx.tz_convert("UTC") expected = np.repeat( np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]), ) tm.assert_index_equal(idx.hour, Index(expected)) # daily # Start DST idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC") idx = idx.tz_convert("US/Eastern") tm.assert_index_equal(idx.hour, Index([19, 19])) idx = date_range( "2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern" ) idx = idx.tz_convert("UTC") tm.assert_index_equal(idx.hour, Index([5, 5])) # End DST idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC") idx = idx.tz_convert("US/Eastern") tm.assert_index_equal(idx.hour, Index([20, 20])) idx = date_range( "2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern" ) idx = idx.tz_convert("UTC") tm.assert_index_equal(idx.hour, Index([4, 4])) def test_tz_convert_roundtrip(self, tz_aware_fixture): tz = tz_aware_fixture idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC") exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M") idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC") exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D") idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC") exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H") idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC") exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T") for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]: converted = idx.tz_convert(tz) reset = converted.tz_convert(None) tm.assert_index_equal(reset, expected) assert reset.tzinfo is None expected = converted.tz_convert("UTC").tz_localize(None) expected = expected._with_freq("infer") tm.assert_index_equal(reset, expected) def test_dti_tz_convert_tzlocal(self): # GH#13583 # tz_convert doesn't affect to internal dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC") dti2 = dti.tz_convert(dateutil.tz.tzlocal()) tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) dti2 = dti.tz_convert(None) 
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) @pytest.mark.parametrize( "tz", [ "US/Eastern", "dateutil/US/Eastern", pytz.timezone("US/Eastern"), gettz("US/Eastern"), ], ) def test_dti_tz_convert_utc_to_local_no_modify(self, tz): rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc") rng_eastern = rng.tz_convert(tz) # Values are unmodified tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz)) @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_tz_convert_unsorted(self, tzstr): dr = date_range("2012-03-09", freq="H", periods=100, tz="utc") dr = dr.tz_convert(tzstr) result = dr[::-1].hour exp = dr.hour[::-1] tm.assert_almost_equal(result, exp) # ------------------------------------------------------------- # DatetimeIndex.tz_localize def test_dti_tz_localize_nonexistent_raise_coerce(self): # GH#13057 times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] index = DatetimeIndex(times) tz = "US/Eastern" with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): index.tz_localize(tz=tz) with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): index.tz_localize(tz=tz, nonexistent="raise") result = index.tz_localize(tz=tz, nonexistent="NaT") test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"] dti = to_datetime(test_times, utc=True) expected = dti.tz_convert("US/Eastern") tm.assert_index_equal(result, expected) @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) def test_dti_tz_localize_ambiguous_infer(self, tz): # November 6, 2011, fall back, repeat 2 AM hour # With no repeated hours, we cannot infer the transition dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour()) with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): dr.tz_localize(tz) # With repeated hours, we can infer the transition dr = date_range( datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz ) times = [ "11/06/2011 00:00", "11/06/2011 01:00", "11/06/2011 01:00", "11/06/2011 02:00", "11/06/2011 03:00", ] di = DatetimeIndex(times) localized = di.tz_localize(tz, ambiguous="infer") expected = dr._with_freq(None) tm.assert_index_equal(expected, localized) tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer")) # When there is no dst transition, nothing special happens dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour()) localized = dr.tz_localize(tz) localized_infer = dr.tz_localize(tz, ambiguous="infer") tm.assert_index_equal(localized, localized_infer) @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) def test_dti_tz_localize_ambiguous_times(self, tz): # March 13, 2011, spring forward, skip from 2 AM to 3 AM dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour()) with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"): dr.tz_localize(tz) # after dst transition, it works dr = date_range( datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz ) # November 6, 2011, fall back, repeat 2 AM hour dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour()) with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): dr.tz_localize(tz) # UTC is OK dr = date_range( datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc ) @pytest.mark.parametrize("tzstr", ["US/Eastern", 
"dateutil/US/Eastern"]) def test_dti_tz_localize_pass_dates_to_utc(self, tzstr): strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] idx = DatetimeIndex(strdates) conv = idx.tz_localize(tzstr) fromdates = DatetimeIndex(strdates, tz=tzstr) assert conv.tz == fromdates.tz tm.assert_numpy_array_equal(conv.values, fromdates.values) @pytest.mark.parametrize("prefix", ["", "dateutil/"]) def test_dti_tz_localize(self, prefix): tzstr = prefix + "US/Eastern" dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L") dti2 = dti.tz_localize(tzstr) dti_utc = pd.date_range( start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc" ) tm.assert_numpy_array_equal(dti2.values, dti_utc.values) dti3 = dti2.tz_convert(prefix + "US/Pacific") tm.assert_numpy_array_equal(dti3.values, dti_utc.values) dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L") with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): dti.tz_localize(tzstr) dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L") with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"): dti.tz_localize(tzstr) @pytest.mark.parametrize( "tz", [ "US/Eastern", "dateutil/US/Eastern", pytz.timezone("US/Eastern"), gettz("US/Eastern"), ], ) def test_dti_tz_localize_utc_conversion(self, tz): # Localizing to time zone should: # 1) check for DST ambiguities # 2) convert to UTC rng = date_range("3/10/2012", "3/11/2012", freq="30T") converted = rng.tz_localize(tz) expected_naive = rng + pd.offsets.Hour(5) tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) # DST ambiguity, this should fail rng = date_range("3/11/2012", "3/12/2012", freq="30T") # Is this really how it should fail?? with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"): rng.tz_localize(tz) def test_dti_tz_localize_roundtrip(self, tz_aware_fixture): # note: this tz tests that a tz-naive index can be localized # and de-localized successfully, when there are no DST transitions # in the range. 
idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T") tz = tz_aware_fixture localized = idx.tz_localize(tz) # cant localize a tz-aware object with pytest.raises( TypeError, match="Already tz-aware, use tz_convert to convert" ): localized.tz_localize(tz) reset = localized.tz_localize(None) assert reset.tzinfo is None expected = idx._with_freq(None) tm.assert_index_equal(reset, expected) def test_dti_tz_localize_naive(self): rng = date_range("1/1/2011", periods=100, freq="H") conv = rng.tz_localize("US/Pacific") exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific") tm.assert_index_equal(conv, exp._with_freq(None)) def test_dti_tz_localize_tzlocal(self): # GH#13583 offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) offset = int(offset.total_seconds() * 1000000000) dti = date_range(start="2001-01-01", end="2001-03-01") dti2 = dti.tz_localize(dateutil.tz.tzlocal()) tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8) dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) dti2 = dti.tz_localize(None) tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8) @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) def test_dti_tz_localize_ambiguous_nat(self, tz): times = [ "11/06/2011 00:00", "11/06/2011 01:00", "11/06/2011 01:00", "11/06/2011 02:00", "11/06/2011 03:00", ] di = DatetimeIndex(times) localized = di.tz_localize(tz, ambiguous="NaT") times = [ "11/06/2011 00:00", np.NaN, np.NaN, "11/06/2011 02:00", "11/06/2011 03:00", ] di_test = DatetimeIndex(times, tz="US/Eastern") # left dtype is datetime64[ns, US/Eastern] # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] tm.assert_numpy_array_equal(di_test.values, localized.values) @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) def test_dti_tz_localize_ambiguous_flags(self, tz): # November 6, 2011, fall back, repeat 2 AM hour # Pass in flags to determine right dst transition dr = date_range( datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz ) times = [ "11/06/2011 00:00", "11/06/2011 01:00", "11/06/2011 01:00", "11/06/2011 02:00", "11/06/2011 03:00", ] # Test tz_localize di = DatetimeIndex(times) is_dst = [1, 1, 0, 0, 0] localized = di.tz_localize(tz, ambiguous=is_dst) expected = dr._with_freq(None) tm.assert_index_equal(expected, localized) tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst)) localized = di.tz_localize(tz, ambiguous=np.array(is_dst)) tm.assert_index_equal(dr, localized) localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool")) tm.assert_index_equal(dr, localized) # Test constructor localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst) tm.assert_index_equal(dr, localized) # Test duplicate times where inferring the dst fails times += times di = DatetimeIndex(times) # When the sizes are incompatible, make sure error is raised msg = "Length of ambiguous bool-array must be the same size as vals" with pytest.raises(Exception, match=msg): di.tz_localize(tz, ambiguous=is_dst) # When sizes are compatible and there are repeats ('infer' won't work) is_dst = np.hstack((is_dst, is_dst)) localized = di.tz_localize(tz, ambiguous=is_dst) dr = dr.append(dr) tm.assert_index_equal(dr, localized) # When there is no dst transition, nothing special happens dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour()) is_dst = np.array([1] * 10) localized = dr.tz_localize(tz) localized_is_dst = dr.tz_localize(tz, 
ambiguous=is_dst) tm.assert_index_equal(localized, localized_is_dst) # TODO: belongs outside tz_localize tests? @pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"]) def test_dti_construction_ambiguous_endpoint(self, tz): # construction with an ambiguous end-point # GH#11626 with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): date_range( "2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H" ) times = date_range( "2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer" ) assert times[0] == Timestamp("2013-10-26 23:00", tz=tz, freq="H") assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H") @pytest.mark.parametrize( "tz, option, expected", [ ["US/Pacific", "shift_forward", "2019-03-10 03:00"], ["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"], ["US/Pacific", "shift_backward", "2019-03-10 01:00"], ["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"], ["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"], ], ) def test_dti_construction_nonexistent_endpoint(self, tz, option, expected): # construction with an nonexistent end-point with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"): date_range( "2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H" ) times = date_range( "2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option ) assert times[-1] == Timestamp(expected, tz=tz, freq="H") def test_dti_tz_localize_bdate_range(self): dr = pd.bdate_range("1/1/2009", "1/1/2010") dr_utc = pd.bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc) localized = dr.tz_localize(pytz.utc) tm.assert_index_equal(dr_utc, localized) @pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"]) @pytest.mark.parametrize( "method, exp", [["NaT", pd.NaT], ["raise", None], ["foo", "invalid"]] ) def test_dti_tz_localize_nonexistent(self, tz, method, exp): # GH 8917 n = 60 dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min") if method == "raise": with pytest.raises(pytz.NonExistentTimeError, match="2015-03-29 02:00:00"): dti.tz_localize(tz, nonexistent=method) elif exp == "invalid": msg = ( "The nonexistent argument must be one of " "'raise', 'NaT', 'shift_forward', 'shift_backward' " "or a timedelta object" ) with pytest.raises(ValueError, match=msg): dti.tz_localize(tz, nonexistent=method) else: result = dti.tz_localize(tz, nonexistent=method) expected = DatetimeIndex([exp] * n, tz=tz) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "start_ts, tz, end_ts, shift", [ ["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"], [ "2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 01:59:59.999999999", "backward", ], [ "2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:20:00", timedelta(hours=1), ], [ "2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 01:20:00", timedelta(hours=-1), ], ["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"], [ "2018-03-11 02:33:00", "US/Pacific", "2018-03-11 01:59:59.999999999", "backward", ], [ "2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:33:00", timedelta(hours=1), ], [ "2018-03-11 02:33:00", "US/Pacific", "2018-03-11 01:33:00", timedelta(hours=-1), ], ], ) @pytest.mark.parametrize("tz_type", ["", "dateutil/"]) def test_dti_tz_localize_nonexistent_shift( self, start_ts, tz, end_ts, shift, tz_type ): # GH 8917 tz = tz_type + tz if isinstance(shift, str): shift = "shift_" + shift dti = DatetimeIndex([Timestamp(start_ts)]) result = 
dti.tz_localize(tz, nonexistent=shift) expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("offset", [-1, 1]) @pytest.mark.parametrize("tz_type", ["", "dateutil/"]) def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, tz_type): # GH 8917 tz = tz_type + "Europe/Warsaw" dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")]) msg = "The provided timedelta will relocalize on a nonexistent time" with pytest.raises(ValueError, match=msg): dti.tz_localize(tz, nonexistent=timedelta(seconds=offset)) # ------------------------------------------------------------- # DatetimeIndex.normalize def test_normalize_tz(self): rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern") result = rng.normalize() # does not preserve freq expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern") tm.assert_index_equal(result, expected._with_freq(None)) assert result.is_normalized assert not rng.is_normalized rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC") result = rng.normalize() expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC") tm.assert_index_equal(result, expected) assert result.is_normalized assert not rng.is_normalized rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) result = rng.normalize() # does not preserve freq expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) tm.assert_index_equal(result, expected._with_freq(None)) assert result.is_normalized assert not rng.is_normalized @td.skip_if_windows @pytest.mark.parametrize( "timezone", [ "US/Pacific", "US/Eastern", "UTC", "Asia/Kolkata", "Asia/Shanghai", "Australia/Canberra", ], ) def test_normalize_tz_local(self, timezone): # GH#13459 with tm.set_timezone(timezone): rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) result = rng.normalize() expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) expected = expected._with_freq(None) tm.assert_index_equal(result, expected) assert result.is_normalized assert not rng.is_normalized # ------------------------------------------------------------ # DatetimeIndex.__new__ @pytest.mark.parametrize("prefix", ["", "dateutil/"]) def test_dti_constructor_static_tzinfo(self, prefix): # it works! 
index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST") index.hour index[0] def test_dti_constructor_with_fixed_tz(self): off = FixedOffset(420, "+07:00") start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) rng = date_range(start=start, end=end) assert off == rng.tz rng2 = date_range(start, periods=len(rng), tz=off) tm.assert_index_equal(rng, rng2) rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00") assert (rng.values == rng3.values).all() @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_convert_datetime_list(self, tzstr): dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo") dr2 = DatetimeIndex(list(dr), name="foo", freq="D") tm.assert_index_equal(dr, dr2) def test_dti_construction_univalent(self): rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern") rng2 = DatetimeIndex(data=rng, tz="US/Eastern") tm.assert_index_equal(rng, rng2) @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) def test_dti_from_tzaware_datetime(self, tz): d = [datetime(2012, 8, 19, tzinfo=tz)] index = DatetimeIndex(d) assert timezones.tz_compare(index.tz, tz) @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_tz_constructors(self, tzstr): """Test different DatetimeIndex constructions with timezone Follow-up of GH#4229 """ arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"] idx1 = to_datetime(arr).tz_localize(tzstr) idx2 = pd.date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr) idx2 = idx2._with_freq(None) # the others all have freq=None idx3 = DatetimeIndex(arr, tz=tzstr) idx4 = DatetimeIndex(np.array(arr), tz=tzstr) for other in [idx2, idx3, idx4]: tm.assert_index_equal(idx1, other) # ------------------------------------------------------------- # Unsorted @pytest.mark.parametrize( "dtype", [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], ) def test_date_accessor(self, dtype): # Regression test for GH#21230 expected = np.array([date(2018, 6, 4), pd.NaT]) index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype) result = index.date tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( "dtype", [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], ) def test_time_accessor(self, dtype): # Regression test for GH#21267 expected = np.array([time(10, 20, 30), pd.NaT]) index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype) result = index.time tm.assert_numpy_array_equal(result, expected) def test_timetz_accessor(self, tz_naive_fixture): # GH21358 tz = timezones.maybe_get_tz(tz_naive_fixture) expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT]) index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz) result = index.timetz tm.assert_numpy_array_equal(result, expected) def test_dti_drop_dont_lose_tz(self): # GH#2621 ind = date_range("2012-12-01", periods=10, tz="utc") ind = ind.drop(ind[-1]) assert ind.tz is not None def test_dti_tz_conversion_freq(self, tz_naive_fixture): # GH25241 t3 = DatetimeIndex(["2019-01-01 10:00"], freq="H") assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="T") assert t4.tz_convert(tz="UTC").freq == t4.freq def test_drop_dst_boundary(self): # see gh-18031 tz = "Europe/Brussels" freq = "15min" start = Timestamp("201710290100", tz=tz) end = Timestamp("201710290300", tz=tz) index = 
pd.date_range(start=start, end=end, freq=freq) expected = DatetimeIndex( [ "201710290115", "201710290130", "201710290145", "201710290200", "201710290215", "201710290230", "201710290245", "201710290200", "201710290215", "201710290230", "201710290245", "201710290300", ], tz=tz, freq=freq, ambiguous=[ True, True, True, True, True, True, True, False, False, False, False, False, ], ) result = index.drop(index[0]) tm.assert_index_equal(result, expected) def test_date_range_localize(self): rng = date_range("3/11/2012 03:00", periods=15, freq="H", tz="US/Eastern") rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern") rng3 = date_range("3/11/2012 03:00", periods=15, freq="H") rng3 = rng3.tz_localize("US/Eastern") tm.assert_index_equal(rng._with_freq(None), rng3) # DST transition time val = rng[0] exp = Timestamp("3/11/2012 03:00", tz="US/Eastern") assert val.hour == 3 assert exp.hour == 3 assert val == exp # same UTC value tm.assert_index_equal(rng[:2], rng2) # Right before the DST transition rng = date_range("3/11/2012 00:00", periods=2, freq="H", tz="US/Eastern") rng2 = DatetimeIndex( ["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern", freq="H" ) tm.assert_index_equal(rng, rng2) exp = Timestamp("3/11/2012 00:00", tz="US/Eastern") assert exp.hour == 0 assert rng[0] == exp exp = Timestamp("3/11/2012 01:00", tz="US/Eastern") assert exp.hour == 1 assert rng[1] == exp rng = date_range("3/11/2012 00:00", periods=10, freq="H", tz="US/Eastern") assert rng[2].hour == 3 def test_timestamp_equality_different_timezones(self): utc_range = date_range("1/1/2000", periods=20, tz="UTC") eastern_range = utc_range.tz_convert("US/Eastern") berlin_range = utc_range.tz_convert("Europe/Berlin") for a, b, c in zip(utc_range, eastern_range, berlin_range): assert a == b assert b == c assert a == c assert (utc_range == eastern_range).all() assert (utc_range == berlin_range).all() assert (berlin_range == eastern_range).all() def test_dti_intersection(self): rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") left = rng[10:90][::-1] right = rng[20:80][::-1] assert left.tz == rng.tz result = left.intersection(right) assert result.tz == left.tz def test_dti_equals_with_tz(self): left = date_range("1/1/2011", periods=100, freq="H", tz="utc") right = date_range("1/1/2011", periods=100, freq="H", tz="US/Eastern") assert not left.equals(right) @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_tz_nat(self, tzstr): idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT]) assert isna(idx[1]) assert idx[0].tzinfo is not None @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_astype_asobject_tzinfos(self, tzstr): # GH#1345 # dates around a dst transition rng = date_range("2/13/2010", "5/6/2010", tz=tzstr) objs = rng.astype(object) for i, x in enumerate(objs): exval = rng[i] assert x == exval assert x.tzinfo == exval.tzinfo objs = rng.astype(object) for i, x in enumerate(objs): exval = rng[i] assert x == exval assert x.tzinfo == exval.tzinfo @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_with_timezone_repr(self, tzstr): rng = date_range("4/13/2010", "5/6/2010") rng_eastern = rng.tz_localize(tzstr) rng_repr = repr(rng_eastern) assert "2010-04-13 00:00:00" in rng_repr @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_take_dont_lose_meta(self, tzstr): rng = date_range("1/1/2000", periods=20, tz=tzstr) result = rng.take(range(5)) assert result.tz == 
rng.tz assert result.freq == rng.freq @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_utc_box_timestamp_and_localize(self, tzstr): tz = timezones.maybe_get_tz(tzstr) rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc") rng_eastern = rng.tz_convert(tzstr) expected = rng[-1].astimezone(tz) stamp = rng_eastern[-1] assert stamp == expected assert stamp.tzinfo == expected.tzinfo # right tzinfo rng = date_range("3/13/2012", "3/14/2012", freq="H", tz="utc") rng_eastern = rng.tz_convert(tzstr) # test not valid for dateutil timezones. # assert 'EDT' in repr(rng_eastern[0].tzinfo) assert "EDT" in repr(rng_eastern[0].tzinfo) or "tzfile" in repr( rng_eastern[0].tzinfo ) def test_dti_to_pydatetime(self): dt = dateutil.parser.parse("2012-06-13T01:39:00Z") dt = dt.replace(tzinfo=tzlocal()) arr = np.array([dt], dtype=object) result = to_datetime(arr, utc=True) assert result.tz is pytz.utc rng = date_range("2012-11-03 03:00", "2012-11-05 03:00", tz=tzlocal()) arr = rng.to_pydatetime() result = to_datetime(arr, utc=True) assert result.tz is pytz.utc def test_dti_to_pydatetime_fizedtz(self): dates = np.array( [ datetime(2000, 1, 1, tzinfo=fixed_off), datetime(2000, 1, 2, tzinfo=fixed_off), datetime(2000, 1, 3, tzinfo=fixed_off), ] ) dti = DatetimeIndex(dates) result = dti.to_pydatetime() tm.assert_numpy_array_equal(dates, result) result = dti._mpl_repr() tm.assert_numpy_array_equal(dates, result) @pytest.mark.parametrize("tz", [pytz.timezone("US/Central"), gettz("US/Central")]) def test_with_tz(self, tz): # just want it to work start = datetime(2011, 3, 12, tzinfo=pytz.utc) dr = bdate_range(start, periods=50, freq=pd.offsets.Hour()) assert dr.tz is pytz.utc # DateRange with naive datetimes dr = bdate_range("1/1/2005", "1/1/2009", tz=pytz.utc) dr = bdate_range("1/1/2005", "1/1/2009", tz=tz) # normalized central = dr.tz_convert(tz) assert central.tz is tz naive = central[0].to_pydatetime().replace(tzinfo=None) comp = conversion.localize_pydatetime(naive, tz).tzinfo assert central[0].tz is comp # compare vs a localized tz naive = dr[0].to_pydatetime().replace(tzinfo=None) comp = conversion.localize_pydatetime(naive, tz).tzinfo assert central[0].tz is comp # datetimes with tzinfo set dr = bdate_range( datetime(2005, 1, 1, tzinfo=pytz.utc), datetime(2009, 1, 1, tzinfo=pytz.utc) ) msg = "Start and end cannot both be tz-aware with different timezones" with pytest.raises(Exception, match=msg): bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), "1/1/2009", tz=tz) @pytest.mark.parametrize("prefix", ["", "dateutil/"]) def test_field_access_localize(self, prefix): strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern") assert (rng.hour == 0).all() # a more unusual time zone, #1946 dr = date_range( "2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan" ) expected = Index(np.arange(10, dtype=np.int64)) tm.assert_index_equal(dr.hour, expected) @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) def test_dti_convert_tz_aware_datetime_datetime(self, tz): # GH#1581 dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)] dates_aware = [conversion.localize_pydatetime(x, tz) for x in dates] result = DatetimeIndex(dates_aware) assert timezones.tz_compare(result.tz, tz) converted = to_datetime(dates_aware, utc=True) ex_vals = np.array([Timestamp(x).value for x in dates_aware]) tm.assert_numpy_array_equal(converted.asi8, ex_vals) assert converted.tz is pytz.utc def 
test_dti_union_aware(self): # non-overlapping rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", tz="US/Central") rng2 =
date_range("2012-11-15 12:00:00", periods=6, freq="H", tz="US/Eastern")
pandas.date_range
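# Illustrative sketch (not part of the test suite above): a minimal, standalone
# example of the tz_localize / tz_convert behaviour those tests exercise.
# The timezone names match the tests; the sample dates below are my own.
import pandas as pd

# Naive timestamps spanning the 2014-11-02 US/Eastern fall-back transition.
naive = pd.date_range("2014-11-02 00:00", periods=4, freq="H")

# 01:00 occurs twice that night, so a bare tz_localize raises AmbiguousTimeError;
# an explicit policy resolves it (here the ambiguous stamp becomes NaT).
localized = naive.tz_localize("US/Eastern", ambiguous="NaT")

# tz_convert only changes the display timezone, never the underlying UTC instants.
assert (localized.tz_convert("UTC").asi8 == localized.asi8).all()

# Spring-forward case: 2015-03-08 02:30 does not exist in US/Eastern, so
# tz_localize needs a `nonexistent` policy (as the tests above rely on).
missing = pd.DatetimeIndex(["2015-03-08 02:30"])
print(missing.tz_localize("US/Eastern", nonexistent="shift_forward"))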
import logging import numpy as np import pandas as pd from icubam.analytics import dataset SPREAD_CUM_JUMPS_MAX_JUMP = { "n_covid_deaths": 10, "n_covid_transfered": 10, "n_covid_refused": 10, "n_covid_healed": 10, } def format_data(d: pd.DataFrame) -> pd.DataFrame: d["datetime"] = pd.to_datetime(d["create_date"]) d["date"] = d["datetime"].dt.date d["department"] = d["icu_dept"] d["region"] = d["icu_region_name"] d["region_id"] = d["icu_region_id"] d = d[dataset.ALL_COLUMNS] return d def preprocess_bedcounts( d: pd.DataFrame, spread_cum_jump_correction: bool = False, max_date: bool = None, ) -> pd.DataFrame: """This will process the bedcounts data to make analysis easier. There are five steps to the processing: 1) Run a low-pass filter over all timeseries to remove single spikes that generally represent a data entry error. 2) Aggregate the timeseries into their closest T-min intervals (T=15). This helps remove repeated updates, and takes the most recent update for a T-min window, such that if there was a correction to bad data, that will be the only value for that time window. 3) Guarantee monotonicity on cumulative counts by replacing any decreasing values in the timeseries with their previous count: x_t = max(x_t, x_{t-1}). 4) Impute missing data with two strategies: for holes > 3 days, impute data by linearly interpolating between the two end-points of the missing set. Subsequently, guarantee that each ICU has data for the whole timeseries, either by forward-propagating data at day t for imputation, or setting it to 0 for days before the ICU started its data collection. 5) (Optional) Spread out sudden jumps in data that reflect onboardings or a change in reporting habit. Args: spread_cum_jump_correction : Whether to apply step 5) to the data. max_date : Only return data up to this date. """ # Extract useful columns and recast date properly: d = format_data(d) d = d.fillna(0) if "Mulhouse-Chir" in d.icu_name.unique(): d.loc[d.icu_name == "Mulhouse-Chir", "n_covid_healed"] = np.clip( ( d.loc[d.icu_name == "Mulhouse-Chir", "n_covid_healed"] - d.loc[d.icu_name == "Mulhouse-Chir", "n_covid_transfered"] ).values, a_min=0, a_max=None, ) icu_to_first_input_date = dict( d.groupby("icu_name")[["date"]].min().itertuples(name=None) ) # Apply steps 1) 2) & 3) d = aggregate_multiple_inputs(d, "15Min") # Step 4) d = fill_in_missing_days(d, "3D") d = enforce_daily_values_for_all_icus(d) # Step 5) if spread_cum_jump_correction: d = spread_cum_jumps(d, icu_to_first_input_date) d = d[dataset.ALL_COLUMNS] d = d.sort_values(by=["date", "icu_name"]) if max_date is not None: logging.info("loaded data's max date will be %s (excluded)" % max_date) d = d.loc[d.date < pd.to_datetime(max_date).date()] return d def aggregate_multiple_inputs(d, agg_time_delta="15Min"): """Aggregate the timeseries into time bins. This will aggregate the timeseries into regular time intervals, and use the most recent update prior to time t to populate the bin at time t.
""" res_dfs = [] for icu_name, dg in d.groupby("icu_name"): dg = dg.set_index("datetime") dg = dg.sort_index() td_diff = dg.index.to_series().diff(1) mask = td_diff > pd.Timedelta(agg_time_delta) mask = mask.shift(-1).fillna(True).astype(bool) dg = dg.loc[mask] # This will run low-pass filters to remove spurious outliers: # Rolling median average, 5 points (for cumulative qtities): # breakpoint() for col in dataset.CUM_COLUMNS: dg[col] = ( dg[col].rolling(5, center=True, min_periods=1).median().astype(int) ) # Rolling median average, 3 points (for non-cumulative qtities): for col in dataset.NCUM_COLUMNS: dg[col] = dg[col].fillna(0) dg[col] = ( dg[col].rolling(3, center=True, min_periods=1).median().astype(int) ) # Force cumulative columns to be monotonic by bringing any decreases in # the value up to their previous values i.e. x_t = max(x_t, x_{t-1}): dg[dataset.CUM_COLUMNS ] = np.maximum.accumulate(dg[dataset.CUM_COLUMNS].values, axis=0) res_dfs.append(dg.reset_index()) return pd.concat(res_dfs) def fill_in_missing_days(d, time_delta_threshold="3D"): """Group the timeseries into days, and impute data linearly for holes in the data superior to 3 days. """ res_dfs = [] for icu_name, dg in d.groupby("icu_name"): dg = dg.sort_values(by=["datetime"]) time_delta = dg["datetime"].diff(1) for i, td in enumerate(time_delta): if td > pd.Timedelta(time_delta_threshold): n_days = td // pd.Timedelta("1D") val_init = dg.iloc[i - 1] val_final = dg.iloc[i] for added_day in range(n_days): added_datetime = (val_init.datetime + pd.Timedelta("1D") * added_day) added_date = val_init.date + pd.Timedelta("1D") * added_day new_row = { "datetime": added_datetime, "icu_name": val_init.icu_name, "date": added_date, "department": val_init.department, "n_covid_deaths": np.round( val_init.n_covid_deaths + (val_final.n_covid_deaths - val_init.n_covid_deaths) * added_day * 1.0 / n_days, 4, ), "n_covid_healed": np.round( val_init.n_covid_healed + (val_final.n_covid_healed - val_init.n_covid_healed) * added_day * 1.0 / n_days, 4, ), "n_covid_transfered": np.round( val_init.n_covid_transfered + (val_final.n_covid_transfered - val_init.n_covid_transfered) * added_day * 1.0 / n_days, 4, ), "n_covid_refused": np.round( val_init.n_covid_refused + (val_final.n_covid_refused - val_init.n_covid_refused) * added_day * 1.0 / n_days, 4, ), "n_covid_free": np.round( val_init.n_covid_free + (val_final.n_covid_free - val_init.n_covid_free) * added_day * 1.0 / n_days, 4, ), "n_ncovid_free": np.round( val_init.n_ncovid_free + (val_final.n_ncovid_free - val_init.n_ncovid_free) * added_day * 1.0 / n_days, 4, ), "n_covid_occ": np.round( val_init.n_covid_occ + (val_final.n_covid_occ - val_init.n_covid_occ) * added_day * 1.0 / n_days, 4, ), "n_ncovid_occ": np.round( val_init.n_ncovid_occ + (val_final.n_ncovid_occ - val_init.n_ncovid_occ) * added_day * 1.0 / n_days, 4, ), } dg = dg.append(pd.Series(new_row), ignore_index=True) dg = dg.sort_values(by=["datetime"]) res_dfs.append(dg) return pd.concat(res_dfs) def enforce_daily_values_for_all_icus(d): """Guarantee that each ICU has a continuous daily timeseries. Each missing day in the series is imputed by forward-filling from the most recent day with data. """ dates = np.sort(d.date.unique()) def reindex_icu(x): # Process data for an ICU. # For repeated entries per day, only keep the last entry. # This is necessary as we cannot re-index indexes with duplicates. 
x = x.sort_values('datetime').drop_duplicates(['date'], keep='last') # forward fill all missing values x = x.set_index(['date']).reindex(dates, method='ffill').reset_index() # backward fill categorical variables (that don't change with time) cat_columns = ['icu_name', 'department', 'region', 'region_id'] x[cat_columns] = x[cat_columns].fillna(method='bfill') # Set all other variables to 0 before first observation int_columns = dataset.CUM_COLUMNS + dataset.NCUM_COLUMNS x[int_columns] = x[int_columns].fillna(0) # Leave all unknown variables as NaN return x df = d.groupby('icu_name').apply(reindex_icu) # Reproduce behaviour of earlier versions of this function df['datetime'] = df['date'] df['create_date'] = df['date'] return df.reset_index(drop=True) def spread_cum_jumps(d, icu_to_first_input_date): assert np.all(d.date.values == d.datetime.values) # TODO: do not hardcode this value date_begin_transfered_refused = pd.to_datetime("2020-03-25").date() dfs = [] for icu_name, dg in d.groupby("icu_name"): dg = dg.sort_values(by="date") dg = dg.reset_index() already_fixed_col = set() for switch_point, cols in ( (icu_to_first_input_date[icu_name], dataset.CUM_COLUMNS), ( date_begin_transfered_refused, ["n_covid_transfered", "n_covid_refused"], ), ): beg = max( dg.date.min(), switch_point - pd.Timedelta("2D"), ) end = min( dg.date.max(), switch_point + pd.Timedelta("2D"), ) for col in cols: if col in already_fixed_col: continue beg_val = dg.loc[dg.date == beg, col] if not len(beg_val): continue beg_val = beg_val.values[0] end_val = dg.loc[dg.date == end, col] if not len(end_val): continue end_val = end_val.values[0] diff = end_val - beg_val if diff >= SPREAD_CUM_JUMPS_MAX_JUMP[col]: spread_beg = dg.date.min() spread_end = end spread_range =
pd.date_range(spread_beg, spread_end, freq="1D")
pandas.date_range
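# Illustrative sketch (not part of the ICU pipeline above): the core transforms
# that preprocess_bedcounts chains together, shown on a tiny made-up series.
# The column name and values below are invented for the example.
import numpy as np
import pandas as pd

toy = pd.DataFrame(
    {"n_covid_deaths": [2, 3, 30, 4, 5]},
    index=pd.to_datetime(
        ["2020-03-20", "2020-03-21", "2020-03-22", "2020-03-25", "2020-03-26"]
    ),
)

# Step 1: low-pass filter; a centered rolling median removes the single spike (30).
smoothed = toy["n_covid_deaths"].rolling(3, center=True, min_periods=1).median()

# Step 3: force the cumulative count to be monotonic, x_t = max(x_t, x_{t-1}).
monotone = np.maximum.accumulate(smoothed.values)

# Step 4: reindex to a daily grid and forward-fill the 2-day hole (03-23, 03-24).
daily = (
    pd.Series(monotone, index=toy.index)
    .reindex(pd.date_range(toy.index.min(), toy.index.max(), freq="D"))
    .ffill()
)
print(daily)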
"""Simple Flask server which runs the frontend.""" import ast import redis import pandas as pd from flask import Flask, render_template from optparse import OptionParser app = Flask(__name__) r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True) @app.route("/", methods=['GET', 'POST']) def index(): """Function that render index html page.""" dict_ = {} dict_["tweet_count"] = r.get("tweet_count") dict_["day"] = ast.literal_eval(r.hget("stats", "day")) dict_["date"] = ast.literal_eval(r.hget("stats", "date")) for key in ['lang', 'word_cloud', 'hashtags', 'users', 'loc']: rs = ast.literal_eval(r.hget("stats", key)) dict_[key] = ((
pd.Series(rs)
pandas.Series
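# Illustrative sketch (not part of the Flask app above): one way a stats producer
# could populate the Redis keys that index() reads back with ast.literal_eval.
# Only the key names come from the view code; the host/port and the sample
# values are assumptions made for the example.
import redis

r = redis.StrictRedis(host="localhost", port=6379, db=0, decode_responses=True)

r.set("tweet_count", 1234)
# Hash fields are stored as repr() strings so the view can literal_eval them back.
r.hset("stats", "day", repr([10, 25, 40]))
r.hset("stats", "date", repr(["2021-01-01", "2021-01-02", "2021-01-03"]))
r.hset("stats", "lang", repr({"en": 900, "fr": 200, "es": 134}))
r.hset("stats", "word_cloud", repr({"covid": 50, "vaccine": 30}))
r.hset("stats", "hashtags", repr({"#news": 40, "#ai": 12}))
r.hset("stats", "users", repr({"alice": 7, "bob": 5}))
r.hset("stats", "loc", repr({"Paris": 60, "London": 45}))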
# coding=utf-8 # pylint: disable-msg=E1101,W0612 import numpy as np import pytest from pandas.compat import lrange, range import pandas as pd from pandas import DataFrame, Index, Series import pandas.util.testing as tm from pandas.util.testing import assert_series_equal def test_get(): # GH 6383 s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45, 51, 39, 55, 43, 54, 52, 51, 54])) result = s.get(25, 0) expected = 0 assert result == expected s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45, 51, 39, 55, 43, 54, 52, 51, 54]), index=pd.Float64Index( [25.0, 36.0, 49.0, 64.0, 81.0, 100.0, 121.0, 144.0, 169.0, 196.0, 1225.0, 1296.0, 1369.0, 1444.0, 1521.0, 1600.0, 1681.0, 1764.0, 1849.0, 1936.0], dtype='object')) result = s.get(25, 0) expected = 43 assert result == expected # GH 7407 # with a boolean accessor df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3}) vc = df.i.value_counts() result = vc.get(99, default='Missing') assert result == 'Missing' vc = df.b.value_counts() result = vc.get(False, default='Missing') assert result == 3 result = vc.get(True, default='Missing') assert result == 'Missing' def test_get_nan(): # GH 8569 s = pd.Float64Index(range(10)).to_series() assert s.get(np.nan) is None assert s.get(np.nan, default='Missing') == 'Missing' def test_get_nan_multiple(): # GH 8569 # ensure that fixing "test_get_nan" above hasn't broken get # with multiple elements s = pd.Float64Index(range(10)).to_series() idx = [2, 30] with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): assert_series_equal(s.get(idx), Series([2, np.nan], index=idx)) idx = [2, np.nan] with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): assert_series_equal(s.get(idx), Series([2, np.nan], index=idx)) # GH 17295 - all missing keys idx = [20, 30] assert(s.get(idx) is None) idx = [np.nan, np.nan] assert(s.get(idx) is None) def test_delitem(): # GH 5542 # should delete the item inplace s = Series(lrange(5)) del s[0] expected = Series(lrange(1, 5), index=lrange(1, 5)) assert_series_equal(s, expected) del s[1] expected = Series(lrange(2, 5), index=lrange(2, 5)) assert_series_equal(s, expected) # empty s = Series() with pytest.raises(KeyError): del s[0] # only 1 left, del, add, del s = Series(1) del s[0] assert_series_equal(s, Series(dtype='int64', index=Index( [], dtype='int64'))) s[0] = 1 assert_series_equal(s, Series(1)) del s[0] assert_series_equal(s, Series(dtype='int64', index=Index( [], dtype='int64'))) # Index(dtype=object) s = Series(1, index=['a']) del s['a'] assert_series_equal(s, Series(dtype='int64', index=Index( [], dtype='object'))) s['a'] = 1 assert_series_equal(s, Series(1, index=['a'])) del s['a'] assert_series_equal(s, Series(dtype='int64', index=Index( [], dtype='object'))) def test_slice_float64(): values = np.arange(10., 50., 2) index = Index(values) start, end = values[[5, 15]] s = Series(np.random.randn(20), index=index) result = s[start:end] expected = s.iloc[5:16] assert_series_equal(result, expected) result = s.loc[start:end] assert_series_equal(result, expected) df = DataFrame(np.random.randn(20, 3), index=index) result = df[start:end] expected = df.iloc[5:16] tm.assert_frame_equal(result, expected) result = df.loc[start:end] tm.assert_frame_equal(result, expected) def test_getitem_negative_out_of_bounds(): s = Series(
tm.rands_array(5, 10)
pandas.util.testing.rands_array
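# Illustrative sketch (not part of the test module above): the Series.get
# contract those tests pin down: a missing label returns the supplied default
# instead of raising, unlike s[key]. The values here are arbitrary.
import pandas as pd

s = pd.Series([43, 48, 60], index=[25.0, 36.0, 49.0])

assert s.get(36.0) == 48        # present label -> its value
assert s.get(99.0) is None      # absent label  -> None by default
assert s.get(99.0, 0) == 0      # absent label  -> caller-supplied default

# value_counts() returns a Series, so .get() gives a safe lookup there too.
vc = pd.Series([False, False, False]).value_counts()
assert vc.get(True, "Missing") == "Missing"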
import logging import os import pickle import re import string import time import numpy as np import pandas as pd import requests from fastapi import FastAPI, Request from fastapi_utils.tasks import repeat_every from scipy.spatial import distance # import scipy from config import * from image_extractor import ImageExtractor from search import SearchIndex from sentence_vectorizer import SentenceVectorizer app = FastAPI() logger = logging.getLogger(__name__) sv = SentenceVectorizer() ie = ImageExtractor() search_index = SearchIndex() templates_list = [] templates = pd.DataFrame() popular_memes =
pd.DataFrame()
pandas.DataFrame
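# Illustrative sketch (not part of the meme-search service above): a minimal
# cosine-distance lookup over template embeddings, presumably similar in spirit
# to what the project-specific SentenceVectorizer / SearchIndex modules provide
# (those modules are not shown here). The vectors below are random placeholders.
import numpy as np
import pandas as pd
from scipy.spatial import distance

rng = np.random.default_rng(0)
templates = pd.DataFrame({"name": [f"template_{i}" for i in range(5)]})
embeddings = rng.normal(size=(5, 16))   # one 16-d vector per template
query = rng.normal(size=(1, 16))        # vector for the incoming query text

# Cosine distance between the query and every stored template, smallest first.
templates["distance"] = distance.cdist(query, embeddings, metric="cosine")[0]
print(templates.sort_values("distance").head(3))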
#!/usr/bin/env python """ analyse Elasticsearch query """ import json from elasticsearch import Elasticsearch from elasticsearch import logger as es_logger from collections import defaultdict, Counter import re import os from datetime import datetime # Preprocess terms for TF-IDF import numpy as np import pandas as pd from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer # progress bar from tqdm import tqdm # ploting import matplotlib.pyplot as plt # LOG import logging from logging.handlers import RotatingFileHandler # Word embedding for evaluation from sentence_transformers import SentenceTransformer from sklearn.manifold import TSNE import seaborn as sns from sklearn.cluster import KMeans, AgglomerativeClustering from sklearn.metrics.pairwise import cosine_similarity from scipy import sparse import scipy.spatial as sp # Spatial entity as descriptor : from geopy.geocoders import Nominatim from geopy.extra.rate_limiter import RateLimiter # venn from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud import operator # Global var on Levels on spatial and temporal axis spatialLevels = ['city', 'state', 'country'] temporalLevels = ['day', 'week', 'month', 'period'] def elasticsearch_query(query_fname, logger): """ Build a ES query and return a default dict with resuls :return: tweetsByCityAndDate """ # Elastic search credentials client = Elasticsearch("http://localhost:9200") es_logger.setLevel(logging.WARNING) index = "twitter" # Define a Query query = open(query_fname, "r").read() result = Elasticsearch.search(client, index=index, body=query, scroll='2m', size=5000) # Append all pages form scroll search : avoid the 10k limitation of ElasticSearch results = avoid10kquerylimitation(result, client, logger) # Initiate a dict for each city append all Tweets content tweetsByCityAndDate = defaultdict(list) for hits in results: # parse Java date : EEE MMM dd HH:mm:ss Z yyyy inDate = hits["_source"]["created_at"] parseDate = datetime.strptime(inDate, "%a %b %d %H:%M:%S %z %Y") try:# geodocing may be bad geocoding = hits["_source"]["rest"]["features"][0]["properties"] except: continue # skip this iteraction if "country" in hits["_source"]["rest"]["features"][0]["properties"]: # locaties do not necessarily have an associated stated try: cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \ str(hits["_source"]["rest"]["features"][0]["properties"]["state"]) + "_" + \ str(hits["_source"]["rest"]["features"][0]["properties"]["country"]) except: # there is no state in geocoding try: logger.debug(hits["_source"]["rest"]["features"][0]["properties"]["city"] + " has no state") cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \ str("none") + "_" + \ str(hits["_source"]["rest"]["features"][0]["properties"]["country"]) except: # there is no city as well : only country # print(json.dumps(hits["_source"], indent=4)) try: # cityStateCountry = str("none") + "_" + \ str("none") + "_" + \ str(hits["_source"]["rest"]["features"][0]["properties"]["country"]) except: cityStateCountry = str("none") + "_" + \ str("none") + "_" + \ str("none") try: tweetsByCityAndDate[cityStateCountry].append( { "tweet": preprocessTweets(hits["_source"]["full_text"]), "created_at": parseDate } ) except: print(json.dumps(hits["_source"], indent=4)) # biotexInputBuilder(tweetsByCityAndDate) # pprint(tweetsByCityAndDate) return tweetsByCityAndDate def avoid10kquerylimitation(result, client, logger): """ Elasticsearch limit 
results of query at 10 000. To avoid this limit, we need to paginate results and scroll This method append all pages form scroll search :param result: a result of a ElasticSearcg query :return: """ scroll_size = result['hits']['total']["value"] logger.info("Number of elasticsearch scroll: " + str(scroll_size)) results = [] # Progress bar pbar = tqdm(total=scroll_size) while (scroll_size > 0): try: scroll_id = result['_scroll_id'] res = client.scroll(scroll_id=scroll_id, scroll='60s') results += res['hits']['hits'] scroll_size = len(res['hits']['hits']) pbar.update(scroll_size) except: pbar.close() logger.error("elasticsearch search scroll failed") break pbar.close() return results def preprocessTweets(text): """ 1 - Clean up tweets text cf : https://medium.com/analytics-vidhya/basic-tweet-preprocessing-method-with-python-56b4e53854a1 2 - Detection lang 3 - remove stopword ?? :param text: :return: list : texclean, and langue detected """ ## 1 clean up twetts # remove URLs textclean = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))', '', text) textclean = re.sub(r'http\S+', '', textclean) # remove usernames # textclean = re.sub('@[^\s]+', '', textclean) # remove the # in #hashtag # textclean = re.sub(r'#([^\s]+)', r'\1', textclean) return textclean def matrixOccurenceBuilder(tweetsofcity, matrixAggDay_fout, matrixOccurence_fout, save_intermediaire_files, logger): """ Create a matrix of : - line : (city,day) - column : terms - value of cells : TF (term frequency) Help found here : http://www.xavierdupre.fr/app/papierstat/helpsphinx/notebooks/artificiel_tokenize_features.html https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76 :param tweetsofcity: :param matrixAggDay_fout: file to save :param matrixOccurence_fout: file to save :return: """ # initiate matrix of tweets aggregate by day # col = ['city', 'day', 'tweetsList', 'bow'] col = ['city', 'day', 'tweetsList'] matrixAggDay = pd.DataFrame(columns=col) cityDayList = [] logger.info("start full_text concatenation for city & day") pbar = tqdm(total=len(tweetsofcity)) for city in tweetsofcity: # create a table with 2 columns : tweet and created_at for a specific city matrix = pd.DataFrame(tweetsofcity[city]) # Aggregate list of tweets by single day for specifics cities ## Loop on days for a city period = matrix['created_at'].dt.date period = period.unique() period.sort() for day in period: # aggregate city and date document document = '. 
\n'.join(matrix.loc[matrix['created_at'].dt.date == day]['tweet'].tolist()) # Bag of Words and preprocces # preproccesFullText = preprocessTerms(document) tweetsOfDayAndCity = { 'city': city, 'day': day, 'tweetsList': document } cityDayList.append(city + "_" + str(day)) try: matrixAggDay = matrixAggDay.append(tweetsOfDayAndCity, ignore_index=True) except: print("full_text empty after pre-process: "+document) continue pbar.update(1) pbar.close() if save_intermediaire_files: logger.info("Saving file: matrix of full_text concatenated by day & city: "+str(matrixAggDay_fout)) matrixAggDay.to_csv(matrixAggDay_fout) # Count terms with sci-kit learn cd = CountVectorizer( stop_words='english', #preprocessor=sklearn_vectorizer_no_number_preprocessor, #min_df=2, # token at least present in 2 cities : reduce size of matrix max_features=25000, ngram_range=(1, 1), token_pattern='[a-zA-Z0-9#@]+', #remove user name, i.e term starting with @ for personnal data issue # strip_accents= "ascii" # remove token with special character (trying to keep only english word) ) cd.fit(matrixAggDay['tweetsList']) res = cd.transform(matrixAggDay["tweetsList"]) countTerms = res.todense() # create matrix ## get terms : # voc = cd.vocabulary_ # listOfTerms = {term for term, index in sorted(voc.items(), key=lambda item: item[1])} listOfTerms = cd.get_feature_names() ##initiate matrix with count for each terms matrixOccurence = pd.DataFrame(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms) # save to file if save_intermediaire_files: logger.info("Saving file: occurence of term: "+str(matrixOccurence_fout)) matrixOccurence.to_csv(matrixOccurence_fout) return matrixOccurence def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'): """ Filter matrix with list of cities and a period :param matrix: :param listOfcities: :param spatialLevel: :param period: :param temporalLevel: :return: matrix filtred """ if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels: print("wrong level, please double check") return 1 # Extract cities and period ## cities if listOfcities != 'all': ### we need to filter ###Initiate a numpy array of False filter = np.zeros((1, len(matrix.index)), dtype=bool)[0] for city in listOfcities: ### edit filter if index contains the city (for each city of the list) filter += matrix.index.str.startswith(str(city) + "_") matrix = matrix.loc[filter] ##period if str(period) != 'all': ### we need a filter on date datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0] for date in period: datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d')) matrix = matrix.loc[datefilter] return matrix def HTFIDF(matrixOcc, matrixHTFIDF_fname, biggestHTFIDFscore_fname, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'): """ Aggregate on spatial and temporel and then compute TF-IDF :param matrixOcc: Matrix with TF already compute :param listOfcities: filter on this cities :param spatialLevel: city / state / country / world :param period: Filter on this period :param temporalLevel: day / week (month have to be implemented) :return: """ matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfcities, spatialLevel='state', period=period) # Aggregate by level ## Create 4 new columns : city, State, Country and date def splitindex(row): return row.split("_") matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \ zip(*matrixOcc.index.map(splitindex)) if temporalLevel == 'day': ## 
In space if spatialLevel == 'city': # do nothing pass elif spatialLevel == 'state' and temporalLevel == 'day': matrixOcc = matrixOcc.groupby("state").sum() elif spatialLevel == 'country' and temporalLevel == 'day': matrixOcc = matrixOcc.groupby("country").sum() elif temporalLevel == "week": matrixOcc.date = pd.to_datetime((matrixOcc.date)) - pd.to_timedelta(7, unit='d')# convert date into datetime ## in space and time if spatialLevel == 'country': matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="W")]).sum() elif spatialLevel == 'state': matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="W")]).sum() elif spatialLevel == 'city': matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="W")]).sum() # Compute TF-IDF ## compute TF : for each doc, devide count by Sum of all count ### Sum fo all count by row matrixOcc['sumCount'] = matrixOcc.sum(axis=1) ### Devide each cell by these sums listOfTerms = matrixOcc.keys() matrixOcc = matrixOcc.loc[:, listOfTerms].div(matrixOcc['sumCount'], axis=0) ## Compute IDF : create a vector of length = nb of termes with IDF value idf = pd.Series(index=matrixOcc.keys(), dtype=float) ### N : nb of doucments <=> nb of rows : N = matrixOcc.shape[0] ### DFt : nb of document that contains the term DFt = matrixOcc.astype(bool).sum(axis=0) # Tip : convert all value in boolean. float O,O will be False, other True #### Not a Number when value 0 because otherwise log is infinite DFt.replace(0, np.nan, inplace=True) ### compute log(N/DFt) idf = np.log10(N / (DFt)) # idf = np.log10( N / (DFt * 10)) ## compute TF-IDF matrixTFIDF = matrixOcc * idf # matrixTFIDF = matrixOcc * idf * idf ## remove terms if for all documents value are Nan matrixTFIDF.dropna(axis=1, how='all', inplace=True) # Save file matrixTFIDF.to_csv(matrixHTFIDF_fname) # Export N biggest TF-IDF score: top_n = 500 extractBiggest = pd.DataFrame(index=matrixTFIDF.index, columns=range(0, top_n)) for row in matrixTFIDF.index: try: row_without_zero = matrixTFIDF.loc[row]# we remove term with a score = 0 row_without_zero = row_without_zero[ row_without_zero !=0 ] try: extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys() except: extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys() except: logger.debug("H-TFIDF: city "+str(matrixTFIDF.loc[row].name)+ "not enough terms") extractBiggest.to_csv(biggestHTFIDFscore_fname+".old.csv") # Transpose this table in order to share the same structure with TF-IDF classifical biggest score : hbt = pd.DataFrame() extractBiggest = extractBiggest.reset_index() for index, row in extractBiggest.iterrows(): hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"]) hbtrow[spatialLevel] = row[spatialLevel] hbtrow["date"] = row["date"] hbt = hbt.append(hbtrow, ignore_index=True) hbt.to_csv(biggestHTFIDFscore_fname) def TFIDF_TF_with_corpus_state(elastic_query_fname, logger, save_intermediaire_files, nb_biggest_terms=500, path_for_filesaved="./", spatial_hiearchy="country", temporal_period='all', listOfCities='all'): """ Compute TFIDF and TF from an elastic query file 1 doc = 1 tweet Corpus = by hiearchy level, i.e. 
: state or country :param elastic_query_fname: filename and path of the elastic query :param logger: logger of the main program :param nb_biggest_terms: How many biggest term are to keep :param spatial_hiearchy: define the size of the corpus : state or country :param temporal_period: :param listOfCities: If you want to filter out some cities, you can :return: """ # tfidfStartDate = date(2020, 1, 23) # tfidfEndDate = date(2020, 1, 30) # temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate) # listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff'] # listOfState = ["England", "Scotland", "Northern Ireland", "Wales"] tweets = elasticsearch_query(elastic_query_fname, logger) if listOfCities == 'all': listOfCities = [] listOfStates = [] listOfCountry = [] for triple in tweets: splitted = triple.split("_") listOfCities.append(splitted[0]) listOfStates.append(splitted[1]) listOfCountry.append(splitted[2]) listOfCities = list(set(listOfCities)) listOfStates = list(set(listOfStates)) listOfCountry = list(set(listOfCountry)) # reorganie tweets (dict : tweets by cities) into dataframe (city and date) matrixAllTweets = pd.DataFrame() for tweetByCity in tweets.keys(): # Filter cities : city = str(tweetByCity).split("_")[0] state = str(tweetByCity).split("_")[1] country = str(tweetByCity).split("_")[2] if city in listOfCities: matrix = pd.DataFrame(tweets[tweetByCity]) matrix['city'] = city matrix['state'] = state matrix['country'] = country matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True) # Split datetime into date and time matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']] matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']] # Filter by a period if temporal_period != "all": mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max())) matrixAllTweets = matrixAllTweets.loc[mask] # Compute TF-IDF and TF by state extractBiggestTF_allstates = pd.DataFrame() extractBiggestTFIDF_allstates = pd.DataFrame() if spatial_hiearchy == "country": listOfLocalities = listOfCountry elif spatial_hiearchy == "state": listOfLocalities = listOfStates elif spatial_hiearchy == "city": listOfLocalities = listOfCities for locality in listOfLocalities: matrix_by_locality = matrixAllTweets[matrixAllTweets[spatial_hiearchy] == locality] vectorizer = TfidfVectorizer( stop_words='english', min_df=0.001, # max_features=50000, ngram_range=(1, 1), token_pattern='[<KEY>', ) # logger.info("Compute TF-IDF on corpus = "+spatial_hiearchy) try: vectors = vectorizer.fit_transform(matrix_by_locality['tweet']) feature_names = vectorizer.get_feature_names() dense = vectors.todense() denselist = dense.tolist() except: logger.info("Impossible to compute TF-IDF on: "+locality) continue ## matrixTFIDF TFIDFClassical = pd.DataFrame(denselist, columns=feature_names) locality_format = locality.replace("/", "_") locality_format = locality_format.replace(" ", "_") if save_intermediaire_files: logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv") TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv") ## Extract N TOP ranking score extractBiggest = TFIDFClassical.max().nlargest(nb_biggest_terms) extractBiggest = extractBiggest.to_frame() extractBiggest = extractBiggest.reset_index() extractBiggest.columns = ['terms', 'score'] extractBiggest[spatial_hiearchy] = locality extractBiggestTFIDF_allstates = extractBiggestTFIDF_allstates.append(extractBiggest, 
ignore_index=True) """ # Compute TF tf = CountVectorizer( stop_words='english', min_df=2, ngram_range=(1,2), token_pattern='[a-zA-Z0-9@#]+', ) try: tf.fit(matrix_by_locality['tweet']) tf_res = tf.transform(matrix_by_locality['tweet']) listOfTermsTF = tf.get_feature_names() countTerms = tf_res.todense() except:# locality does not have enough different term logger.info("Impossible to compute TF on: "+locality) continue ## matrixTF TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF) ### save in file logger.info("saving TF File: "+path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv") TFClassical.to_csv(path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv") ## Extract N TOP ranking score extractBiggestTF = TFClassical.max().nlargest(nb_biggest_terms) extractBiggestTF = extractBiggestTF.to_frame() extractBiggestTF = extractBiggestTF.reset_index() extractBiggestTF.columns = ['terms', 'score'] extractBiggestTF[spatial_hiearchy] = locality extractBiggestTF_allstates = extractBiggestTF_allstates.append(extractBiggestTF, ignore_index=True) """ logger.info("saving TF and TF-IDF top"+str(nb_biggest_terms)+" biggest score") extractBiggestTF_allstates.to_csv(path_for_filesaved+"/TF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv") extractBiggestTFIDF_allstates.to_csv(path_for_filesaved+"/TF-IDF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv") def TFIDF_TF_on_whole_corpus(elastic_query_fname, logger, save_intermediaire_files, path_for_filesaved="./", temporal_period='all', listOfCities='all'): """ Compute TFIDF and TF from an elastic query file 1 doc = 1 tweet Corpus = on the whole elastic query (with filter out cities that are not in listOfCities :param elastic_query_fname: filename and path of the elastic query :param logger: logger of the main program :param nb_biggest_terms: How many biggest term are to keep. 
It has to be greater than H-TF-IDF or TF-IDF classical on corpus = localité because a lot of temrs have 1.0 has the score :param spatial_hiearchy: define the size of the corpus : state or country :param temporal_period: :param listOfCities: If you want to filter out some cities, you can :return: """ # tfidfStartDate = date(2020, 1, 23) # tfidfEndDate = date(2020, 1, 30) # temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate) # listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff'] # listOfState = ["England", "Scotland", "Northern Ireland", "Wales"] # Query Elasticsearch to get all tweets from UK tweets = elasticsearch_query(elastic_query_fname, logger) if listOfCities == 'all': listOfCities = [] listOfStates = [] listOfCountry = [] for triple in tweets: splitted = triple.split("_") listOfCities.append(splitted[0]) listOfStates.append(splitted[1]) listOfCountry.append(splitted[2]) listOfCities = list(set(listOfCities)) listOfStates = list(set(listOfStates)) listOfCountry = list(set(listOfCountry)) # reorganie tweets (dict : tweets by cities) into dataframe (city and date) matrixAllTweets = pd.DataFrame() for tweetByCity in tweets.keys(): # Filter cities : city = str(tweetByCity).split("_")[0] state = str(tweetByCity).split("_")[1] country = str(tweetByCity).split("_")[2] if city in listOfCities: matrix = pd.DataFrame(tweets[tweetByCity]) matrix["country"] = country matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True) # Split datetime into date and time matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']] matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']] # Filter by a period if temporal_period != "all": mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max())) matrixAllTweets = matrixAllTweets.loc[mask] vectorizer = TfidfVectorizer( stop_words='english', min_df=0.001, # max_features=50000, ngram_range=(1, 1), token_pattern='[a-zA-Z0-9#]+', #remove user name, i.e term starting with @ for personnal data issue ) try: vectors = vectorizer.fit_transform(matrixAllTweets['tweet']) feature_names = vectorizer.get_feature_names() dense = vectors.todense() denselist = dense.tolist() except: logger.info("Impossible to compute TF-IDF") exit(-1) ## matrixTFIDF TFIDFClassical = pd.DataFrame(denselist, columns=feature_names) TFIDFClassical["country"] = matrixAllTweets["country"] if save_intermediaire_files: logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_whole_corpus.csv") TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_whole_corpus.csv") extractBiggest = pd.DataFrame() for term in TFIDFClassical.keys(): try: index = TFIDFClassical[term].idxmax() score = TFIDFClassical[term].max() country = TFIDFClassical.iloc[index]["country"] row = { 'terms': term, 'score': score, 'country': country } extractBiggest = extractBiggest.append(row, ignore_index=True) except: logger.info(term+' : '+str(index)+" : "+str(score)+" : "+country) ## Extract N TOP ranking score # extractBiggest = TFIDFClassical.max() extractBiggest = extractBiggest[extractBiggest['score'] == 1] # we keep only term with high score TF-IDF, i.e 1.0 # extractBiggest = extractBiggest.to_frame() # extractBiggest = extractBiggest.reset_index() # extractBiggest.columns = ['terms', 'score', 'country'] logger.info("saving TF-IDF top"+str(extractBiggest['terms'].size)+" biggest score") extractBiggest.to_csv(path_for_filesaved+"/TFIDF_BiggestScore_on_whole_corpus.csv") def logsetup(log_fname): """ Initiate a logger 
object : - Log in file : collectweets.log - also print on screen :return: logger object """ logger = logging.getLogger() logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(funcName)20s() ::%(message)s') now = datetime.now() file_handler = RotatingFileHandler(log_fname + "_" + now.strftime("%Y-%m-%d_%H-%M-%S") + ".log", 'a', 1000000, 1) file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(formatter) logger.addHandler(file_handler) stream_handler = logging.StreamHandler() # Only display on screen INFO stream_handler.setLevel(logging.INFO) logger.addHandler(stream_handler) return logger def t_SNE_bert_embedding_visualization(biggest_score, logger, listOfLocalities="all", spatial_hieararchy="country", plotname="colored by country", paht2save="./"): """ Plot t-SNE representation of terms by country ressources: + https://colab.research.google.com/drive/1FmREx0O4BDeogldyN74_7Lur5NeiOVye?usp=sharing#scrollTo=Fbq5MAv0jkft + https://github.com/UKPLab/sentence-transformers :param biggest_score: :param listOfLocalities: :param spatial_hieararchy: :param plotname: :param paht2save: :return: """ modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens') # filter by localities for locality in biggest_score[spatial_hieararchy].unique(): if locality not in listOfLocalities: biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index) embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True) # embeddings.tofile(paht2save+"/tsne_bert-embeddings_"+plotname+"_matrix-embeddig") modelTSNE = TSNE(n_components=2) # n_components means the lower dimension low_dim_data = modelTSNE.fit_transform(embeddings) label_tsne = biggest_score[spatial_hieararchy] # Style Plots a bit sns.set_style('darkgrid') sns.set_palette('muted') sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 2.5}) plt.rcParams['figure.figsize'] = (20, 14) tsne_df = pd.DataFrame(low_dim_data, label_tsne) tsne_df.columns = ['x', 'y'] ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index) plt.setp(ax.get_legend().get_texts(), fontsize='40') # for legend text plt.setp(ax.get_legend().get_title(), fontsize='50') # for legend title plt.ylim(-100,100) plt.xlim(-100, 100) #ax.set_title('T-SNE BERT Sentence Embeddings for '+plotname) plt.savefig(paht2save+"/tsne_bert-embeddings_"+plotname) logger.info("file: "+paht2save+"/tsne_bert-embeddings_"+plotname+" has been saved.") #plt.show() plt.close() # Perform kmean clustering # num_clusters = 5 # clustering_model = KMeans(n_clusters=num_clusters) # clustering_model.fit(embeddings) # cluster_assignment = clustering_model.labels_ # Normalize the embeddings to unit length corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True) # Perform kmean clustering clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4) clustering_model.fit(corpus_embeddings) cluster_assignment = clustering_model.labels_ # clustered_sentences = [[] for i in range(num_clusters)] # for sentence_id, cluster_id in enumerate(cluster_assignment): # clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id]) clustered_sentences = {} for sentence_id, cluster_id in enumerate(cluster_assignment): if cluster_id not in clustered_sentences: clustered_sentences[cluster_id] = [] 
clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id]) #for i, cluster in enumerate(clustered_sentences): # for i, cluster in clustered_sentences.items(): # print("Cluster ", i+1) # print(cluster) # print("") def bert_embedding_filtred(biggest_score, listOfLocalities="all", spatial_hieararchy="country"): """ Retrieve embedding of a matrix of terms (possibility of filtring by a list of locality) :param biggest_score: pd.Datraframe with columns : [terms, country/state/city] :param listOfLocalities: :param spatial_hieararchy: :return: """ modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens') # filter by localities if listOfLocalities != "all": for locality in biggest_score[spatial_hieararchy].unique(): if locality not in listOfLocalities: biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index) embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True) return embeddings def similarity_intra_matrix_pairwise(matrix): """ Compute pairwise cosine similarity on the rows of a Matrix and retrieve unique score by pair. indeed, cosine_similarity pairwise retrive a matrix with duplication : let's take an exemple : Number of terms : 4, cosine similarity : w1 w2 w3 w4 +---+---+----+--+ w1 | 1 | | | | w2 | | 1 | | | w3 | | | 1 | | w4 | | | | 1 | +---+---+----+--+ (w1, w2) = (w2, w1), so we have to keep only : (number_of_terms)^2/2 - (number_of_terms)/2 for nb_term = 4 : 4*4/2 - 4/2 = 16/2 - 4/2 = 6 => we have 6 unique scores :param matrix: :return: list of unique similarity score """ similarity = cosine_similarity(sparse.csr_matrix(matrix)) similarity_1D = np.array([]) for i, row in enumerate(similarity): similarity_1D = np.append(similarity_1D, row[i+1:]) # We remove duplicate pairwise value return similarity_1D def similarity_inter_matrix(matrix1, matrix2): """ :param matrix1: :param matrix2: :return: """ similarity = 1 - sp.distance.cdist(matrix1, matrix2, 'cosine') return similarity def clustering_terms(biggest, logger, cluster_f_out, listOfLocalities="all", spatial_hieararchy="country", method="kmeans"): """ :param biggest: :param method: :return: """ method_list = ["kmeans", "agglomerative_clustering"] if method not in method_list: logger.error("This method is not implemented for clustering: "+str(method)) return -1 # filter by localities if listOfLocalities != "all": for locality in biggest[spatial_hieararchy].unique(): if locality not in listOfLocalities: biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index) embeddings = bert_embedding_filtred(biggest) if method == "kmeans": # Perform kmean clustering num_clusters = 5 clustering_model = KMeans(n_clusters=num_clusters) clustering_model.fit(embeddings) cluster_assignment = clustering_model.labels_ elif method == "agglomerative_clustering": # Normalize the embeddings to unit length corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True) # Perform Agglomerative clustering clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4) clustering_model.fit(corpus_embeddings) cluster_assignment = clustering_model.labels_ clustered_sentences = {} for sentence_id, cluster_id in enumerate(cluster_assignment): if str(cluster_id) not in clustered_sentences: clustered_sentences[str(cluster_id)] = [] clustered_sentences[str(cluster_id)].append(biggest['terms'].iloc[sentence_id]) with 
open(cluster_f_out, "w") as outfile: json.dump(clustered_sentences, outfile) logger.info("file " + cluster_f_out + " has been saved") def geocoding_token(biggest, listOfLocality, spatial_hieararchy, logger): """ Find and geocode Spatial entity with OSM data (nominatim) Respect terms and use of OSM and Nomitim : - Specify a name for the application, Ie.e user agent - add delay between each query : min_delay_seconds = 1. See : https://geopy.readthedocs.io/en/stable/#module-geopy.extra.rate_limiter - define a time out for waiting nomatim answer : to 10 seconds :param biggest: :return: biggest with geocoding information """ try: if listOfLocality != "all": for locality in biggest[spatial_hieararchy].unique(): if locality not in listOfLocality: biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index) except: logger.info("could not filter, certainly because there is no spatial hiearchy on biggest score") geolocator = Nominatim(user_agent="h-tfidf-evaluation", timeout=10) geocoder = RateLimiter(geolocator.geocode, min_delay_seconds=1) tqdm.pandas() biggest["geocode"] = biggest["terms"].progress_apply(geocoder) return biggest def post_traitement_flood(biggest, logger, spatialLevel, ratio_of_flood=0.5): """ Remove terms from people flooding : return same dataframe with 1 more column : user_flooding With default ratio_of_flood : If an twitter.user use a term in more than 50% of occurence of this terms, we consider this user is flooding :param biggest: File of terms to process :param logger: :param: spatialLevel : work on Country / State / City :param: ratio_of_flood :return: return same dataframe with 1 more column : user_flooding """ ratio_of_flood_global = ratio_of_flood es_logger.setLevel(logging.WARNING) # pre-build elastic query for spatialLevel : rest_user_osm_level = "" if spatialLevel == "country": rest_user_osm_level = "rest_user_osm.country" elif spatialLevel == "state": rest_user_osm_level = "rest.features.properties.state" elif spatialLevel == "city": rest_user_osm_level = "rest.features.properties.city" def is_an_user_flooding(term, locality): client = Elasticsearch("http://localhost:9200") index = "twitter" # Query : ## Retrieve only user name where in full_text = term and rest_user_osm.country = locality if term is not np.NAN: query = {"_source": "user.name","query":{"bool":{"filter":[{"bool":{"should":[{"match_phrase":{"full_text":term}}],"minimum_should_match":1}}, {"bool":{"should":[{"match_phrase":{rest_user_osm_level:locality}}],"minimum_should_match":1}}]}}} try: result = Elasticsearch.search(client, index=index, body=query) list_of_user = [] if len(result["hits"]["hits"]) != 0: for hit in result["hits"]["hits"]: user = hit["_source"]["user"]["name"] list_of_user.append(user) dict_user_nbtweet = dict(Counter(list_of_user)) d = dict((k, v) for k, v in dict_user_nbtweet.items() if v >= (ratio_of_flood_global * len(list_of_user))) if len(d) > 0 : # there is a flood on this term: return 1 else: return 0 else: # not found in ES why ? 
return "not_in_es" except: logger.info("There is a trouble with this term: " + str(term)) return np.NAN else: return 0 logger.debug("start remove terms if they coming from a flooding user, ie, terms in "+str(ratio_of_flood_global*100)+"% of tweets from an unique user over tweets with this words") tqdm.pandas() biggest["user_flooding"] = biggest.progress_apply(lambda t: is_an_user_flooding(t.terms, t[spatialLevel]), axis=1) return biggest def venn(biggest, logger, spatial_level, result_path, locality): """ Build Venn diagramm in word_cloud Save fig in result_path Discussion about font size : In each subset (common or specific), the font size of term is related with the H-TFIDF Rank inside the subset :param biggest: :param logger: :param spatialLevel: :return: """ # Post-traitement biggest = biggest[biggest["user_flooding"] == "0"] # Select locality biggest = biggest[biggest[spatial_level] == locality] # select week weeks = biggest['date'].unique() if len(weeks) == 2: sets = [] weeks_list = [] for week in weeks: sets.append(set(biggest[biggest["date"] == week].terms[0:100])) weeks_list.append(week) try: venn = venn2_wordcloud(sets, set_labels=weeks_list, wordcloud_kwargs=dict(min_font_size=10),) except: logger.info("Can't build venn for: "+locality) elif len(weeks) == 3 or len(weeks) > 3: sets = [] weeks_list = [] word_frequency = {} # for font-size of wordcloud : based on H-TFIDF Rank for nb, week in enumerate(weeks[-3:]): sets.append(set(biggest[biggest["date"] == week].terms[0:100])) weeks_list.append(week) for rank, term in enumerate(biggest[biggest["date"] == week].terms[0:100]): if term not in word_frequency: word_frequency[term] = (100 - rank) try: venn = venn3_wordcloud(sets, set_labels=weeks_list, word_to_frequency=word_frequency, wordcloud_kwargs=dict(min_font_size=4,),) except: logger.info("Can't build venn for: "+locality) sorted_word_frequency = dict(sorted(word_frequency.items(), key=operator.itemgetter(1),reverse=True)) logger.info(locality + ": " + str(sorted_word_frequency)) plt.savefig(result_path + "/venn_" + locality) def frequent_terms_by_level(matrixOcc, logger, most_frequent_terms_fpath, listOfLocalities='all', spatialLevel='country'): """ :param matrixOcc: :param most_frequent_terms_fpath: :param listOfLocalities: :param spatialLevel: :return: """ #matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfLocalities, # spatialLevel=spatialLevel, period='all') # Aggregate by level ## Create 4 new columns : city, State, Country and date def splitindex(row): return row.split("_") matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \ zip(*matrixOcc.index.map(splitindex)) matrixOcc.date = pd.to_datetime((matrixOcc.date)) # convert date into datetime if spatialLevel == 'city': matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="Y")]).sum() elif spatialLevel == 'state': matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="Y")]).sum() elif spatialLevel == 'country': matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="Y")]).sum() # Export N biggest TF-IDF score: top_n = 500 extractBiggest = pd.DataFrame(index=matrixOcc.index, columns=range(0, top_n)) for row in matrixOcc.index: try: row_without_zero = matrixOcc.loc[row]# we remove term with a score = 0 row_without_zero = row_without_zero[ row_without_zero !=0 ] try: extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys() except: extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys() except: 
logger.debug("H-TFIDF: city " + str(matrixOcc.loc[row].name) + "not enough terms") # Transpose this table in order to share the same structure with TF-IDF classifical biggest score : hbt = pd.DataFrame() extractBiggest = extractBiggest.reset_index() for index, row in extractBiggest.iterrows(): hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"]) hbtrow[spatialLevel] = row[spatialLevel] hbtrow["date"] = row["date"] hbt = hbt.append(hbtrow, ignore_index=True) # save file logger.info("saving file: "+most_frequent_terms_fpath) hbt.to_csv(most_frequent_terms_fpath) return hbt def comparison_htfidf_tfidf_frequentterms(htfidf_f, tfidf_corpus_country_f, frequent_terms, logger, plot_f_out, listOfCountries="all"): # Open dataframes htfidf = pd.read_csv(htfidf_f, index_col=0) tfidf = pd.read_csv(tfidf_corpus_country_f, index_col=0) for nb_terms in [100, 200, 500]: # barchart building barchart_df_col = ["country", "h-tfidf", "tf-idf"] barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries))) # loop on countries for country in listOfCountries: htfidf_country = htfidf[htfidf["country"] == country] tfidf_country = tfidf[tfidf["country"] == country] frequent_terms_country = frequent_terms[frequent_terms["country"] == country] # loop on weeks htfidf_overlap_per_week_df = pd.DataFrame(index=range(1)) for week in htfidf_country.date.unique(): htfidf_country_week = htfidf_country[htfidf_country["date"] == week] # build on venn comparison H-TFIDF with Frequent terms sets = [] sets.append(set(htfidf_country_week.terms[0:nb_terms])) sets.append(set(frequent_terms_country.terms[0:nb_terms])) try: venn_htfidf = venn2_wordcloud(sets) htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11')) except: htfidf_overlap_per_week_df[week] = np.NAN # mean value for all weeks : mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms # Compute TF-IDF overlap with Frequent termes sets = [] sets.append(set(tfidf_country.terms[0:nb_terms])) sets.append(set(frequent_terms_country.terms[0:nb_terms])) logger.info(country) venn_tfidf = venn2_wordcloud(sets) plt.close('all') # barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11')) tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms # build the row for barchart if country == "Ἑλλάς": country = "Greece" row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap} barchart_df = barchart_df.append(row, ignore_index=True) # Plot bar chart barchart_df = barchart_df.set_index("country") barchart_df = barchart_df.dropna() barchart_df.plot.bar(figsize=(8,6)) plt.subplots_adjust(bottom=0.27) plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms") plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png") # build venn diagramm ## Choose a country country = "United Kingdom" nb_terms = 100 week = "2020-01-26" ## Filtering matrix to keep TOP15 terms without term with 1 caracter or digital number htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)] tfidf_country = tfidf[tfidf["country"] == country] frequent_terms_country = frequent_terms[frequent_terms["country"] == country] htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3] tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3] frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3] ### Remove number htfidf_country_terms = 
htfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) tfidf_country_terms = tfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) frequent_terms_country_terms = frequent_terms_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) columns_name = [] latex_table_nb_terms = 30 for i in range(latex_table_nb_terms): columns_name.append("rank "+str(i)) latex_table = pd.DataFrame(index=range(3), columns=columns_name) latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False)) sets = [] sets.append(set(htfidf_country_terms)) sets.append(set(tfidf_country_terms)) sets.append(set(frequent_terms_country_terms)) fig, ax = plt.subplots(figsize=(8, 6)) venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax) plt.savefig(plot_f_out + "_"+ country + "venn3.png") plt.show() def comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_whole_f, frequent_terms, logger, plot_f_out, listOfCountries="all"): # Open dataframes htfidf = pd.read_csv(htfidf_f, index_col=0) tfidf = pd.read_csv(tfidf_whole_f, index_col=0) for nb_terms in [100, 200, 500]: # barchart building barchart_df_col = ["country", "h-tfidf", "tf-idf"] barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries))) # loop on countries for country in listOfCountries: # build_compare_measures_localities = ["Ἑλλάς", "Deutschland", "España", "France", "Italia", "Portugal", "United Kingdom"] if country == "Ἑλλάς": htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Greece")] tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Greece")] elif country == "Deutschland": htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Germany")] tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Germany")] elif country == "España": htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Spain")] tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Spain")] elif country == "Italia": htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Italy")] tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Italy")] else: htfidf_country = htfidf[htfidf["country"] == country] tfidf_country = tfidf[tfidf["country"] == country] frequent_terms_country = frequent_terms[frequent_terms["country"] == country] # loop on weeks htfidf_overlap_per_week_df = pd.DataFrame(index=range(1)) for week in htfidf_country.date.unique(): htfidf_country_week = htfidf_country[htfidf_country["date"] == week] # build on venn comparison H-TFIDF with Frequent terms sets = [] sets.append(set(htfidf_country_week.terms[0:nb_terms])) sets.append(set(frequent_terms_country.terms[0:nb_terms])) try: venn_htfidf = venn2_wordcloud(sets) htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11')) except: htfidf_overlap_per_week_df[week] = np.NAN # mean value for all weeks : mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms # Compute TF-IDF overlap with Frequent termes sets = [] sets.append(set(tfidf_country.terms[0:nb_terms])) 
sets.append(set(frequent_terms_country.terms[0:nb_terms])) logger.info(country) try : venn_tfidf = venn2_wordcloud(sets) plt.close('all') # barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11')) tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms except: logger.info("No terms in biggest score for TF-IDF - country: " + country) tfidf_overlap = 0.0 # build the row for barchart if country == "Ἑλλάς": country = "Greece" row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap} barchart_df = barchart_df.append(row, ignore_index=True) # Plot bar chart barchart_df = barchart_df.set_index("country") barchart_df = barchart_df.dropna() barchart_df.plot.bar(figsize=(8,6)) plt.subplots_adjust(bottom=0.27) plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms") plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png") # build venn diagramm ## Choose a country country = "Germany" nb_terms = 100 week = "2020-01-26" ## Filtering matrix to keep TOP15 terms without term with 1 caracter or digital number htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)] tfidf_country = tfidf[tfidf["country"] == country] frequent_terms_country = frequent_terms[frequent_terms["country"] == country] htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3] tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3] frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3] ### Remove number htfidf_country_terms = htfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) tfidf_country_terms = tfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) frequent_terms_country_terms = frequent_terms_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms) columns_name = [] latex_table_nb_terms = 15 for i in range(latex_table_nb_terms): columns_name.append("rank "+str(i)) latex_table = pd.DataFrame(index=range(3), columns=columns_name) latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False)) sets = [] sets.append(set(htfidf_country_terms)) sets.append(set(tfidf_country_terms)) sets.append(set(frequent_terms_country_terms)) fig, ax = plt.subplots(figsize=(8, 6)) venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax) plt.savefig(plot_f_out + "_"+ country + "venn3.png") plt.show() if __name__ == '__main__': # Global parameters : ## Spatial level hierarchie : # spatialLevels = ['country', 'state', 'city'] spatialLevels = ['country', 'state'] ## Time level hierarchie : timeLevel = "week" ## List of country to work on : listOfLocalities = ["Deutschland", "España", "France", "Italia", "United Kingdom"] ## elastic query : query_fname = "elasticsearch/analyse/nldb21/elastic-query/nldb21_europeBySpatialExtent_en_february.txt" ## Path to results : period_extent = "feb_tfidf_whole" f_path_result = "elasticsearch/analyse/nldb21/results/" + period_extent + "_" + timeLevel if not os.path.exists(f_path_result): os.makedirs(f_path_result) # Workflow parameters : ## Rebuild H-TFIDF (with Matrix Occurence) build_htfidf = False build_htfidf_save_intermediaire_files = True ## eval 1 : 
Comparison with classical TF-IDf build_classical_tfidf = False build_classical_tfidf_save_intermediaire_files = False ## evla 2 : Use word_embedding with t-SNE build_tsne = False build_tsne_spatial_level = "country" ## eval 3 : Use word_embedding with box plot to show disparity build_boxplot = False build_boxplot_spatial_level = "country" ## eval 4 : Compare H-TFIDF and TF-IDF with most frequent terms by level build_compare_measures = True build_compare_measures_build_intermedate_files = False build_compare_measures_level = "country" build_compare_measures_localities = ["Ἑλλάς", "Deutschland", "España", "France", "Italia", "Portugal", "United Kingdom"] ## post-traitement 1 : geocode term build_posttraitement_geocode = False ## post-traitement 2 : remove terms form a flooding user build_posttraitement_flooding = False build_posttraitement_flooding_spatial_levels = spatialLevels ## Analyse H-TFIDF for epidemiology 1 : clustering build_clustering = False build_clustering_spatial_levels = ['country', 'state'] build_clustering_list_hierachical_locality = { "country": ["France", "Deutschland", "España", "Italia", "United Kingdom"], 'state': ["Lombardia", "Lazio"], # "city": ["London"] } ## Venn diagramm build_venn = False build_venn_spatial_level = "country" # initialize a logger : log_fname = "elasticsearch/analyse/nldb21/logs/nldb21_" logger = logsetup(log_fname) logger.info("H-TFIDF expirements starts") if build_htfidf: # start the elastic query query = open(query_fname, "r").read() logger.debug("elasticsearch : start quering") tweetsByCityAndDate = elasticsearch_query(query_fname, logger) logger.debug("elasticsearch : stop quering") # Build a matrix of occurence for each terms in document aggregate by city and day ## prepare tree for file in commun for all spatial level : f_path_result_common = f_path_result+"/common" if not os.path.exists(f_path_result_common): os.makedirs(f_path_result_common) ## Define file path matrixAggDay_fpath = f_path_result_common + "/matrixAggDay.csv" matrixOccurence_fpath = f_path_result_common + "/matrixOccurence.csv" logger.debug("Build matrix of occurence : start") matrixOccurence = matrixOccurenceBuilder(tweetsByCityAndDate, matrixAggDay_fpath, matrixOccurence_fpath, build_htfidf_save_intermediaire_files, logger) logger.debug("Build matrix of occurence : stop") ## import matrixOccurence if you don't want to re-build it # matrixOccurence = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0) for spatialLevel in spatialLevels: logger.info("H-TFIDF on: "+spatialLevel) f_path_result_level = f_path_result+"/"+spatialLevel if not os.path.exists(f_path_result_level): os.makedirs(f_path_result_level) ## Compute H-TFIDF matrixHTFIDF_fname = f_path_result_level + "/matrix_H-TFIDF.csv" biggestHTFIDFscore_fname = f_path_result_level + "/h-tfidf-Biggest-score.csv" logger.debug("H-TFIDF : start to compute") HTFIDF(matrixOcc=matrixOccurence, matrixHTFIDF_fname=matrixHTFIDF_fname, biggestHTFIDFscore_fname=biggestHTFIDFscore_fname, spatialLevel=spatialLevel, temporalLevel=timeLevel, ) logger.info("H-TFIDF : stop to compute for all spatial levels") ## Comparison with TF-IDF f_path_result_tfidf = f_path_result + "/tf-idf-classical" f_path_result_tfidf_by_locality = f_path_result_tfidf + "/tfidf-tf-corpus-country" if build_classical_tfidf : if not os.path.exists(f_path_result_tfidf): os.makedirs(f_path_result_tfidf) if not os.path.exists(f_path_result_tfidf_by_locality): os.makedirs(f_path_result_tfidf_by_locality) ### On whole corpus 
TFIDF_TF_on_whole_corpus(elastic_query_fname=query_fname, logger=logger, save_intermediaire_files=build_classical_tfidf_save_intermediaire_files, path_for_filesaved=f_path_result_tfidf) ### By Country TFIDF_TF_with_corpus_state(elastic_query_fname=query_fname, logger=logger, save_intermediaire_files=build_classical_tfidf_save_intermediaire_files, nb_biggest_terms=500, path_for_filesaved=f_path_result_tfidf_by_locality, spatial_hiearchy="country", temporal_period='all') if build_compare_measures: f_path_result_compare_meassures_dir = f_path_result+"/common" f_path_result_compare_meassures_file = \ f_path_result_compare_meassures_dir + "/most_frequent_terms_by_" + build_compare_measures_level + ".csv" f_path_result_compare_meassures_plot = \ f_path_result_compare_meassures_dir + "/most_frequent_terms_by_" + build_compare_measures_level if not os.path.exists(f_path_result_compare_meassures_dir): os.makedirs(f_path_result_compare_meassures_dir) # open Matrix of occurence: try: matrixOccurence = pd.read_csv(f_path_result_compare_meassures_dir + '/matrixOccurence.csv', index_col=0) except: logger.error("File: " + f_path_result_compare_meassures_dir + '/matrixOccurence.csv' + "doesn't exist. You may need to save intermediate file for H-TFIDF") logger.info("Retrieve frequent terms per country") if build_compare_measures_build_intermedate_files: ft = frequent_terms_by_level(matrixOccurence, logger, f_path_result_compare_meassures_file, build_compare_measures_localities, build_compare_measures_level) else: ft = pd.read_csv(f_path_result_compare_meassures_file) # files_path htfidf_f = f_path_result + "/country/h-tfidf-Biggest-score.csv" tfidf_corpus_whole_f = f_path_result + "/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv" comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_corpus_whole_f, ft, logger, f_path_result_compare_meassures_plot, listOfCountries=build_compare_measures_localities) if build_tsne : f_path_result_tsne = f_path_result+"/tsne" if not os.path.exists(f_path_result_tsne): os.makedirs(f_path_result_tsne) biggest_TFIDF_country = pd.read_csv(f_path_result+"/tf-idf-classical/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv", index_col=0) biggest_TFIDF_whole = pd.read_csv(f_path_result+"/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv") biggest_H_TFIDF = pd.read_csv(f_path_result+"/"+build_tsne_spatial_level+'/h-tfidf-Biggest-score.csv', index_col=0) # t_SNE visulation t_SNE_bert_embedding_visualization(biggest_TFIDF_country, logger, listOfLocalities=listOfLocalities, plotname="TF-IDF on corpus by Country", paht2save=f_path_result_tsne) t_SNE_bert_embedding_visualization(biggest_H_TFIDF, logger, listOfLocalities=listOfLocalities, plotname="H-TFIDF", paht2save=f_path_result_tsne) if build_boxplot : # dir path to save : f_path_result_boxplot = f_path_result+"/pairwise-similarity-boxplot" if not os.path.exists(f_path_result_boxplot): os.makedirs(f_path_result_boxplot) # open result from mesures : biggest_TFIDF_country = pd.read_csv(f_path_result_tfidf_by_locality+"/TF-IDF_BiggestScore_on_country_corpus.csv", index_col=0) biggest_TFIDF_whole = pd.read_csv(f_path_result_tfidf+"/TFIDF_BiggestScore_on_whole_corpus.csv") biggest_H_TFIDF = pd.read_csv(f_path_result+"/"+build_boxplot_spatial_level+'/h-tfidf-Biggest-score.csv', index_col=0) # Retrieve embedding : htfidf_embeddings = bert_embedding_filtred(biggest_H_TFIDF, listOfLocalities=listOfLocalities) tfidf_country_embeddings = bert_embedding_filtred(biggest_TFIDF_country, 
listOfLocalities=listOfLocalities) tfidf_whole_embeddings = bert_embedding_filtred(biggest_TFIDF_whole) # Compute similarity : ## Distribution of similarities between terms extracted from a measure htidf_similarity = similarity_intra_matrix_pairwise(htfidf_embeddings) tfidf_country_similarity = similarity_intra_matrix_pairwise(tfidf_country_embeddings) tfidf_whole_similarity = similarity_intra_matrix_pairwise(tfidf_whole_embeddings) plt.subplot(131) plt.boxplot(htidf_similarity) plt.title("H-TFIDF") plt.ylim(0,1) plt.subplot(132) plt.boxplot(tfidf_country_similarity) plt.title("TFIDF with corpus by country") plt.ylim(0, 1) plt.subplot(133) plt.boxplot(tfidf_whole_similarity) plt.title("TFIDF on the whole corpus") plt.ylim(0, 1) plt.tight_layout() plt.subplots_adjust(wspace=0.3) plt.suptitle("Distribution of similarity values among the extracted terms pairs of a measure") plt.savefig(f_path_result_boxplot+"/pairwise-similarity-boxplot.png") # plt.show() plt.close() ## Distribution of similarities between the terms of a country extracted from a measure ### H-TFIDF fig2, axs2 = plt.subplots(1, 5) for i, country in enumerate(listOfLocalities): axs2[i].boxplot(similarity_intra_matrix_pairwise(htfidf_embeddings[i*500:(i+1)*500-1])) axs2[i].set_title(country, fontsize=40) axs2[i].set_ylim(0, 1) # fig2.suptitle("Distribution of similarity by pairs for H-TF-IDF") plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_HTFIDF-country.png") # plt.show() plt.close(fig2) ### TF-IDF by corpus = country fig3, axs3 = plt.subplots(1, 5) for i, country in enumerate(listOfLocalities): axs3[i].boxplot(similarity_intra_matrix_pairwise(tfidf_country_embeddings[i*500:(i+1)*500-1])) axs3[i].set_title(country, fontsize=40) axs3[i].set_ylim(0, 1) # fig3.suptitle("Distribution of similarity by pairs for TF-IDF focus on each country") plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_TFIDF-country.png") # plt.show() plt.close(fig3) ## Distribution of similarities between the set of terms of 2 measures ### H-TF-IDF with TF-IDF on whole corpus and TF-IDF country with TF-IDF on whole corpus fig_compare_TFIDF_whole, ax4 = plt.subplots(1,2) similarity_between_htfidf_tfidf_whole = similarity_inter_matrix(htfidf_embeddings, tfidf_whole_embeddings) similarity_between_tfidfcountry_tfidf_whole = similarity_inter_matrix(tfidf_country_embeddings, tfidf_whole_embeddings) similarity_between_htfidf_tfidf_whole_1D = np.array([]) similarity_between_tfidfcountry_tfidf_whole_1D = np.array([]) for i, row in enumerate(similarity_between_htfidf_tfidf_whole): similarity_between_htfidf_tfidf_whole_1D = np.append(similarity_between_htfidf_tfidf_whole_1D, row[i+1:]) # We remove duplicate pairwise value for i, row in enumerate(similarity_between_tfidfcountry_tfidf_whole): similarity_between_tfidfcountry_tfidf_whole_1D = np.append(similarity_between_tfidfcountry_tfidf_whole_1D, row[i + 1:]) ax4[0].boxplot(similarity_between_htfidf_tfidf_whole_1D) ax4[0].set_ylim(0, 1) ax4[0].set_title("H-TFIDF") ax4[1].boxplot(similarity_between_tfidfcountry_tfidf_whole_1D) ax4[1].set_ylim(0, 1) ax4[1].set_title("TFIDF on country") fig_compare_TFIDF_whole.suptitle("Distribution of similarity between H-TFIDF and TF-IDF on whole corpus") plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_between_TFIDF-whole.png") # plt.show() plt.close(fig_compare_TFIDF_whole) ## Distribution of similarities between sub-set terms by country compared by country pair if build_posttraitement_geocode: # Geocode terms : ## Comments : over 
geocode even on non spatial entities spatial_level = "country" listOfLocalities = ["France", "Deutschland", "España", "Italia", "United Kingdom"] f_path_result = "elasticsearch/analyse/nldb21/results/4thfeb_country" biggest_TFIDF_country = pd.read_csv( f_path_result+"/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_"+spatial_level+"_corpus.csv", index_col=0) biggest_TFIDF_whole =
pd.read_csv(f_path_result+"/TFIDF_BiggestScore_on_whole_corpus.csv")
pandas.read_csv
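The row above targets pandas.read_csv: the prompt ends with "biggest_TFIDF_whole =" and the expected completion re-loads one of the CSV files the script wrote earlier. A minimal, hedged sketch of that load step follows; the file names, the f_path_result value, and the terms/score/country columns are all taken from the export code in the prompt, and the paths are only valid once that export has actually run.

import pandas as pd

f_path_result = "elasticsearch/analyse/nldb21/results/4thfeb_country"

# index_col=0 restores the index that DataFrame.to_csv wrote as the first column
biggest_TFIDF_country = pd.read_csv(
    f_path_result + "/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv",
    index_col=0,
)
biggest_TFIDF_whole = pd.read_csv(f_path_result + "/TFIDF_BiggestScore_on_whole_corpus.csv")

# The exported frames should expose at least these columns (see extractBiggest above)
print(biggest_TFIDF_whole[["terms", "score", "country"]].head())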
from contextlib import contextmanager import struct import tracemalloc import numpy as np import pytest from pandas._libs import hashtable as ht import pandas as pd import pandas._testing as tm from pandas.core.algorithms import isin @contextmanager def activated_tracemalloc(): tracemalloc.start() try: yield finally: tracemalloc.stop() def get_allocated_khash_memory(): snapshot = tracemalloc.take_snapshot() snapshot = snapshot.filter_traces( (tracemalloc.DomainFilter(True, ht.get_hashtable_trace_domain()),) ) return sum(map(lambda x: x.size, snapshot.traces)) @pytest.mark.parametrize( "table_type, dtype", [ (ht.PyObjectHashTable, np.object_), (ht.Complex128HashTable, np.complex128), (ht.Int64HashTable, np.int64), (ht.UInt64HashTable, np.uint64), (ht.Float64HashTable, np.float64), (ht.Complex64HashTable, np.complex64), (ht.Int32HashTable, np.int32), (ht.UInt32HashTable, np.uint32), (ht.Float32HashTable, np.float32), (ht.Int16HashTable, np.int16), (ht.UInt16HashTable, np.uint16), (ht.Int8HashTable, np.int8), (ht.UInt8HashTable, np.uint8), (ht.IntpHashTable, np.intp), ], ) class TestHashTable: def test_get_set_contains_len(self, table_type, dtype): index = 5 table = table_type(55) assert len(table) == 0 assert index not in table table.set_item(index, 42) assert len(table) == 1 assert index in table assert table.get_item(index) == 42 table.set_item(index + 1, 41) assert index in table assert index + 1 in table assert len(table) == 2 assert table.get_item(index) == 42 assert table.get_item(index + 1) == 41 table.set_item(index, 21) assert index in table assert index + 1 in table assert len(table) == 2 assert table.get_item(index) == 21 assert table.get_item(index + 1) == 41 assert index + 2 not in table with pytest.raises(KeyError, match=str(index + 2)): table.get_item(index + 2) def test_map_keys_to_values(self, table_type, dtype, writable): # only Int64HashTable has this method if table_type == ht.Int64HashTable: N = 77 table = table_type() keys = np.arange(N).astype(dtype) vals = np.arange(N).astype(np.int64) + N keys.flags.writeable = writable vals.flags.writeable = writable table.map_keys_to_values(keys, vals) for i in range(N): assert table.get_item(keys[i]) == i + N def test_map_locations(self, table_type, dtype, writable): N = 8 table = table_type() keys = (np.arange(N) + N).astype(dtype) keys.flags.writeable = writable table.map_locations(keys) for i in range(N): assert table.get_item(keys[i]) == i def test_lookup(self, table_type, dtype, writable): N = 3 table = table_type() keys = (np.arange(N) + N).astype(dtype) keys.flags.writeable = writable table.map_locations(keys) result = table.lookup(keys) expected = np.arange(N) tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64)) def test_lookup_wrong(self, table_type, dtype): if dtype in (np.int8, np.uint8): N = 100 else: N = 512 table = table_type() keys = (np.arange(N) + N).astype(dtype) table.map_locations(keys) wrong_keys = np.arange(N).astype(dtype) result = table.lookup(wrong_keys) assert np.all(result == -1) def test_unique(self, table_type, dtype, writable): if dtype in (np.int8, np.uint8): N = 88 else: N = 1000 table = table_type() expected = (np.arange(N) + N).astype(dtype) keys = np.repeat(expected, 5) keys.flags.writeable = writable unique = table.unique(keys) tm.assert_numpy_array_equal(unique, expected) def test_tracemalloc_works(self, table_type, dtype): if dtype in (np.int8, np.uint8): N = 256 else: N = 30000 keys = np.arange(N).astype(dtype) with activated_tracemalloc(): table = table_type() 
table.map_locations(keys) used = get_allocated_khash_memory() my_size = table.sizeof() assert used == my_size del table assert get_allocated_khash_memory() == 0 def test_tracemalloc_for_empty(self, table_type, dtype): with activated_tracemalloc(): table = table_type() used = get_allocated_khash_memory() my_size = table.sizeof() assert used == my_size del table assert get_allocated_khash_memory() == 0 def test_get_state(self, table_type, dtype): table = table_type(1000) state = table.get_state() assert state["size"] == 0 assert state["n_occupied"] == 0 assert "n_buckets" in state assert "upper_bound" in state @pytest.mark.parametrize("N", range(1, 110)) def test_no_reallocation(self, table_type, dtype, N): keys = np.arange(N).astype(dtype) preallocated_table = table_type(N) n_buckets_start = preallocated_table.get_state()["n_buckets"] preallocated_table.map_locations(keys) n_buckets_end = preallocated_table.get_state()["n_buckets"] # original number of buckets was enough: assert n_buckets_start == n_buckets_end # check with clean table (not too much preallocated) clean_table = table_type() clean_table.map_locations(keys) assert n_buckets_start == clean_table.get_state()["n_buckets"] class TestHashTableUnsorted: # TODO: moved from test_algos; may be redundancies with other tests def test_string_hashtable_set_item_signature(self): # GH#30419 fix typing in StringHashTable.set_item to prevent segfault tbl = ht.StringHashTable() tbl.set_item("key", 1) assert tbl.get_item("key") == 1 with pytest.raises(TypeError, match="'key' has incorrect type"): # key arg typed as string, not object tbl.set_item(4, 6) with pytest.raises(TypeError, match="'val' has incorrect type"): tbl.get_item(4) def test_lookup_nan(self, writable): # GH#21688 ensure we can deal with readonly memory views xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3]) xs.setflags(write=writable) m = ht.Float64HashTable() m.map_locations(xs) tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp)) def test_add_signed_zeros(self): # GH#21866 inconsistent hash-function for float64 # default hash-function would lead to different hash-buckets # for 0.0 and -0.0 if there are more than 2^30 hash-buckets # but this would mean 16GB N = 4 # 12 * 10**8 would trigger the error, if you have enough memory m = ht.Float64HashTable(N) m.set_item(0.0, 0) m.set_item(-0.0, 0) assert len(m) == 1 # 0.0 and -0.0 are equivalent def test_add_different_nans(self): # GH#21866 inconsistent hash-function for float64 # create different nans from bit-patterns: NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0] NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0] assert NAN1 != NAN1 assert NAN2 != NAN2 # default hash function would lead to different hash-buckets # for NAN1 and NAN2 even if there are only 4 buckets: m = ht.Float64HashTable() m.set_item(NAN1, 0) m.set_item(NAN2, 0) assert len(m) == 1 # NAN1 and NAN2 are equivalent def test_lookup_overflow(self, writable): xs = np.array([1, 2, 2**63], dtype=np.uint64) # GH 21688 ensure we can deal with readonly memory views xs.setflags(write=writable) m = ht.UInt64HashTable() m.map_locations(xs) tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp)) @pytest.mark.parametrize("nvals", [0, 10]) # resizing to 0 is special case @pytest.mark.parametrize( "htable, uniques, dtype, safely_resizes", [ (ht.PyObjectHashTable, ht.ObjectVector, "object", False), (ht.StringHashTable, ht.ObjectVector, "object", True), (ht.Float64HashTable, ht.Float64Vector, "float64", 
False), (ht.Int64HashTable, ht.Int64Vector, "int64", False), (ht.Int32HashTable, ht.Int32Vector, "int32", False), (ht.UInt64HashTable, ht.UInt64Vector, "uint64", False), ], ) def test_vector_resize( self, writable, htable, uniques, dtype, safely_resizes, nvals ): # Test for memory errors after internal vector # reallocations (GH 7157) # Changed from using np.random.rand to range # which could cause flaky CI failures when safely_resizes=False vals = np.array(range(1000), dtype=dtype) # GH 21688 ensures we can deal with read-only memory views vals.setflags(write=writable) # initialise instances; cannot initialise in parametrization, # as otherwise external views would be held on the array (which is # one of the things this test is checking) htable = htable() uniques = uniques() # get_labels may append to uniques htable.get_labels(vals[:nvals], uniques, 0, -1) # to_array() sets an external_view_exists flag on uniques. tmp = uniques.to_array() oldshape = tmp.shape # subsequent get_labels() calls can no longer append to it # (except for StringHashTables + ObjectVector) if safely_resizes: htable.get_labels(vals, uniques, 0, -1) else: with pytest.raises(ValueError, match="external reference.*"): htable.get_labels(vals, uniques, 0, -1) uniques.to_array() # should not raise here assert tmp.shape == oldshape @pytest.mark.parametrize( "hashtable", [ ht.PyObjectHashTable, ht.StringHashTable, ht.Float64HashTable, ht.Int64HashTable, ht.Int32HashTable, ht.UInt64HashTable, ], ) def test_hashtable_large_sizehint(self, hashtable): # GH#22729 smoketest for not raising when passing a large size_hint size_hint = np.iinfo(np.uint32).max + 1 hashtable(size_hint=size_hint) class TestPyObjectHashTableWithNans: def test_nan_float(self): nan1 = float("nan") nan2 = float("nan") assert nan1 is not nan2 table = ht.PyObjectHashTable() table.set_item(nan1, 42) assert table.get_item(nan2) == 42 def test_nan_complex_both(self): nan1 = complex(float("nan"), float("nan")) nan2 = complex(float("nan"), float("nan")) assert nan1 is not nan2 table = ht.PyObjectHashTable() table.set_item(nan1, 42) assert table.get_item(nan2) == 42 def test_nan_complex_real(self): nan1 = complex(float("nan"), 1) nan2 = complex(float("nan"), 1) other = complex(float("nan"), 2) assert nan1 is not nan2 table = ht.PyObjectHashTable() table.set_item(nan1, 42) assert table.get_item(nan2) == 42 with pytest.raises(KeyError, match=None) as error: table.get_item(other) assert str(error.value) == str(other) def test_nan_complex_imag(self): nan1 = complex(1, float("nan")) nan2 = complex(1, float("nan")) other = complex(2, float("nan")) assert nan1 is not nan2 table = ht.PyObjectHashTable() table.set_item(nan1, 42) assert table.get_item(nan2) == 42 with pytest.raises(KeyError, match=None) as error: table.get_item(other) assert str(error.value) == str(other) def test_nan_in_tuple(self): nan1 = (float("nan"),) nan2 = (float("nan"),) assert nan1[0] is not nan2[0] table = ht.PyObjectHashTable() table.set_item(nan1, 42) assert table.get_item(nan2) == 42 def test_nan_in_nested_tuple(self): nan1 = (1, (2, (float("nan"),))) nan2 = (1, (2, (float("nan"),))) other = (1, 2) table = ht.PyObjectHashTable() table.set_item(nan1, 42) assert table.get_item(nan2) == 42 with pytest.raises(KeyError, match=None) as error: table.get_item(other) assert str(error.value) == str(other) def test_hash_equal_tuple_with_nans(): a = (float("nan"), (float("nan"), float("nan"))) b = (float("nan"), (float("nan"), float("nan"))) assert ht.object_hash(a) == ht.object_hash(b) assert 
ht.objects_are_equal(a, b) def test_get_labels_groupby_for_Int64(writable): table = ht.Int64HashTable() vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64) vals.flags.writeable = writable arr, unique = table.get_labels_groupby(vals) expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp) expected_unique = np.array([1, 2], dtype=np.int64) tm.assert_numpy_array_equal(arr, expected_arr) tm.assert_numpy_array_equal(unique, expected_unique) def test_tracemalloc_works_for_StringHashTable(): N = 1000 keys = np.arange(N).astype(np.compat.unicode).astype(np.object_) with activated_tracemalloc(): table = ht.StringHashTable() table.map_locations(keys) used = get_allocated_khash_memory() my_size = table.sizeof() assert used == my_size del table assert get_allocated_khash_memory() == 0 def test_tracemalloc_for_empty_StringHashTable(): with activated_tracemalloc(): table = ht.StringHashTable() used = get_allocated_khash_memory() my_size = table.sizeof() assert used == my_size del table assert get_allocated_khash_memory() == 0 @pytest.mark.parametrize("N", range(1, 110)) def test_no_reallocation_StringHashTable(N): keys = np.arange(N).astype(np.compat.unicode).astype(np.object_) preallocated_table =
ht.StringHashTable(N)
pandas._libs.hashtable.StringHashTable
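The completion for this row constructs pandas._libs.hashtable.StringHashTable with a size hint so that mapping the keys does not force a bucket reallocation, mirroring the parametrized test_no_reallocation above. A minimal sketch, assuming StringHashTable exposes the same get_state()/map_locations interface as the typed tables in that test; pandas._libs.hashtable is a private module, so treat this as illustrative rather than a supported API.

import numpy as np
from pandas._libs import hashtable as ht

N = 64
# StringHashTable expects an object-dtype array of Python strings
keys = np.arange(N).astype(str).astype(np.object_)

preallocated_table = ht.StringHashTable(N)             # preallocate for N keys
n_buckets_start = preallocated_table.get_state()["n_buckets"]
preallocated_table.map_locations(keys)                 # insert every key
# the original number of buckets was enough, so no reallocation happened
assert n_buckets_start == preallocated_table.get_state()["n_buckets"]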
import datetime import re from warnings import ( catch_warnings, simplefilter, ) import numpy as np import pytest from pandas._libs.tslibs import Timestamp from pandas.compat import is_platform_windows import pandas as pd from pandas import ( DataFrame, Index, Series, _testing as tm, bdate_range, read_hdf, ) from pandas.tests.io.pytables.common import ( _maybe_remove, ensure_clean_path, ensure_clean_store, ) from pandas.util import _test_decorators as td _default_compressor = "blosc" pytestmark = pytest.mark.single def test_conv_read_write(setup_path): with tm.ensure_clean() as path: def roundtrip(key, obj, **kwargs): obj.to_hdf(path, key, **kwargs) return read_hdf(path, key) o = tm.makeTimeSeries() tm.assert_series_equal(o, roundtrip("series", o)) o = tm.makeStringSeries() tm.assert_series_equal(o, roundtrip("string_series", o)) o = tm.makeDataFrame() tm.assert_frame_equal(o, roundtrip("frame", o)) # table df = DataFrame({"A": range(5), "B": range(5)}) df.to_hdf(path, "table", append=True) result = read_hdf(path, "table", where=["index>2"]) tm.assert_frame_equal(df[df.index > 2], result) def test_long_strings(setup_path): # GH6166 df = DataFrame( {"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10) ) with ensure_clean_store(setup_path) as store: store.append("df", df, data_columns=["a"]) result = store.select("df") tm.assert_frame_equal(df, result) def test_api(setup_path): # GH4584 # API issue when to_hdf doesn't accept append AND format args with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.iloc[:10].to_hdf(path, "df", append=True, format="table") df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) # append to False df.iloc[:10].to_hdf(path, "df", append=False, format="table") df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.iloc[:10].to_hdf(path, "df", append=True) df.iloc[10:].to_hdf(path, "df", append=True, format="table") tm.assert_frame_equal(read_hdf(path, "df"), df) # append to False df.iloc[:10].to_hdf(path, "df", append=False, format="table") df.iloc[10:].to_hdf(path, "df", append=True) tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_path(setup_path) as path: df = tm.makeDataFrame() df.to_hdf(path, "df", append=False, format="fixed") tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df", append=False, format="f") tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df", append=False) tm.assert_frame_equal(read_hdf(path, "df"), df) df.to_hdf(path, "df") tm.assert_frame_equal(read_hdf(path, "df"), df) with ensure_clean_store(setup_path) as store: df = tm.makeDataFrame() _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=True, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) # append to False _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) # formats _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, format="table") tm.assert_frame_equal(store.select("df"), df) _maybe_remove(store, "df") store.append("df", df.iloc[:10], append=False, format="table") store.append("df", df.iloc[10:], append=True, 
format=None) tm.assert_frame_equal(store.select("df"), df) with ensure_clean_path(setup_path) as path: # Invalid. df = tm.makeDataFrame() msg = "Can only append to Tables" with pytest.raises(ValueError, match=msg): df.to_hdf(path, "df", append=True, format="f") with pytest.raises(ValueError, match=msg): df.to_hdf(path, "df", append=True, format="fixed") msg = r"invalid HDFStore format specified \[foo\]" with pytest.raises(TypeError, match=msg): df.to_hdf(path, "df", append=True, format="foo") with pytest.raises(TypeError, match=msg): df.to_hdf(path, "df", append=False, format="foo") # File path doesn't exist path = "" msg = f"File {path} does not exist" with pytest.raises(FileNotFoundError, match=msg): read_hdf(path, "df") def test_get(setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() left = store.get("a") right = store["a"] tm.assert_series_equal(left, right) left = store.get("/a") right = store["/a"] tm.assert_series_equal(left, right) with pytest.raises(KeyError, match="'No object named b in the file'"): store.get("b") def test_put_integer(setup_path): # non-date, non-string index df = DataFrame(np.random.randn(50, 100)) _check_roundtrip(df, tm.assert_frame_equal, setup_path) def test_table_values_dtypes_roundtrip(setup_path): with ensure_clean_store(setup_path) as store: df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8") store.append("df_f8", df1) tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes) df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8") store.append("df_i8", df2) tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes) # incompatible dtype msg = re.escape( "invalid combination of [values_axes] on appending data " "[name->values_block_0,cname->values_block_0," "dtype->float64,kind->float,shape->(1, 3)] vs " "current table [name->values_block_0," "cname->values_block_0,dtype->int64,kind->integer," "shape->None]" ) with pytest.raises(ValueError, match=msg): store.append("df_i8", df1) # check creation/storage/retrieval of float32 (a bit hacky to # actually create them thought) df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"]) store.append("df_f4", df1) tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes) assert df1.dtypes[0] == "float32" # check with mixed dtypes df1 = DataFrame( { c: Series(np.random.randint(5), dtype=c) for c in ["float32", "float64", "int32", "int64", "int16", "int8"] } ) df1["string"] = "foo" df1["float322"] = 1.0 df1["float322"] = df1["float322"].astype("float32") df1["bool"] = df1["float32"] > 0 df1["time1"] = Timestamp("20130101") df1["time2"] = Timestamp("20130102") store.append("df_mixed_dtypes1", df1) result = store.select("df_mixed_dtypes1").dtypes.value_counts() result.index = [str(i) for i in result.index] expected = Series( { "float32": 2, "float64": 1, "int32": 1, "bool": 1, "int16": 1, "int8": 1, "int64": 1, "object": 1, "datetime64[ns]": 2, } ) result = result.sort_index() expected = expected.sort_index() tm.assert_series_equal(result, expected) def test_series(setup_path): s = tm.makeStringSeries() _check_roundtrip(s, tm.assert_series_equal, path=setup_path) ts = tm.makeTimeSeries() _check_roundtrip(ts, tm.assert_series_equal, path=setup_path) ts2 = Series(ts.index, Index(ts.index, dtype=object)) _check_roundtrip(ts2, tm.assert_series_equal, path=setup_path) ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object)) _check_roundtrip( ts3, tm.assert_series_equal, path=setup_path, check_index_type=False ) def test_float_index(setup_path): # GH #454 index = 
np.random.randn(10) s = Series(np.random.randn(10), index=index) _check_roundtrip(s, tm.assert_series_equal, path=setup_path) def test_tuple_index(setup_path): # GH #492 col = np.arange(10) idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)] data = np.random.randn(30).reshape((3, 10)) DF = DataFrame(data, index=idx, columns=col) with catch_warnings(record=True): simplefilter("ignore", pd.errors.PerformanceWarning) _check_roundtrip(DF, tm.assert_frame_equal, path=setup_path) @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") def test_index_types(setup_path): with catch_warnings(record=True): values = np.random.randn(2) func = lambda l, r: tm.assert_series_equal(l, r, check_index_type=True) with catch_warnings(record=True): ser = Series(values, [0, "y"]) _check_roundtrip(ser, func, path=setup_path) with catch_warnings(record=True): ser = Series(values, [datetime.datetime.today(), 0]) _check_roundtrip(ser, func, path=setup_path) with catch_warnings(record=True): ser = Series(values, ["y", 0]) _check_roundtrip(ser, func, path=setup_path) with catch_warnings(record=True): ser = Series(values, [datetime.date.today(), "a"]) _check_roundtrip(ser, func, path=setup_path) with catch_warnings(record=True): ser = Series(values, [0, "y"]) _check_roundtrip(ser, func, path=setup_path) ser = Series(values, [datetime.datetime.today(), 0]) _check_roundtrip(ser, func, path=setup_path) ser = Series(values, ["y", 0]) _check_roundtrip(ser, func, path=setup_path) ser = Series(values, [datetime.date.today(), "a"]) _check_roundtrip(ser, func, path=setup_path) ser = Series(values, [1.23, "b"]) _check_roundtrip(ser, func, path=setup_path) ser = Series(values, [1, 1.53]) _check_roundtrip(ser, func, path=setup_path) ser = Series(values, [1, 5]) _check_roundtrip(ser, func, path=setup_path) ser = Series( values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)] ) _check_roundtrip(ser, func, path=setup_path) def test_timeseries_preepoch(setup_path): dr = bdate_range("1/1/1940", "1/1/1960") ts = Series(np.random.randn(len(dr)), index=dr) try: _check_roundtrip(ts, tm.assert_series_equal, path=setup_path) except OverflowError: if is_platform_windows(): pytest.xfail("known failure on some windows platforms") else: raise @pytest.mark.parametrize( "compression", [False, pytest.param(True, marks=td.skip_if_windows)] ) def test_frame(compression, setup_path): df = tm.makeDataFrame() # put in some random NAs df.values[0, 0] = np.nan df.values[5, 3] = np.nan _check_roundtrip_table( df, tm.assert_frame_equal, path=setup_path, compression=compression ) _check_roundtrip( df, tm.assert_frame_equal, path=setup_path, compression=compression ) tdf = tm.makeTimeDataFrame() _check_roundtrip( tdf, tm.assert_frame_equal, path=setup_path, compression=compression ) with ensure_clean_store(setup_path) as store: # not consolidated df["foo"] = np.random.randn(len(df)) store["df"] = df recons = store["df"] assert recons._mgr.is_consolidated() # empty _check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path) def test_empty_series_frame(setup_path): s0 = Series(dtype=object) s1 = Series(name="myseries", dtype=object) df0 = DataFrame() df1 = DataFrame(index=["a", "b", "c"]) df2 = DataFrame(columns=["d", "e", "f"]) _check_roundtrip(s0, tm.assert_series_equal, path=setup_path) _check_roundtrip(s1, tm.assert_series_equal, path=setup_path) _check_roundtrip(df0, tm.assert_frame_equal, path=setup_path) _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) 
@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"]) def test_empty_series(dtype, setup_path): s = Series(dtype=dtype) _check_roundtrip(s, tm.assert_series_equal, path=setup_path) def test_can_serialize_dates(setup_path): rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")] frame = DataFrame(np.random.randn(len(rng), 4), index=rng) _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) def test_store_hierarchical(setup_path, multiindex_dataframe_random_data): frame = multiindex_dataframe_random_data _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) _check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path) _check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path) # check that the names are stored with ensure_clean_store(setup_path) as store: store["frame"] = frame recons = store["frame"] tm.assert_frame_equal(recons, frame) @pytest.mark.parametrize( "compression", [False, pytest.param(True, marks=td.skip_if_windows)] ) def test_store_mixed(compression, setup_path): def _make_one(): df = tm.makeDataFrame() df["obj1"] = "foo" df["obj2"] = "bar" df["bool1"] = df["A"] > 0 df["bool2"] = df["B"] > 0 df["int1"] = 1 df["int2"] = 2 return df._consolidate() df1 = _make_one() df2 = _make_one() _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) with ensure_clean_store(setup_path) as store: store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
pandas._testing.assert_frame_equal
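# --- Illustrative sketch (assumed throwaway path "mixed.h5") --------------------
# `store["obj"] = df` in test_store_mixed is the dict-style spelling of
# `store.put("obj", df)`, and test_get shows that keys resolve with or without a
# leading slash; the snippet below demonstrates that equivalence outside the
# test harness.
import pandas as pd
import pandas._testing as tm

df = pd.DataFrame({"A": [1.0, 2.0], "obj1": ["foo", "bar"], "bool1": [True, False]})
with pd.HDFStore("mixed.h5", mode="w") as store:
    store["obj"] = df                            # same as store.put("obj", df)
    tm.assert_frame_equal(store.get("obj"), df)
    tm.assert_frame_equal(store["/obj"], df)     # leading slash resolves too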
import pandas as pd import numpy as np import copy from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV from sklearn.feature_selection import mutual_info_classif, SelectKBest import matplotlib.pyplot as plt from sklearn import svm from sklearn.neighbors import KNeighborsClassifier from datetime import datetime from os import listdir from os.path import isfile, join import sys import math from sklearn.metrics import accuracy_score, f1_score import re from Extractor import get_word_length_matrix, get_word_length_matrix_with_interval, get_average_word_length, \ get_word_length_matrix_with_margin, get_char_count, get_digits, get_sum_digits, get_word_n_grams, \ get_char_affix_n_grams, get_char_word_n_grams, get_char_punct_n_grams, get_pos_tags_n_grams, get_bow_matrix, \ get_yules_k, get_special_char_matrix, get_function_words, get_pos_tags, get_sentence_end_start, \ get_flesch_reading_ease_vector, get_sentence_count, get_word_count from sklearn.preprocessing import StandardScaler, Normalizer # Chapter 7.1.1. method to trim a feature with low sum e.g. ngrams lower then 5 def trim_df_sum_feature(par_df, par_n): par_df = par_df.fillna(value=0) columns = par_df.columns.to_numpy() data_array = par_df.to_numpy(dtype=float) sum_arr = data_array.sum(axis=0) # reduce n if 0 features would be returned while len(par_df.columns) - len(np.where(sum_arr < par_n)[0]) == 0: par_n -= 1 positions = list(np.where(sum_arr < par_n)) columns = np.delete(columns, positions) data_array = np.delete(data_array, positions, axis=1) return pd.DataFrame(data=data_array, columns=columns) # Chapter 7.1.1. method to trim feature with low occurrence over all article def trim_df_by_occurrence(par_df, n): df_masked = par_df.notnull().astype('int') word_rate = df_masked.sum() columns = [] filtered_bow = pd.DataFrame() for i in range(0, len(word_rate)): if word_rate[i] > n: columns.append(word_rate.index[i]) for c in columns: filtered_bow[c] = par_df[c] return filtered_bow # Chapter 7.1.1. 
Process of filtering the data with low occurrence and save the filtered features in a new file def filter_low_occurrence(): df_bow = pd.read_csv("daten/raw/bow.csv", sep=',', encoding="utf-8", nrows=2500) print(f"BOW before: {len(df_bow.columns)}") df_bow = trim_df_by_occurrence(df_bow, 1) print(f"BOW after: {len(df_bow.columns)}") df_bow.to_csv(f"daten/2_filter_low_occurrence/bow.csv", index=False) for n in range(2, 7): word_n_gram = pd.read_csv(f"daten/raw/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500) print(f"Word_{n}_gram before: {len(word_n_gram.columns)}") word_n_gram = trim_df_by_occurrence(word_n_gram, 1) print(f"Word_{n}_gram after: {len(word_n_gram.columns)}") word_n_gram.to_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", index=False) for n in range(2, 6): char_affix_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_affix_{n}_gram_1.csv", sep=',', encoding="utf-8", nrows=2500) print(f"char_affix_{n}_gram before: {len(char_affix_n_gram.columns)}") char_affix_n_gram = trim_df_sum_feature(char_affix_n_gram, 5) print(f"char_affix_{n}_gram after: {len(char_affix_n_gram.columns)}") char_affix_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_affix_{n}_gram.csv", index=False) char_word_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_word_{n}_gram_1.csv", sep=',', encoding="utf-8", nrows=2500) print(f"char_word_{n}_gram before: {len(char_word_n_gram.columns)}") char_word_n_gram = trim_df_sum_feature(char_word_n_gram, 5) print(f"char_word_{n}_gram after: {len(char_word_n_gram.columns)}") char_word_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_word_{n}_gram.csv", index=False) char_punct_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_punct_{n}_gram_1.csv", sep=',', encoding="utf-8", nrows=2500) print(f"char_punct_{n}_gram before: {len(char_punct_n_gram.columns)}") char_punct_n_gram = trim_df_sum_feature(char_punct_n_gram, 5) print(f"char_punct_{n}_gram after: {len(char_punct_n_gram.columns)}") char_punct_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_punct_{n}_gram.csv", index=False) df_f_word = pd.read_csv("daten/raw/function_words.csv", sep=',', encoding="utf-8", nrows=2500) print(f"Function Words before: {len(df_f_word.columns)}") df_f_word = trim_df_by_occurrence(df_f_word, 1) print(f"Function Words after: {len(df_f_word.columns)}") df_f_word.to_csv(f"daten/2_filter_low_occurrence/function_words.csv", index=False) for n in range(2, 6): pos_tags_n_gram = pd.read_csv(f"daten/raw/pos_tag_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500) print(f"pos_tag_{n}_gram before: {len(pos_tags_n_gram.columns)}") pos_tags_n_gram = trim_df_by_occurrence(pos_tags_n_gram, 1) print(f"pos_tag_{n}_gram after: {len(pos_tags_n_gram.columns)}") pos_tags_n_gram.to_csv(f"daten/2_filter_low_occurrence/pos_tag_{n}_gram.csv", index=False) # Chapter 7.1.2. 
method to filter words based on document frequency def trim_df_by_doc_freq(par_df, par_doc_freq): df_masked = par_df.notnull().astype('int') word_rate = df_masked.sum() / len(par_df) columns = [] filtered_bow = pd.DataFrame() for i in range(0, len(word_rate)): if word_rate[i] < par_doc_freq: columns.append(word_rate.index[i]) for c in columns: filtered_bow[c] = par_df[c] return filtered_bow # Chapter 7.1.2 Process of filtering the data with high document frequency and save the filtered features in a new file def filter_high_document_frequency(): # Filter words with high document frequency df_bow = pd.read_csv("daten/2_filter_low_occurrence/bow.csv", sep=',', encoding="utf-8", nrows=2500) print(f"BOW before: {len(df_bow.columns)}") df_bow = trim_df_by_doc_freq(df_bow, 0.5) print(f"BOW after: {len(df_bow.columns)}") df_bow.to_csv(f"daten/3_fiter_high_frequency/bow.csv", index=False) df_f_word = pd.read_csv("daten/2_filter_low_occurrence/function_words.csv", sep=',', encoding="utf-8", nrows=2500) print(f"Function Word before: {len(df_f_word.columns)}") df_f_word = trim_df_by_doc_freq(df_f_word, 0.5) print(f"Function Word after: {len(df_f_word.columns)}") df_f_word.to_csv(f"daten/3_fiter_high_frequency/function_words.csv", index=False) for n in range(2, 7): word_n_gram = pd.read_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500) print(f"Word_{n}_gram before: {len(word_n_gram.columns)}") word_n_gram = trim_df_by_doc_freq(word_n_gram, 0.5) print(f"Word_{n}_gram after: {len(word_n_gram.columns)}") word_n_gram.to_csv(f"daten/3_fiter_high_frequency/word_{n}_gram.csv", index=False) # Chapter 7.1.4. get the relative frequency based on a length metric (char, word, sentence) def get_rel_frequency(par_df_count, par_df_len_metric_vector): df_rel_freq = pd.DataFrame(columns=par_df_count.columns) for index, row in par_df_count.iterrows(): df_rel_freq = df_rel_freq.append(row.div(par_df_len_metric_vector[index])) return df_rel_freq # Chapter 7.1.4. whole process of the chapter. Get the individual relative frequency of a feature and compare # the correlation to the article length from the absolute and relative feature, save the feature with the estimated # relative frequency in a new file def individual_relative_frequency(): df_len_metrics =
pd.read_csv(f"daten/1_raw/length_metrics.csv", sep=',', encoding="utf-8", nrows=2500)
pandas.read_csv
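# --- Toy illustration (made-up counts, not the thesis corpus) -------------------
# How the two filters defined above interact: trim_df_by_occurrence() keeps a
# term only if it is present (non-null) in more than `n` articles, and
# trim_df_by_doc_freq() afterwards keeps it only while its document frequency
# stays below `par_doc_freq`.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    "der":  [1, 2, 1, 3, 1],                      # in all 5 articles -> doc freq 1.0
    "haus": [2, np.nan, 1, np.nan, np.nan],       # in 2 of 5 articles -> doc freq 0.4
    "xyz":  [np.nan, np.nan, 1, np.nan, np.nan],  # in a single article
})

step1 = trim_df_by_occurrence(toy, 1)    # drops "xyz": it occurs in only one article
step2 = trim_df_by_doc_freq(step1, 0.5)  # drops "der": doc freq 1.0 is not below 0.5
print(list(step2.columns))               # ['haus']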
""" Unit test suite for OLS and PanelOLS classes """ # pylint: disable-msg=W0212 from __future__ import division from datetime import datetime import unittest import nose import numpy as np from pandas import date_range, bdate_range from pandas.core.panel import Panel from pandas import DataFrame, Index, Series, notnull, datetools from pandas.stats.api import ols from pandas.stats.ols import _filter_data from pandas.stats.plm import NonPooledPanelOLS, PanelOLS from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal) import pandas.util.testing as tm from common import BaseTest _have_statsmodels = True try: import statsmodels.api as sm except ImportError: try: import scikits.statsmodels.api as sm except ImportError: _have_statsmodels = False def _check_repr(obj): repr(obj) str(obj) def _compare_ols_results(model1, model2): assert(type(model1) == type(model2)) if hasattr(model1, '_window_type'): _compare_moving_ols(model1, model2) else: _compare_fullsample_ols(model1, model2) def _compare_fullsample_ols(model1, model2): assert_series_equal(model1.beta, model2.beta) def _compare_moving_ols(model1, model2): assert_frame_equal(model1.beta, model2.beta) class TestOLS(BaseTest): # TODO: Add tests for OLS y predict # TODO: Right now we just check for consistency between full-sample and # rolling/expanding results of the panel OLS. We should also cross-check # with trusted implementations of panel OLS (e.g. R). # TODO: Add tests for non pooled OLS. @classmethod def setUpClass(cls): try: import matplotlib as mpl mpl.use('Agg', warn=False) except ImportError: pass if not _have_statsmodels: raise nose.SkipTest def testOLSWithDatasets(self): self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True) self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True) self.checkDataSet(sm.datasets.longley.load(), skip_moving=True) self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True) self.checkDataSet(sm.datasets.copper.load()) self.checkDataSet(sm.datasets.scotland.load()) # degenerate case fails on some platforms # self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all 0s def testWLS(self): X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D']) Y = Series(np.random.randn(30)) weights = X.std(1) self._check_wls(X, Y, weights) weights.ix[[5, 15]] = np.nan Y[[2, 21]] = np.nan self._check_wls(X, Y, weights) def _check_wls(self, x, y, weights): result = ols(y=y, x=x, weights=1/weights) combined = x.copy() combined['__y__'] = y combined['__weights__'] = weights combined = combined.dropna() endog = combined.pop('__y__').values aweights = combined.pop('__weights__').values exog = sm.add_constant(combined.values, prepend=False) sm_result = sm.WLS(endog, exog, weights=1/aweights).fit() assert_almost_equal(sm_result.params, result._beta_raw) assert_almost_equal(sm_result.resid, result._resid_raw) self.checkMovingOLS('rolling', x, y, weights=weights) self.checkMovingOLS('expanding', x, y, weights=weights) def checkDataSet(self, dataset, start=None, end=None, skip_moving=False): exog = dataset.exog[start : end] endog = dataset.endog[start : end] x = DataFrame(exog, index=np.arange(exog.shape[0]), columns=np.arange(exog.shape[1])) y = Series(endog, index=np.arange(len(endog))) self.checkOLS(exog, endog, x, y) if not skip_moving: self.checkMovingOLS('rolling', x, y) self.checkMovingOLS('rolling', x, y, nw_lags=0) self.checkMovingOLS('expanding', x, y, nw_lags=0) self.checkMovingOLS('rolling', x, y, nw_lags=1) self.checkMovingOLS('expanding', x, 
y, nw_lags=1) self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True) def checkOLS(self, exog, endog, x, y): reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit() result = ols(y=y, x=x) # check that sparse version is the same sparse_result = ols(y=y.to_sparse(), x=x.to_sparse()) _compare_ols_results(result, sparse_result) assert_almost_equal(reference.params, result._beta_raw) assert_almost_equal(reference.df_model, result._df_model_raw) assert_almost_equal(reference.df_resid, result._df_resid_raw) assert_almost_equal(reference.fvalue, result._f_stat_raw[0]) assert_almost_equal(reference.pvalues, result._p_value_raw) assert_almost_equal(reference.rsquared, result._r2_raw) assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw) assert_almost_equal(reference.resid, result._resid_raw) assert_almost_equal(reference.bse, result._std_err_raw) assert_almost_equal(reference.tvalues, result._t_stat_raw) assert_almost_equal(reference.cov_params(), result._var_beta_raw) assert_almost_equal(reference.fittedvalues, result._y_fitted_raw) _check_non_raw_results(result) def checkMovingOLS(self, window_type, x, y, weights=None, **kwds): window = sm.tools.tools.rank(x.values) * 2 moving = ols(y=y, x=x, weights=weights, window_type=window_type, window=window, **kwds) # check that sparse version is the same sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(), weights=weights, window_type=window_type, window=window, **kwds) _compare_ols_results(moving, sparse_moving) index = moving._index for n, i in enumerate(moving._valid_indices): if window_type == 'rolling' and i >= window: prior_date = index[i - window + 1] else: prior_date = index[0] date = index[i] x_iter = {} for k, v in x.iteritems(): x_iter[k] = v.truncate(before=prior_date, after=date) y_iter = y.truncate(before=prior_date, after=date) static = ols(y=y_iter, x=x_iter, weights=weights, **kwds) self.compare(static, moving, event_index=i, result_index=n) _check_non_raw_results(moving) FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value', 'r2', 'r2_adj', 'rmse', 'std_err', 't_stat', 'var_beta'] def compare(self, static, moving, event_index=None, result_index=None): index = moving._index # Check resid if we have a time index specified if event_index is not None: ref = static._resid_raw[-1] label = index[event_index] res = moving.resid[label] assert_almost_equal(ref, res) ref = static._y_fitted_raw[-1] res = moving.y_fitted[label] assert_almost_equal(ref, res) # Check y_fitted for field in self.FIELDS: attr = '_%s_raw' % field ref = getattr(static, attr) res = getattr(moving, attr) if result_index is not None: res = res[result_index] assert_almost_equal(ref, res) def test_ols_object_dtype(self): df = DataFrame(np.random.randn(20, 2), dtype=object) model = ols(y=df[0], x=df[1]) summary = repr(model) class TestOLSMisc(unittest.TestCase): ''' For test coverage with faux data ''' @classmethod def setupClass(cls): if not _have_statsmodels: raise nose.SkipTest def test_f_test(self): x = tm.makeTimeDataFrame() y = x.pop('A') model = ols(y=y, x=x) hyp = '1*B+1*C+1*D=0' result = model.f_test(hyp) hyp = ['1*B=0', '1*C=0', '1*D=0'] result = model.f_test(hyp) assert_almost_equal(result['f-stat'], model.f_stat['f-stat']) self.assertRaises(Exception, model.f_test, '1*A=0') def test_r2_no_intercept(self): y = tm.makeTimeSeries() x = tm.makeTimeDataFrame() x_with = x.copy() x_with['intercept'] = 1. 
model1 = ols(y=y, x=x) model2 = ols(y=y, x=x_with, intercept=False) assert_series_equal(model1.beta, model2.beta) # TODO: can we infer whether the intercept is there... self.assert_(model1.r2 != model2.r2) # rolling model1 = ols(y=y, x=x, window=20) model2 = ols(y=y, x=x_with, window=20, intercept=False) assert_frame_equal(model1.beta, model2.beta) self.assert_((model1.r2 != model2.r2).all()) def test_summary_many_terms(self): x = DataFrame(np.random.randn(100, 20)) y = np.random.randn(100) model = ols(y=y, x=x) model.summary def test_y_predict(self): y = tm.makeTimeSeries() x = tm.makeTimeDataFrame() model1 = ols(y=y, x=x) assert_series_equal(model1.y_predict, model1.y_fitted) assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw) def test_predict(self): y = tm.makeTimeSeries() x = tm.makeTimeDataFrame() model1 = ols(y=y, x=x) assert_series_equal(model1.predict(), model1.y_predict) assert_series_equal(model1.predict(x=x), model1.y_predict) assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict) exog = x.copy() exog['intercept'] = 1. rs = Series(np.dot(exog.values, model1.beta.values), x.index) assert_series_equal(model1.y_predict, rs) x2 = x.reindex(columns=x.columns[::-1]) assert_series_equal(model1.predict(x=x2), model1.y_predict) x3 = x2 + 10 pred3 = model1.predict(x=x3) x3['intercept'] = 1. x3 = x3.reindex(columns = model1.beta.index) expected = Series(np.dot(x3.values, model1.beta.values), x3.index) assert_series_equal(expected, pred3) beta = Series(0., model1.beta.index) pred4 = model1.predict(beta=beta) assert_series_equal(Series(0., pred4.index), pred4) def test_predict_longer_exog(self): exogenous = {"1998": "4760","1999": "5904","2000": "4504", "2001": "9808","2002": "4241","2003": "4086", "2004": "4687","2005": "7686","2006": "3740", "2007": "3075","2008": "3753","2009": "4679", "2010": "5468","2011": "7154","2012": "4292", "2013": "4283","2014": "4595","2015": "9194", "2016": "4221","2017": "4520"} endogenous = {"1998": "691", "1999": "1580", "2000": "80", "2001": "1450", "2002": "555", "2003": "956", "2004": "877", "2005": "614", "2006": "468", "2007": "191"} endog = Series(endogenous) exog = Series(exogenous) model = ols(y=endog, x=exog) pred = model.y_predict self.assert_(pred.index.equals(exog.index)) def test_longpanel_series_combo(self): wp = tm.makePanel() lp = wp.to_frame() y = lp.pop('ItemA') model = ols(y=y, x=lp, entity_effects=True, window=20) self.assert_(notnull(model.beta.values).all()) self.assert_(isinstance(model, PanelOLS)) model.summary def test_series_rhs(self): y = tm.makeTimeSeries() x = tm.makeTimeSeries() model = ols(y=y, x=x) expected = ols(y=y, x={'x' : x}) assert_series_equal(model.beta, expected.beta) def test_various_attributes(self): # just make sure everything "works". 
test correctness elsewhere x = DataFrame(np.random.randn(100, 5)) y = np.random.randn(100) model = ols(y=y, x=x, window=20) series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol'] for attr in series_attrs: value = getattr(model, attr) self.assert_(isinstance(value, Series)) # works model._results def test_catch_regressor_overlap(self): df1 = tm.makeTimeDataFrame().ix[:, ['A', 'B']] df2 = tm.makeTimeDataFrame().ix[:, ['B', 'C', 'D']] y = tm.makeTimeSeries() data = {'foo' : df1, 'bar' : df2} self.assertRaises(Exception, ols, y=y, x=data) def test_plm_ctor(self): y = tm.makeTimeDataFrame() x = {'a' : tm.makeTimeDataFrame(), 'b' : tm.makeTimeDataFrame()} model = ols(y=y, x=x, intercept=False) model.summary model = ols(y=y, x=Panel(x)) model.summary def test_plm_attrs(self): y = tm.makeTimeDataFrame() x = {'a' : tm.makeTimeDataFrame(), 'b' : tm.makeTimeDataFrame()} rmodel = ols(y=y, x=x, window=10) model = ols(y=y, x=x) model.resid rmodel.resid def test_plm_lagged_y_predict(self): y = tm.makeTimeDataFrame() x = {'a' : tm.makeTimeDataFrame(), 'b' : tm.makeTimeDataFrame()} model = ols(y=y, x=x, window=10) result = model.lagged_y_predict(2) def test_plm_f_test(self): y = tm.makeTimeDataFrame() x = {'a' : tm.makeTimeDataFrame(), 'b' : tm.makeTimeDataFrame()} model = ols(y=y, x=x) hyp = '1*a+1*b=0' result = model.f_test(hyp) hyp = ['1*a=0', '1*b=0'] result = model.f_test(hyp) assert_almost_equal(result['f-stat'], model.f_stat['f-stat']) def test_plm_exclude_dummy_corner(self): y = tm.makeTimeDataFrame() x = {'a' : tm.makeTimeDataFrame(), 'b' : tm.makeTimeDataFrame()} model = ols(y=y, x=x, entity_effects=True, dropped_dummies={'entity' : 'D'}) model.summary self.assertRaises(Exception, ols, y=y, x=x, entity_effects=True, dropped_dummies={'entity' : 'E'}) def test_columns_tuples_summary(self): # #1837 X = DataFrame(np.random.randn(10, 2), columns=[('a', 'b'), ('c', 'd')]) Y = Series(np.random.randn(10)) # it works! 
model = ols(y=Y, x=X) model.summary class TestPanelOLS(BaseTest): FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value', 'r2', 'r2_adj', 'rmse', 'std_err', 't_stat', 'var_beta'] _other_fields = ['resid', 'y_fitted'] def testFiltering(self): result = ols(y=self.panel_y2, x=self.panel_x2) x = result._x index = x.index.get_level_values(0) index = Index(sorted(set(index))) exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)]) self.assertTrue;(exp_index.equals(index)) index = x.index.get_level_values(1) index = Index(sorted(set(index))) exp_index = Index(['A', 'B']) self.assertTrue(exp_index.equals(index)) x = result._x_filtered index = x.index.get_level_values(0) index = Index(sorted(set(index))) exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3), datetime(2000, 1, 4)]) self.assertTrue(exp_index.equals(index)) assert_almost_equal(result._y.values.flat, [1, 4, 5]) exp_x = [[6, 14, 1], [9, 17, 1], [30, 48, 1]] assert_almost_equal(exp_x, result._x.values) exp_x_filtered = [[6, 14, 1], [9, 17, 1], [30, 48, 1], [11, 20, 1], [12, 21, 1]] assert_almost_equal(exp_x_filtered, result._x_filtered.values) self.assertTrue(result._x_filtered.index.levels[0].equals( result.y_fitted.index)) def test_wls_panel(self): y = tm.makeTimeDataFrame() x = Panel({'x1' : tm.makeTimeDataFrame(), 'x2' : tm.makeTimeDataFrame()}) y.ix[[1, 7], 'A'] = np.nan y.ix[[6, 15], 'B'] = np.nan y.ix[[3, 20], 'C'] = np.nan y.ix[[5, 11], 'D'] = np.nan stack_y = y.stack() stack_x = DataFrame(dict((k, v.stack()) for k, v in x.iterkv())) weights = x.std('items') stack_weights = weights.stack() stack_y.index = stack_y.index._tuple_index stack_x.index = stack_x.index._tuple_index stack_weights.index = stack_weights.index._tuple_index result = ols(y=y, x=x, weights=1/weights) expected = ols(y=stack_y, x=stack_x, weights=1/stack_weights) assert_almost_equal(result.beta, expected.beta) for attr in ['resid', 'y_fitted']: rvals = getattr(result, attr).stack().values evals = getattr(expected, attr).values assert_almost_equal(rvals, evals) def testWithTimeEffects(self): result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True) assert_almost_equal(result._y_trans.values.flat, [0, -0.5, 0.5]) exp_x = [[0, 0], [-10.5, -15.5], [10.5, 15.5]] assert_almost_equal(result._x_trans.values, exp_x) # _check_non_raw_results(result) def testWithEntityEffects(self): result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)
assert_almost_equal(result._y.values.flat, [1, 4, 5])
pandas.util.testing.assert_almost_equal
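# --- Condensed illustration of the reference comparison (legacy pandas.stats) ---
# checkOLS() above validates the legacy pandas ols() result against a
# statsmodels fit with an explicit constant; stripped of the test-class
# scaffolding, that comparison reduces to roughly the following.
import numpy as np
import statsmodels.api as sm
from pandas import DataFrame, Series
from pandas.stats.api import ols
from pandas.util.testing import assert_almost_equal

x = DataFrame(np.random.randn(30, 3), columns=['A', 'B', 'C'])
y = Series(np.random.randn(30))

result = ols(y=y, x=x)
reference = sm.OLS(y.values, sm.add_constant(x.values, prepend=False)).fit()

assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.resid, result._resid_raw)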
# -*- coding: utf-8 -*- # Arithmetc tests for DataFrame/Series/Index/Array classes that should # behave identically. from datetime import timedelta import operator import pytest import numpy as np import pandas as pd import pandas.util.testing as tm from pandas.core import ops from pandas.errors import NullFrequencyError from pandas._libs.tslibs import IncompatibleFrequency from pandas import ( Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex) # ------------------------------------------------------------------ # Fixtures @pytest.fixture def tdser(): """ Return a Series with dtype='timedelta64[ns]', including a NaT. """ return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]') # ------------------------------------------------------------------ # Numeric dtypes Arithmetic with Timedelta Scalar class TestNumericArraylikeArithmeticWithTimedeltaScalar(object): @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="block.eval incorrect", strict=True)) ]) @pytest.mark.parametrize('index', [ pd.Int64Index(range(1, 11)), pd.UInt64Index(range(1, 11)), pd.Float64Index(range(1, 11)), pd.RangeIndex(1, 11)], ids=lambda x: type(x).__name__) @pytest.mark.parametrize('scalar_td', [ Timedelta(days=1), Timedelta(days=1).to_timedelta64(), Timedelta(days=1).to_pytimedelta()], ids=lambda x: type(x).__name__) def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box): # GH#19333 if (box is Series and type(scalar_td) is timedelta and index.dtype == 'f8'): raise pytest.xfail(reason="Cannot multiply timedelta by float") expected = pd.timedelta_range('1 days', '10 days') index = tm.box_expected(index, box) expected = tm.box_expected(expected, box) result = index * scalar_td tm.assert_equal(result, expected) commute = scalar_td * index tm.assert_equal(commute, expected) @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame]) @pytest.mark.parametrize('index', [ pd.Int64Index(range(1, 3)), pd.UInt64Index(range(1, 3)), pd.Float64Index(range(1, 3)), pd.RangeIndex(1, 3)], ids=lambda x: type(x).__name__) @pytest.mark.parametrize('scalar_td', [ Timedelta(days=1), Timedelta(days=1).to_timedelta64(), Timedelta(days=1).to_pytimedelta()], ids=lambda x: type(x).__name__) def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box): if box is Series and type(scalar_td) is timedelta: raise pytest.xfail(reason="TODO: Figure out why this case fails") if box is pd.DataFrame and isinstance(scalar_td, timedelta): raise pytest.xfail(reason="TODO: Figure out why this case fails") expected = TimedeltaIndex(['1 Day', '12 Hours']) index = tm.box_expected(index, box) expected = tm.box_expected(expected, box) result = scalar_td / index tm.assert_equal(result, expected) with pytest.raises(TypeError): index / scalar_td # ------------------------------------------------------------------ # Timedelta64[ns] dtype Arithmetic Operations class TestTimedeltaArraylikeAddSubOps(object): # Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__ # ------------------------------------------------------------- # Invalid Operations @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame], ids=lambda x: x.__name__) def test_td64arr_add_str_invalid(self, box): # GH#13624 tdi = TimedeltaIndex(['1 day', '2 days']) tdi = tm.box_expected(tdi, box) with pytest.raises(TypeError): tdi + 'a' with pytest.raises(TypeError): 'a' + tdi @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame], ids=lambda x: x.__name__) 
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])]) @pytest.mark.parametrize('op', [operator.add, ops.radd, operator.sub, ops.rsub], ids=lambda x: x.__name__) def test_td64arr_add_sub_float(self, box, op, other): tdi = TimedeltaIndex(['-1 days', '-1 days']) tdi = tm.box_expected(tdi, box) if box is pd.DataFrame and op in [operator.add, operator.sub]: pytest.xfail(reason="Tries to align incorrectly, " "raises ValueError") with pytest.raises(TypeError): op(tdi, other) @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Tries to cast df to " "Period", strict=True, raises=IncompatibleFrequency)) ], ids=lambda x: x.__name__) @pytest.mark.parametrize('freq', [None, 'H']) def test_td64arr_sub_period(self, box, freq): # GH#13078 # not supported, check TypeError p = pd.Period('2011-01-01', freq='D') idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq) idx = tm.box_expected(idx, box) with pytest.raises(TypeError): idx - p with pytest.raises(TypeError): p - idx @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="broadcasts along " "wrong axis", raises=ValueError, strict=True)) ], ids=lambda x: x.__name__) @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H']) @pytest.mark.parametrize('tdi_freq', [None, 'H']) def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq): # GH#20049 subtracting PeriodIndex should raise TypeError tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq) dti = Timestamp('2018-03-07 17:16:40') + tdi pi = dti.to_period(pi_freq) # TODO: parametrize over box for pi? tdi = tm.box_expected(tdi, box) with pytest.raises(TypeError): tdi - pi # ------------------------------------------------------------- # Binary operations td64 arraylike and datetime-like @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame], ids=lambda x: x.__name__) def test_td64arr_sub_timestamp_raises(self, box): idx = TimedeltaIndex(['1 day', '2 day']) idx = tm.box_expected(idx, box) msg = "cannot subtract a datelike from|Could not operate" with tm.assert_raises_regex(TypeError, msg): idx - Timestamp('2011-01-01') @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Returns object dtype", strict=True)) ], ids=lambda x: x.__name__) def test_td64arr_add_timestamp(self, box): idx = TimedeltaIndex(['1 day', '2 day']) expected = DatetimeIndex(['2011-01-02', '2011-01-03']) idx = tm.box_expected(idx, box) expected = tm.box_expected(expected, box) result = idx + Timestamp('2011-01-01') tm.assert_equal(result, expected) @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Returns object dtype", strict=True)) ], ids=lambda x: x.__name__) def test_td64_radd_timestamp(self, box): idx = TimedeltaIndex(['1 day', '2 day']) expected = DatetimeIndex(['2011-01-02', '2011-01-03']) idx = tm.box_expected(idx, box) expected = tm.box_expected(expected, box) # TODO: parametrize over scalar datetime types? 
result = Timestamp('2011-01-01') + idx tm.assert_equal(result, expected) # ------------------------------------------------------------------ # Operations with int-like others @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Attempts to broadcast " "incorrectly", strict=True, raises=ValueError)) ], ids=lambda x: x.__name__) def test_td64arr_add_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): tdser + Series([2, 3, 4]) @pytest.mark.parametrize('box', [ pd.Index, pytest.param(Series, marks=pytest.mark.xfail(reason="GH#19123 integer " "interpreted as " "nanoseconds", strict=True)), pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Attempts to broadcast " "incorrectly", strict=True, raises=ValueError)) ], ids=lambda x: x.__name__) def test_td64arr_radd_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): Series([2, 3, 4]) + tdser @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Attempts to broadcast " "incorrectly", strict=True, raises=ValueError)) ], ids=lambda x: x.__name__) def test_td64arr_sub_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): tdser - Series([2, 3, 4]) @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame], ids=lambda x: x.__name__) @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds', strict=True) def test_td64arr_rsub_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) with pytest.raises(TypeError): Series([2, 3, 4]) - tdser @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Tries to broadcast " "incorrectly", strict=True, raises=ValueError)) ], ids=lambda x: x.__name__) def test_td64arr_add_intlike(self, box): # GH#19123 tdi = TimedeltaIndex(['59 days', '59 days', 'NaT']) ser = tm.box_expected(tdi, box) err = TypeError if box is not pd.Index else NullFrequencyError other = Series([20, 30, 40], dtype='uint8') # TODO: separate/parametrize with pytest.raises(err): ser + 1 with pytest.raises(err): ser - 1 with pytest.raises(err): ser + other with pytest.raises(err): ser - other with pytest.raises(err): ser + np.array(other) with pytest.raises(err): ser - np.array(other) with pytest.raises(err): ser + pd.Index(other) with pytest.raises(err): ser - pd.Index(other) @pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame], ids=lambda x: x.__name__) @pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)]) def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser): if box is pd.DataFrame and isinstance(scalar, np.ndarray): # raises ValueError pytest.xfail(reason="DataFrame to broadcast incorrectly") tdser = tm.box_expected(tdser, box) err = TypeError if box is pd.Index and not isinstance(scalar, float): err = NullFrequencyError with pytest.raises(err): tdser + scalar with pytest.raises(err): scalar + tdser with pytest.raises(err): tdser - scalar with pytest.raises(err): scalar - tdser @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Tries to broadcast " "incorrectly", strict=True, raises=ValueError)) ], ids=lambda x: x.__name__) 
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', 'uint64', 'uint32', 'uint16', 'uint8', 'float64', 'float32', 'float16']) @pytest.mark.parametrize('vec', [ np.array([1, 2, 3]), pd.Index([1, 2, 3]), Series([1, 2, 3]) # TODO: Add DataFrame in here? ], ids=lambda x: type(x).__name__) def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser): if type(vec) is Series and not dtype.startswith('float'): pytest.xfail(reason='GH#19123 integer interpreted as nanos') tdser = tm.box_expected(tdser, box) err = TypeError if box is pd.Index and not dtype.startswith('float'): err = NullFrequencyError vector = vec.astype(dtype) # TODO: parametrize over these four ops? with pytest.raises(err): tdser + vector with pytest.raises(err): vector + tdser with pytest.raises(err): tdser - vector with pytest.raises(err): vector - tdser # ------------------------------------------------------------------ # Operations with datetime-like others @pytest.mark.parametrize('box', [ pd.Index, Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail(reason="Returns object dtype " "instead of " "datetime64[ns]", strict=True)) ], ids=lambda x: x.__name__) def test_td64arr_add_sub_timestamp(self, box): # GH#11925 ts = Timestamp('2012-01-01') # TODO: parametrize over types of datetime scalar? tdser = Series(
pd.timedelta_range('1 day', periods=3)
pandas.timedelta_range
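# --- Small standalone illustration (plain pandas, no pytest fixtures) -----------
# The additions exercised above boil down to: timedelta64 data plus a Timestamp
# yields datetime64 data, while adding a bare integer to a freq-less
# TimedeltaIndex is rejected (the tests expect NullFrequencyError for that case).
import pandas as pd

tdi = pd.TimedeltaIndex(['1 day', '2 day'])
print(tdi + pd.Timestamp('2011-01-01'))      # -> DatetimeIndex(['2011-01-02', '2011-01-03'], ...)

ser = pd.Series(pd.timedelta_range('1 day', periods=3))
print(ser + pd.Timestamp('2012-01-01'))      # -> datetime64[ns] Series starting at 2012-01-02

try:
    tdi + 1
except Exception as err:                     # NullFrequencyError in the targeted pandas version
    print(type(err).__name__)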
import numpy as np, pandas as pd from scipy.sparse import csc_matrix, csr_matrix, issparse, isspmatrix_csc, isspmatrix_csr, vstack as sp_vstack import warnings import multiprocessing import ctypes import json import os from copy import deepcopy from ._cpp_interface import isoforest_cpp_obj, _sort_csc_indices, _reconstruct_csr_sliced, _reconstruct_csr_with_categ, _get_has_openmp __all__ = ["IsolationForest"] ### Helpers def _get_num_dtype(X_num=None, sample_weights=None, column_weights=None): if X_num is not None: return np.empty(0, dtype=X_num.dtype) elif sample_weights is not None: return np.empty(0, dtype=column_weights.dtype) elif column_weights is not None: return np.empty(0, dtype=sample_weights.dtype) else: return np.empty(0, dtype=ctypes.c_double) def _get_int_dtype(X_num): if (X_num is not None) and (issparse(X_num)): return np.empty(0, dtype=X_num.indices.dtype) else: return np.empty(0, dtype=ctypes.c_size_t) def _is_row_major(X_num): if (X_num is None) or (issparse(X_num)): return False else: return X_num.strides[1] == X_num.dtype.itemsize def _is_col_major(X_num): if (X_num is None) or (issparse(X_num)): return False else: return X_num.strides[0] == X_num.dtype.itemsize def _copy_if_subview(X_num, prefer_row_major=False): ### TODO: the C++ functions should accept a 'leading dimension' ### parameter so as to avoid copying the data here if (X_num is not None) and (not issparse(X_num)): col_major = _is_col_major(X_num) leading_dimension = int(X_num.strides[1 if col_major else 0] / X_num.dtype.itemsize) if ( (leading_dimension != X_num.shape[0 if col_major else 1]) or (len(X_num.strides) != 2) or (not X_num.flags.aligned) or (not _is_row_major(X_num) and not _is_col_major(X_num)) ): X_num = X_num.copy() if _is_col_major(X_num) != col_major: if prefer_row_major: X_num = np.ascontiguousarray(X_num) else: X_num = np.asfortranarray(X_num) return X_num def _all_equal(x, y): if x.shape[0] != y.shape[0]: return False return np.all(x == y) def _encode_categorical(cl, categories): if (cl.shape[0] >= 100) and (cl.dtype.name == "category"): if _all_equal(cl.cat.categories, categories): return cl.cat.codes return pd.Categorical(cl, categories).codes class IsolationForest: """ Isolation Forest model Isolation Forest is an algorithm originally developed for outlier detection that consists in splitting sub-samples of the data according to some attribute/feature/column at random. The idea is that, the rarer the observation, the more likely it is that a random uniform split on some feature would put outliers alone in one branch, and the fewer splits it will take to isolate an outlier observation like this. The concept is extended to splitting hyperplanes in the extended model (i.e. splitting by more than one column at a time), and to guided (not entirely random) splits in the SCiForest model that aim at isolating outliers faster and finding clustered outliers. This version adds heuristics to handle missing data and categorical variables. Can be used to aproximate pairwise distances by checking the depth after which two observations become separated, and to approximate densities by fitting trees beyond balanced-tree limit. Offers options to vary between randomized and deterministic splits too. Note ---- The default parameters in this software do not correspond to the suggested parameters in any of the references. In particular, the following default values are likely to cause huge differences when compared to the defaults in other software: ``ndim``, ``sample_size``, ``ntrees``. 
The defaults here are nevertheless more likely to result in better models. In order to mimic scikit-learn for example, one would need to pass ``ndim=1``, ``sample_size=256``, ``ntrees=100``, ``missing_action="fail"``, ``nthreads=1``. Note ---- Shorthands for parameter combinations that match some of the references: 'iForest' (reference [1]_): ``ndim=1``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action="fail"``. 'EIF' (reference [3]_): ``ndim=2``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action="fail"``, ``coefs="uniform"``, ``standardize_data=False`` (plus standardizing the data **before** passing it). 'SCiForest' (reference [4]_): ``ndim=2``, ``sample_size=256``, ``max_depth=8``, ``ntrees=100``, ``missing_action="fail"``, ``coefs="normal"``, ``ntry=10``, ``prob_pick_avg_gain=1``, ``penalize_range=True``. Might provide much better results with ``max_depth=None`` despite the reference's recommendation. Note ---- The model offers many tunable parameters. The most likely candidate to tune is ``prob_pick_pooled_gain``, for which higher values tend to result in a better ability to flag outliers in the training data at the expense of hindered performance when making predictions (calling method ``predict``) on new data (including out-of-bag samples for each tree) and poorer generalizability to inputs with values outside the variables' ranges to which the model was fit (see plots generated from the examples in GitHub notebook for a better idea of the difference). The next candidate to tune is ``sample_size`` - the default is to use all rows, but in some datasets introducing sub-sampling can help, especially for the single-variable model. In smaller datasets, one might also want to experiment with ``weigh_by_kurtosis`` and perhaps lower ``ndim``. If using ``prob_pick_pooled_gain``, models are likely to benefit from deeper trees (controlled by ``max_depth``), but using large samples and/or deeper trees can result in significantly slower model fitting and predictions - in such cases, using ``min_gain`` (with a value like 0.25) with ``max_depth=None`` can offer a better speed/performance trade-off than changing ``max_depth``. Note ---- The default parameters will not scale to large datasets. In particular, if the amount of data is large, it's suggested to set a smaller sample size for each tree (parameter ``sample_size``) and to fit fewer of them (parameter ``ntrees``). As well, the default option for 'missing_action' might slow things down significantly. See the documentation of the parameters for more details. These defaults can also result in very big model sizes in memory and as serialized files (e.g. models that weight over 10GB) when the number of rows in the data is large. Using fewer trees, smaller sample sizes, and shallower trees can help to reduce model sizes if that becomes a problem. Note ---- See the documentation of ``predict`` for some considerations when serving models generated through this library. Parameters ---------- sample_size : str "auto", int, float(0,1), or None Sample size of the data sub-samples with which each binary tree will be built. If passing 'None', each tree will be built using the full data. Recommended value in [1]_, [2]_, [3]_ is 256, while the default value in the author's code in [5]_ is 'None' here. If passing "auto", will use the full number of rows in the data, up to 10,000 (i.e. 
will take 'sample_size=min(nrows(X), 10000)') **when calling fit**, and the full amount of rows in the data **when calling the variants** ``fit_predict`` or ``fit_transform``. If passing ``None``, will take the full number of rows in the data (no sub-sampling). If passing a number between zero and one, will assume it means taking a sample size that represents that proportion of the rows in the data. Hint: seeing a distribution of scores which is on average too far below 0.5 could mean that the model needs more trees and/or bigger samples to reach convergence (unless using non-random splits, in which case the distribution is likely to be centered around a much lower number), or that the distributions in the data are too skewed for random uniform splits. ntrees : int Number of binary trees to build for the model. Recommended value in [1]_ is 100, while the default value in the author's code in [5]_ is 10. In general, the number of trees required for good results is higher when (a) there are many columns, (b) there are categorical variables, (c) categorical variables have many categories, (d) `ndim` is high, (e) ``prob_pick_pooled_gain`` is used. Hint: seeing a distribution of scores which is on average too far below 0.5 could mean that the model needs more trees and/or bigger samples to reach convergence (unless using non-random splits, in which case the distribution is likely to be centered around a much lower number), or that the distributions in the data are too skewed for random uniform splits. ndim : int Number of columns to combine to produce a split. If passing 1, will produce the single-variable model described in [1]_ and [2]_, while if passing values greater than 1, will produce the extended model described in [3]_ and [4]_. Recommended value in [4]_ is 2, while [3]_ recommends a low value such as 2 or 3. Models with values higher than 1 are referred hereafter as the extended model (as in [3]_). Note that, when using ``ndim>1`` plus ``standardize_data=True``, the variables are standardized at each step as suggested in [4]_, which makes the models slightly different than in [3]_. ntry : int In the extended model with non-random splits, how many random combinations to try for determining the best gain. Only used when deciding splits by gain (see documentation for parameters 'prob_pick_avg_gain' and 'prob_pick_pooled_gain'). Recommended value in [4]_ is 10. Ignored for single-variable model. categ_cols : None or array-like Columns that hold categorical features, when the data is passed as an array or matrix. Categorical columns should contain only integer values with a continuous numeration starting at zero, with negative values and NaN taken as missing, and the array or list passed here should correspond to the column numbers, with numeration starting at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`). This might be passed either at construction time or when calling ``fit`` or variations of ``fit``. This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as categorical depending on their dtype (see the documentation for ``fit`` for details). max_depth : int, None, or str "auto" Maximum depth of the binary trees to grow. If passing None, will build trees until each observation ends alone in a terminal node or until no further split is possible. 
If using "auto", will limit it to the corresponding depth of a balanced binary tree with number of terminal nodes corresponding to the sub-sample size (the reason being that, if trying to detect outliers, an outlier will only be so if it turns out to be isolated with shorter average depth than usual, which corresponds to a balanced tree depth). When a terminal node has more than 1 observation, the remaining isolation depth for them is estimated assuming the data and splits are both uniformly random (separation depth follows a similar process with expected value calculated as in [6]_). Default setting for [1]_, [2]_, [3]_, [4]_ is "auto", but it's recommended to pass higher values if using the model for purposes other than outlier detection. Note that models that use ``prob_pick_pooled_gain`` or ``prob_pick_avg_gain`` are likely to benefit from deeper trees (larger ``max_depth``), but deeper trees can result in much slower model fitting and predictions. If using pooled gain, one might want to substitute ``max_depth`` with ``min_gain``. ncols_per_tree : None, int, or float(0,1] Number of columns to use (have as potential candidates for splitting at each iteration) in each tree, somewhat similar to the 'mtry' parameter of random forests. In general, this is only relevant when using non-random splits and/or weighting by kurtosis. If passing a number between zero and one, will assume it means taking a sample size that represents that proportion of the columns in the data. If passing exactly 1, will assume it means taking 100% of the columns rather than taking 1 column. If passing ``None`` (the default) or zero, will use the full number of available columns. prob_pick_avg_gain : float(0, 1) * For the single-variable model (``ndim=1``), this parameter indicates the probability of making each split by choosing a column and split point in that same column as both the column and split point that gives the largest averaged gain (as proposed in [4]_) across all available columns and possible splits in each column. Note that this implies evaluating every single column in the sample data when this type of split happens, which will potentially make the model fitting much slower, but has no impact on prediction time. For categorical variables, will take the expected standard deviation that would be gotten if the column were converted to numerical by assigning to each category a random number ~ Unif(0, 1) and calculate gain with those assumed standard deviations. * For the extended model, this parameter indicates the probability that the split point in the chosen linear combination of variables will be decided by this averaged gain criterion. Compared to a pooled average, this tends to result in more cases in which a single observation or very few of them are put into one branch. Recommended to use sub-samples (parameter 'sample_size') when passing this parameter. Note that, since this will create isolated nodes faster, the resulting object will be lighter (use less memory). When splits are not made according to any of 'prob_pick_avg_gain', 'prob_pick_pooled_gain', 'prob_split_avg_gain', 'prob_split_pooled_gain', both the column and the split point are decided at random. Default setting for [1]_, [2]_, [3]_ is zero, and default for [4]_ is 1. 
This is the randomization parameter that can be passed to the author's original code in [5]_, but note that the code in [5]_ suffers from a mathematical error in the calculation of running standard deviations, so the results from it might not match with this library's. Note that, if passing a value of 1 (100%) with no sub-sampling and using the single-variable model, every single tree will have the exact same splits. Under this option, models are likely to produce better results when increasing ``max_depth``. Important detail: if using either ``prob_pick_avg_gain`` or ``prob_pick_pooled_gain``, the distribution of outlier scores is unlikely to be centered around 0.5. prob_pick_pooled_gain : float(0, 1) * For the single-variable model (``ndim=1``), this parameter indicates the probability of making each split by choosing a column and split point in that same column as both the column and split point that gives the largest pooled gain (as used in decision tree classifiers such as C4.5 in [7]_) across all available columns and possible splits in each column. Note that this implies evaluating every single column in the sample data when this type of split happens, which will potentially make the model fitting much slower, but has no impact on prediction time. For categorical variables, will use shannon entropy instead (like in [7]_). * For the extended model, this parameter indicates the probability that the split point in the chosen linear combination of variables will be decided by this pooled gain criterion. Compared to a simple average, this tends to result in more evenly-divided splits and more clustered groups when they are smaller. Recommended to pass higher values when used for imputation of missing values. When used for outlier detection, higher values of this parameter result in models that are able to better flag outliers in the training data of each tree, but generalize poorly to outliers in new data (including out-of-bag samples for each tree) and to values of variables outside of the ranges from the training data. Passing small 'sample_size' and high values of this parameter will tend to flag too many outliers. Note that, since this makes the trees more even and thus it takes more steps to produce isolated nodes, the resulting object will be heavier. When splits are not made according to any of 'prob_pick_avg_gain', 'prob_pick_pooled_gain', 'prob_split_avg_gain', 'prob_split_pooled_gain', both the column and the split point are decided at random. Note that, if passing value 1 (100%) with no sub-sampling and using the single-variable model, every single tree will have the exact same splits. Be aware that ``penalize_range`` can also have a large impact when using ``prob_pick_pooled_gain``. Under this option, models are likely to produce better results when increasing ``max_depth``. Alternatively, one can also control the depth through ``min_gain`` (for which one might want to set ``max_depth=None``). Important detail: if using either ``prob_pick_avg_gain`` or ``prob_pick_pooled_gain``, the distribution of outlier scores is unlikely to be centered around 0.5. prob_split_avg_gain : float(0, 1) Probability of making each split by selecting a column at random and determining the split point as that which gives the highest averaged gain. Not supported for the extended model as the splits are on linear combinations of variables. See the documentation for parameter 'prob_pick_avg_gain' for more details. 
prob_split_pooled_gain : float(0, 1) Probability of making each split by selecting a column at random and determining the split point as that which gives the highest pooled gain. Not supported for the extended model as the splits are on linear combinations of variables. See the documentation for parameter 'prob_pick_pooled_gain' for more details. min_gain : float > 0 Minimum gain that a split threshold needs to produce in order to proceed with a split. Only used when the splits are decided by a gain criterion (either pooled or averaged). If the highest possible gain in the evaluated splits at a node is below this threshold, that node becomes a terminal node. This can be used as a more sophisticated depth control when using pooled gain (note that ``max_depth`` still applies on top of this heuristic). missing_action : str, one of "divide" (single-variable only), "impute", "fail", "auto" How to handle missing data at both fitting and prediction time. Options are: ``"divide"``: (For the single-variable model only, recommended) Will follow both branches and combine the result with the weight given by the fraction of the data that went to each branch when fitting the model. ``"impute"``: Will assign observations to the branch with the most observations in the single-variable model, or fill in missing values with the median of each column of the sample from which the split was made in the extended model (recommended for the extended model). ``"fail"``: Will assume there are no missing values and will trigger undefined behavior if it encounters any. ``"auto"``: Will use "divide" for the single-variable model and "impute" for the extended model. In the extended model, infinite values will be treated as missing. Passing "fail" will produce faster fitting and prediction times along with decreased model object sizes. Models from [1]_, [2]_, [3]_, [4]_ correspond to "fail" here. new_categ_action : str, one of "weighted" (single-variable only), "impute" (extended only), "smallest", "random" What to do after splitting a categorical feature when new data that reaches that split has categories that the sub-sample from which the split was done did not have. Options are: ``"weighted"``: (For the single-variable model only, recommended) Will follow both branches and combine the result with weight given by the fraction of the data that went to each branch when fitting the model. ``"impute"``: (For the extended model only, recommended) Will assign them the median value for that column that was added to the linear combination of features. ``"smallest"``: In the single-variable case will assign all observations with unseen categories in the split to the branch that had fewer observations when fitting the model, and in the extended case will assign them the coefficient of the least common category. ``"random"``: Will assing a branch (coefficient in the extended model) at random for each category beforehand, even if no observations had that category when fitting the model. Note that this can produce biased results when deciding splits by a gain criterion. Important: under this option, if the model is fitted to a ``DataFrame``, when calling ``predict`` on new data which contains new categories (unseen in the data to which the model was fitted), they will be added to the model's state on-the-fly. This means that, if calling ``predict`` on data which has new categories, there might be inconsistencies in the results if predictions are done in parallel or if passing the same data in batches or with different row orders. 
It also means that the ``predict`` function will not be thread-safe (e.g. cannot be used alongside ``joblib`` with a backend that uses shared memory). ``"auto"``: Will select "weighted" for the single-variable model and "impute" for the extended model. Ignored when passing 'categ_split_type' = 'single_categ'. categ_split_type : str, one of "subset" or "single_categ" Whether to split categorical features by assigning sub-sets of them to each branch, or by assigning a single category to a branch and the rest to the other branch. For the extended model, whether to give each category a coefficient, or only one while the rest get zero. all_perm : bool When doing categorical variable splits by pooled gain with ``ndim=1`` (regular model), whether to consider all possible permutations of variables to assign to each branch or not. If ``False``, will sort the categories by their frequency and make a grouping in this sorted order. Note that the number of combinations evaluated (if ``True``) is the factorial of the number of present categories in a given column (minus 2). For averaged gain, the best split is always to put the second most-frequent category in a separate branch, so not evaluating all permutations (passing ``False``) will make it possible to select other splits that respect the sorted frequency order. Ignored when not using categorical variables or not doing splits by pooled gain or using ``ndim > 1``. coef_by_prop : bool In the extended model, whether to sort the randomly-generated coefficients for categories according to their relative frequency in the tree node. This might provide better results when using categorical variables with too many categories, but is not recommended, and not reflective of real "categorical-ness". Ignored for the regular model (``ndim=1``) and/or when not using categorical variables. recode_categ : bool Whether to re-encode categorical variables even in case they are already passed as ``pd.Categorical``. This is recommended as it will eliminate potentially redundant categorical levels if they have no observations, but if the categorical variables are already of type ``pd.Categorical`` with only the levels that are present, it can be skipped for slightly faster fitting times. You'll likely want to pass ``False`` here if merging several models into one through ``append_trees``. weights_as_sample_prob : bool If passing sample (row) weights when fitting the model, whether to consider those weights as row sampling weights (i.e. the higher the weights, the more likely the observation will end up included in each tree sub-sample), or as distribution density weights (i.e. putting a weight of two is the same as if the row appeared twice, thus higher weight makes it less of an outlier). Note that sampling weight is only used when sub-sampling data for each tree, which is not the default in this implementation. sample_with_replacement : bool Whether to sample rows with replacement or not (not recommended). Note that distance calculations, if desired, don't work well with duplicate rows. penalize_range : bool Whether to penalize (add -1 to the terminal depth) observations at prediction time that have a value of the chosen split variable (linear combination in extended model) that falls outside of a pre-determined reasonable range in the data being split (given by 2 * range in data and centered around the split point), as proposed in [4]_ and implemented in the authors' original code in [5]_. Not used in single-variable model when splitting by categorical variables. 
It's recommended to turn this off for faster predictions on sparse CSC matrices. Note that this can make a very large difference in the results when using ``prob_pick_pooled_gain``. Be aware that this option can make the distribution of outlier scores a bit different (i.e. not centered around 0.5). standardize_data : bool Whether to standardize the features at each node before creating a linear combination of them as suggested in [4]_. This is ignored when using ``ndim=1``. weigh_by_kurtosis : bool Whether to weigh each column according to the kurtosis obtained in the sub-sample that is selected for each tree as briefly proposed in [1]_. Note that this is only done at the beginning of each tree sample, so if not using sub-samples, it's better to pass column weights calculated externally. For categorical columns, will calculate expected kurtosis if the column was converted to numerical by assigning to each category a random number ~ Unif(0, 1). Note that when using sparse matrices, the calculation of kurtosis will rely on a procedure that uses sums of squares and higher-power numbers, which has less numerical precision than the calculation used for dense inputs, and as such, the results might differ slightly. Using this option makes the model more likely to pick the columns that have anomalous values when viewed as a 1-d distribution, and can bring a large improvement in some datasets. coefs : str, one of "normal" or "uniform" For the extended model, whether to sample random coefficients according to a normal distribution ~ N(0, 1) (as proposed in [4]_) or according to a uniform distribution ~ Unif(-1, +1) as proposed in [3]_. Ignored for the single-variable model. Note that, for categorical variables, the coefficients will be sampled ~ N(0, 1) regardless - in order for both types of variables to have transformations in similar ranges (which will tend to boost the importance of categorical variables), pass ``"uniform"`` here. assume_full_distr : bool When calculating pairwise distances (see [8]_), whether to assume that the fitted model represents a full population distribution (will use a standardizing criterion assuming infinite sample, and the results of the similarity between two points at prediction time will not depend on the presence of any third point that is similar to them, but will differ more compared to the pairwise distances between points from which the model was fit). If passing 'False', will calculate pairwise distances as if the new observations at prediction time were added to the sample to which each tree was fit, which will make the distances between two points potentially vary according to other newly introduced points. This will not be assumed when the distances are calculated as the model is being fit (see documentation for method 'fit_transform'). build_imputer : bool Whether to construct missing-value imputers so that later this same model could be used to impute missing values of new (or the same) observations. Be aware that this will significantly increase the memory requirements and serialized object sizes. Note that this is not related to 'missing_action' as missing values inside the model are treated differently and follow their own imputation or division strategy. min_imp_obs : int Minimum number of observations with which an imputation value can be produced. Ignored if passing 'build_imputer' = 'False'. depth_imp : str, one of "higher", "lower", "same" How to weight observations according to their depth when used for imputing missing values.
Passing "higher" will weigh observations higher the further down the tree (away from the root node) the terminal node is, while "lower" will do the opposite, and "same" will not modify the weights according to node depth in the tree. Implemented for testing purposes and not recommended to change from the default. Ignored when passing 'build_imputer' = 'False'. weigh_imp_rows : str, one of "inverse", "prop", "flat" How to weight node sizes when used for imputing missing values. Passing "inverse" will weigh a node inversely proportional to the number of observations that end up there, while "proportional" will weight them heavier the more observations there are, and "flat" will weigh all nodes the same in this regard regardless of how many observations end up there. Implemented for testing purposes and not recommended to change from the default. Ignored when passing 'build_imputer' = 'False'. random_seed : int Seed that will be used for random number generation. nthreads : int Number of parallel threads to use. If passing a negative number, will use the same formula as joblib does for calculating number of threads (which is n_cpus + 1 + n_jobs - i.e. pass -1 to use all available threads). Note that, the more threads, the more memory will be allocated, even if the thread does not end up being used. Be aware that most of the operations are bound by memory bandwidth, which means that adding more threads will not result in a linear speed-up. For some types of data (e.g. large sparse matrices with small sample sizes), adding more threads might result in only a very modest speed up (e.g. 1.5x faster with 4x more threads), even if all threads look fully utilized. n_estimators : None or int Synonym for ``ntrees``, kept for better compatibility with scikit-learn. max_samples : None or int Synonym for ``sample_size``, kept for better compatibility with scikit-learn. n_jobs : None or int Synonym for ``nthreads``, kept for better compatibility with scikit-learn. random_state : None, int, or RandomState Synonym for ``random_seed``, kept for better compatibility with scikit-learn. bootstrap : None or bool Synonym for ``sample_with_replacement``, kept for better compatibility with scikit-learn. Attributes ---------- cols_numeric_ : array(n_num_features,) Array with the names of the columns that were taken as numerical (Only when fitting the model to a DataFrame object). cols_categ_ : array(n_categ_features,) Array with the names of the columns that were taken as categorical (Only when fitting the model to a DataFrame object). is_fitted_ : bool Indicator telling whether the model has been fit to data or not. References ---------- .. [1] Liu, <NAME>, <NAME>, and <NAME>. "Isolation forest." 2008 Eighth IEEE International Conference on Data Mining. IEEE, 2008. .. [2] Liu, <NAME>, <NAME>, and <NAME>. "Isolation-based anomaly detection." ACM Transactions on Knowledge Discovery from Data (TKDD) 6.1 (2012): 3. .. [3] Hariri, Sahand, <NAME>, and <NAME>. "Extended Isolation Forest." arXiv preprint arXiv:1811.02141 (2018). .. [4] Liu, <NAME>, <NAME>, and <NAME>. "On detecting clustered anomalies using SCiForest." Joint European Conference on Machine Learning and Knowledge Discovery in Databases. Springer, Berlin, Heidelberg, 2010. .. [5] https://sourceforge.net/projects/iforest/ .. [6] https://math.stackexchange.com/questions/3388518/expected-number-of-paths-required-to-separate-elements-in-a-binary-tree .. [7] <NAME>. C4. 5: programs for machine learning. Elsevier, 2014. .. [8] <NAME>. 
"Distance approximation using Isolation Forests." arXiv preprint arXiv:1910.12362 (2019). .. [9] <NAME>. "Imputing missing values with unsupervised random trees." arXiv preprint arXiv:1911.06646 (2019). .. [10] https://math.stackexchange.com/questions/3333220/expected-average-depth-in-random-binary-tree-constructed-top-to-bottom """ def __init__(self, sample_size = "auto", ntrees = 500, ndim = 3, ntry = 3, categ_cols = None, max_depth = "auto", ncols_per_tree = None, prob_pick_avg_gain = 0.0, prob_pick_pooled_gain = 0.0, prob_split_avg_gain = 0.0, prob_split_pooled_gain = 0.0, min_gain = 0., missing_action = "auto", new_categ_action = "auto", categ_split_type = "subset", all_perm = False, coef_by_prop = False, recode_categ = False, weights_as_sample_prob = True, sample_with_replacement = False, penalize_range = False, standardize_data = True, weigh_by_kurtosis = False, coefs = "normal", assume_full_distr = True, build_imputer = False, min_imp_obs = 3, depth_imp = "higher", weigh_imp_rows = "inverse", random_seed = 1, nthreads = -1, n_estimators = None, max_samples = None, n_jobs = None, random_state = None, bootstrap = None): self.sample_size = sample_size self.ntrees = ntrees self.ndim = ndim self.ntry = ntry self.categ_cols = categ_cols self.max_depth = max_depth self.ncols_per_tree = ncols_per_tree self.prob_pick_avg_gain = prob_pick_avg_gain self.prob_pick_pooled_gain = prob_pick_pooled_gain self.prob_split_avg_gain = prob_split_avg_gain self.prob_split_pooled_gain = prob_split_pooled_gain self.min_gain = min_gain self.missing_action = missing_action self.new_categ_action = new_categ_action self.categ_split_type = categ_split_type self.all_perm = all_perm self.coef_by_prop = coef_by_prop self.recode_categ = recode_categ self.weights_as_sample_prob = weights_as_sample_prob self.sample_with_replacement = sample_with_replacement self.penalize_range = penalize_range self.standardize_data = standardize_data self.weigh_by_kurtosis = weigh_by_kurtosis self.coefs = coefs self.assume_full_distr = assume_full_distr self.build_imputer = build_imputer self.min_imp_obs = min_imp_obs self.depth_imp = depth_imp self.weigh_imp_rows = weigh_imp_rows self.random_seed = random_seed self.nthreads = nthreads self.n_estimators = n_estimators self.max_samples = max_samples self.n_jobs = n_jobs self.random_state = random_state self.bootstrap = bootstrap self._reset_obj() def _init(self, categ_cols = None): if categ_cols is not None: if self.categ_cols is not None: warnings.warn("Passed 'categ_cols' in constructor and fit method. 
Will take the latter.") self.categ_cols = categ_cols self._initialize_full( sample_size = self.sample_size if (self.max_samples is None) else self.max_samples, ntrees = self.ntrees if (self.n_estimators is None) else self.n_estimators, ndim = self.ndim, ntry = self.ntry, categ_cols = self.categ_cols, max_depth = self.max_depth, ncols_per_tree = self.ncols_per_tree, prob_pick_avg_gain = self.prob_pick_avg_gain, prob_pick_pooled_gain = self.prob_pick_pooled_gain, prob_split_avg_gain = self.prob_split_avg_gain, prob_split_pooled_gain = self.prob_split_pooled_gain, min_gain = self.min_gain, missing_action = self.missing_action, new_categ_action = self.new_categ_action, categ_split_type = self.categ_split_type, all_perm = self.all_perm, coef_by_prop = self.coef_by_prop, recode_categ = self.recode_categ, weights_as_sample_prob = self.weights_as_sample_prob, sample_with_replacement = self.sample_with_replacement if (self.bootstrap is None) else self.bootstrap, penalize_range = self.penalize_range, standardize_data = self.standardize_data, weigh_by_kurtosis = self.weigh_by_kurtosis, coefs = self.coefs, assume_full_distr = self.assume_full_distr, build_imputer = self.build_imputer, min_imp_obs = self.min_imp_obs, depth_imp = self.depth_imp, weigh_imp_rows = self.weigh_imp_rows, random_seed = self.random_seed if (self.random_state is None) else self.random_state, nthreads = self.nthreads if (self.n_jobs is None) else self.n_jobs) def _initialize_full(self, sample_size = None, ntrees = 500, ndim = 3, ntry = 3, categ_cols = None, max_depth = "auto", ncols_per_tree = None, prob_pick_avg_gain = 0.0, prob_pick_pooled_gain = 0.0, prob_split_avg_gain = 0.0, prob_split_pooled_gain = 0.0, min_gain = 0., missing_action = "auto", new_categ_action = "auto", categ_split_type = "subset", all_perm = False, coef_by_prop = False, recode_categ = True, weights_as_sample_prob = True, sample_with_replacement = False, penalize_range = True, standardize_data = True, weigh_by_kurtosis = False, coefs = "normal", assume_full_distr = True, build_imputer = False, min_imp_obs = 3, depth_imp = "higher", weigh_imp_rows = "inverse", random_seed = 1, nthreads = -1): if (sample_size is not None) and (sample_size != "auto"): assert sample_size > 0 if sample_size > 1: assert isinstance(sample_size, int) elif sample_size == 1: sample_size = None if ncols_per_tree is not None: assert ncols_per_tree > 0 if ncols_per_tree > 1: assert isinstance(ncols_per_tree, int) elif ncols_per_tree == 1: ncols_per_tree = None assert ntrees > 0 assert isinstance(ntrees, int) if (max_depth != "auto") and (max_depth is not None): assert max_depth > 0 assert isinstance(max_depth, int) if (sample_size is not None) and (sample_size != "auto"): assert max_depth < sample_size assert ndim >= 1 assert isinstance(ndim, int) assert ntry >= 1 assert isinstance(ntry, int) if isinstance(random_seed, np.random.RandomState): random_seed = random_seed.randint(np.iinfo(np.int32).max) if isinstance(random_seed, np.random.Generator): random_seed = random_seed.integers(np.iinfo(np.int32).max) random_seed = int(random_seed) assert random_seed >= 0 assert isinstance(min_imp_obs, int) assert min_imp_obs >= 1 assert missing_action in ["divide", "impute", "fail", "auto"] assert new_categ_action in ["weighted", "smallest", "random", "impute", "auto"] assert categ_split_type in ["single_categ", "subset"] assert coefs in ["normal", "uniform"] assert depth_imp in ["lower", "higher", "same"] assert weigh_imp_rows in ["inverse", "prop", "flat"] assert prob_pick_avg_gain >= 0 assert 
prob_pick_pooled_gain >= 0 assert prob_split_avg_gain >= 0 assert prob_split_pooled_gain >= 0 assert min_gain >= 0 s = prob_pick_avg_gain + prob_pick_pooled_gain + prob_split_avg_gain + prob_split_pooled_gain if s > 1: warnings.warn("Split type probabilities sum to more than 1, will standardize them") prob_pick_avg_gain /= s prob_pick_pooled_gain /= s prob_split_avg_gain /= s prob_split_pooled_gain /= s if (ndim == 1) and ((sample_size is None) or (sample_size == "auto")) and ((prob_pick_avg_gain >= 1) or (prob_pick_pooled_gain >= 1)) and (not sample_with_replacement): msg = "Passed parameters for deterministic single-variable splits" msg += " with no sub-sampling. " msg += "Every tree fitted will end up doing exactly the same splits. " msg += "It's recommended to set 'prob_pick_avg_gain' < 1, 'prob_pick_pooled_gain' < 1, " msg += "or to use the extended model (ndim > 1)." warnings.warn(msg) if missing_action == "auto": if ndim == 1: missing_action = "divide" else: missing_action = "impute" if new_categ_action == "auto": if ndim == 1: new_categ_action = "weighted" else: new_categ_action = "impute" if (build_imputer) and (missing_action == "fail"): raise ValueError("Cannot impute missing values when passing 'missing_action' = 'fail'.") if ndim == 1: if (categ_split_type != "single_categ") and (new_categ_action == "impute"): raise ValueError("'new_categ_action' = 'impute' not supported in single-variable model.") else: if (prob_split_avg_gain > 0) or (prob_split_pooled_gain > 0): msg = "Non-zero values for 'prob_split_avg_gain' " msg += "and 'prob_split_pooled_gain' not meaningful in " msg += "extended model." raise ValueError(msg) if missing_action == "divide": raise ValueError("'missing_action' = 'divide' not supported in extended model.") if (categ_split_type != "single_categ") and (new_categ_action == "weighted"): raise ValueError("'new_categ_action' = 'weighted' not supported in extended model.") if (weigh_by_kurtosis) and (ndim == 1) and (prob_pick_pooled_gain + prob_split_avg_gain) >= 1: msg = "'weigh_by_kurtosis' is incompatible with deterministic column selection" msg += " ('prob_pick_pooled_gain' and ' prob_split_avg_gain'). Will be forced to 'False'." warnings.warn(msg) weigh_by_kurtosis = False if nthreads is None: nthreads = 1 elif nthreads < 0: nthreads = multiprocessing.cpu_count() + 1 + nthreads assert nthreads > 0 assert isinstance(nthreads, int) if (nthreads > 1) and (not _get_has_openmp()): msg_omp = "Attempting to use more than 1 thread, but " msg_omp += "package was built without multi-threading " msg_omp += "support - see the project's GitHub page for " msg_omp += "more information." 
warnings.warn(msg_omp) if categ_cols is not None: categ_cols = np.array(categ_cols).reshape(-1).astype(int) categ_cols.sort() self.sample_size = sample_size self.ntrees = ntrees self.ndim = ndim self.ntry = ntry self.categ_cols = categ_cols self.max_depth = max_depth self.ncols_per_tree = ncols_per_tree self.prob_pick_avg_gain = prob_pick_avg_gain self.prob_pick_pooled_gain = prob_pick_pooled_gain self.prob_split_avg_gain = prob_split_avg_gain self.prob_split_pooled_gain = prob_split_pooled_gain self.min_gain = min_gain self.missing_action = missing_action self.new_categ_action = new_categ_action self.categ_split_type = categ_split_type self.coefs = coefs self.depth_imp = depth_imp self.weigh_imp_rows = weigh_imp_rows self.min_imp_obs = min_imp_obs self.random_seed = random_seed self.nthreads = nthreads self.all_perm = bool(all_perm) self.recode_categ = bool(recode_categ) self.coef_by_prop = bool(coef_by_prop) self.weights_as_sample_prob = bool(weights_as_sample_prob) self.sample_with_replacement = bool(sample_with_replacement) self.penalize_range = bool(penalize_range) self.standardize_data = bool(standardize_data) self.weigh_by_kurtosis = bool(weigh_by_kurtosis) self.assume_full_distr = bool(assume_full_distr) self.build_imputer = bool(build_imputer) self._reset_obj() def _reset_obj(self): self.cols_numeric_ = np.array([]) self.cols_categ_ = np.array([]) self._cat_mapping = list() self._cat_max_lev = np.array([]) self._ncols_numeric = 0 self._ncols_categ = 0 self.is_fitted_ = False self._ntrees = 0 self._cpp_obj = isoforest_cpp_obj() self._is_extended_ = self.ndim > 1 def copy(self): """ Get a deep copy of this object Returns ------- copied : obj A deep copy of this object """ if not self.is_fitted_: self._cpp_obj = isoforest_cpp_obj() return deepcopy(self) else: obj_restore = self._cpp_obj obj_new = self._cpp_obj.deepcopy() try: self._cpp_obj = None out = deepcopy(self) finally: self._cpp_obj = obj_restore out._cpp_obj = obj_new return out def get_params(self, deep=True): """ Get parameters for this estimator. Kept for compatibility with scikit-learn. Parameters ---------- deep : bool Ignored. Returns ------- params : dict Parameter names mapped to their values. """ import inspect return {param.name:getattr(self, param.name) for param in inspect.signature(self.__init__).parameters.values()} def set_params(self, **params): """ Set the parameters of this estimator. Kept for compatibility with scikit-learn. Note ---- Setting any parameter other than the number of threads will reset the model - that is, if it was fitted to some data, the fitted model will be lost, and it will need to be refitted before being able to make predictions. Parameters ---------- **params : dict Estimator parameters. Returns ------- self : estimator instance Estimator instance. 
""" if not (len(params) == 1 and ("nthreads" in params or "n_jobs" in params)): self.is_fitted_ = False valid_params = self.get_params(deep=False) for k,v in params.items(): if k not in valid_params: raise ValueError("Invalid parameter: ", k) setattr(self, k, v) return self def __str__(self): msg = "" if self._is_extended_: msg += "Extended " msg += "Isolation Forest model" if (self.prob_pick_avg_gain + self.prob_pick_pooled_gain) > 0 or \ (self.ndim == 1 and (self.prob_split_avg_gain + self.prob_split_pooled_gain) > 0): msg += " (using guided splits)" msg += "\n" if self.ndim > 1: msg += "Splitting by %d variables at a time\n" % self.ndim if self.is_fitted_: msg += "Consisting of %d trees\n" % self._ntrees if self._ncols_numeric > 0: msg += "Numeric columns: %d\n" % self._ncols_numeric if self._ncols_categ: msg += "Categorical columns: %d\n" % self._ncols_categ return msg def __repr__(self): return self.__str__() def _get_model_obj(self): return self._cpp_obj.get_cpp_obj(self._is_extended_) def _get_imputer_obj(self): return self._cpp_obj.get_imputer() def _check_can_use_imputer(self, X_cat): if (self.build_imputer) and (self.ndim == 1) and (X_cat is not None) and (X_cat.shape[1]): if (self.categ_split_type != "single_categ") and (self.new_categ_action == "weighted"): raise ValueError("Cannot build imputer with 'ndim=1' + 'new_categ_action=weighted'.") if self.missing_action == "divide": raise ValueError("Cannot build imputer with 'ndim=1' + 'missing_action=divide'.") def fit(self, X, y = None, sample_weights = None, column_weights = None, categ_cols = None): """ Fit isolation forest model to data Parameters ---------- X : array or array-like (n_samples, n_features) Data to which to fit the model. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix. If passing a DataFrame, will assume that columns are: - Numeric, if their dtype is a subtype of NumPy's 'number' or 'datetime64'. - Categorical, if their dtype is 'object', 'Categorical', or 'bool'. Note that, if `Categorical` dtypes are ordered, the order will be ignored here. Other dtypes are not supported. Note that, if passing NumPy arrays, they are used in column-major order (a.k.a. "Fortran arrays"), and if they are not already in column-major format, will need to create a copy of the data. y : None Not used. Kept as argument for compatibility with SciKit-learn pipelining. sample_weights : None or array(n_samples,) Sample observation weights for each row of 'X', with higher weights indicating either higher sampling probability (i.e. the observation has a larger effect on the fitted model, if using sub-samples), or distribution density (i.e. if the weight is two, it has the same effect of including the same data point twice), according to parameter 'weights_as_sample_prob' in the model constructor method. column_weights : None or array(n_features,) Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion. If passing None, each column will have a uniform weight. Cannot be used when weighting by kurtosis. categ_cols : None or array-like Columns that hold categorical features, when the data is passed as an array or matrix. Categorical columns should contain only integer values with a continuous numeration starting at zero, with negative values and NaN taken as missing, and the array or list passed here should correspond to the column numbers, with numeration starting at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`). 
This might be passed either at construction time or when calling ``fit`` or variations of ``fit``. This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as categorical depending on their dtype. Returns ------- self : obj This object. """ self._init(categ_cols) if ( self.sample_size is None and (sample_weights is not None) and (self.weights_as_sample_prob) ): raise ValueError("Sampling weights are only supported when using sub-samples for each tree.") if column_weights is not None and self.weigh_by_kurtosis: raise ValueError("Cannot pass column weights when weighting columns by kurtosis.") self._reset_obj() X_num, X_cat, ncat, sample_weights, column_weights, nrows = self._process_data(X, sample_weights, column_weights) self._check_can_use_imputer(X_cat) if self.sample_size is None: sample_size = nrows elif self.sample_size == "auto": sample_size = min(nrows, 10000) if (sample_weights is not None) and (self.weights_as_sample_prob): raise ValueError("Sampling weights are only supported when using sub-samples for each tree.") elif self.sample_size <= 1: sample_size = int(np.ceil(self.sample_size * nrows)) if sample_size < 2: raise ValueError("Sampling proportion amounts to a single row or less.") else: sample_size = self.sample_size if self.max_depth == "auto": max_depth = 0 limit_depth = True elif self.max_depth is None: max_depth = nrows - 1 limit_depth = False else: max_depth = self.max_depth limit_depth = False if self.ncols_per_tree is None: ncols_per_tree = 0 elif self.ncols_per_tree <= 1: ncols_tot = 0 if X_num is not None: ncols_tot += X_num.shape[1] if X_cat is not None: ncols_tot += X_cat.shape[1] ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot)) else: ncols_per_tree = self.ncols_per_tree if isinstance(self.random_state, np.random.RandomState): seed = self.random_state.randint(np.iinfo(np.int32).max) else: seed = self.random_seed self._cpp_obj.fit_model(_get_num_dtype(X_num, sample_weights, column_weights), _get_int_dtype(X_num), X_num, X_cat, ncat, sample_weights, column_weights, ctypes.c_size_t(nrows).value, ctypes.c_size_t(self._ncols_numeric).value, ctypes.c_size_t(self._ncols_categ).value, ctypes.c_size_t(self.ndim).value, ctypes.c_size_t(self.ntry).value, self.coefs, ctypes.c_bool(self.coef_by_prop).value, ctypes.c_bool(self.sample_with_replacement).value, ctypes.c_bool(self.weights_as_sample_prob).value, ctypes.c_size_t(sample_size).value, ctypes.c_size_t(self.ntrees).value, ctypes.c_size_t(max_depth).value, ctypes.c_size_t(ncols_per_tree).value, ctypes.c_bool(limit_depth).value, ctypes.c_bool(self.penalize_range).value, ctypes.c_bool(self.standardize_data).value, ctypes.c_bool(False).value, ctypes.c_bool(False).value, ctypes.c_bool(False).value, ctypes.c_bool(False).value, ctypes.c_bool(False).value, ctypes.c_bool(self.weigh_by_kurtosis).value, ctypes.c_double(self.prob_pick_avg_gain).value, ctypes.c_double(self.prob_split_avg_gain).value, ctypes.c_double(self.prob_pick_pooled_gain).value, ctypes.c_double(self.prob_split_pooled_gain).value, ctypes.c_double(self.min_gain).value, self.missing_action, self.categ_split_type, self.new_categ_action, ctypes.c_bool(self.build_imputer).value, ctypes.c_size_t(self.min_imp_obs).value, self.depth_imp, self.weigh_imp_rows, ctypes.c_bool(self.build_imputer).value, ctypes.c_bool(False).value, ctypes.c_uint64(seed).value, ctypes.c_int(self.nthreads).value) self.is_fitted_ = True self._ntrees = self.ntrees return self def fit_predict(self, X, column_weights = None, output_outlierness = 
"score", output_distance = None, square_mat = False, output_imputed = False, categ_cols = None): """ Fit the model in-place and produce isolation or separation depths along the way See the documentation of other methods ('init', 'fit', 'predict', 'predict_distance') for details. Note ---- The data must NOT contain any duplicate rows. Note ---- This function will be faster at predicting average depths than calling 'fit' + 'predict' separately when using full row samples. Note ---- If using 'penalize_range' = 'True', the resulting scores/depths from this function might differ a bit from those of 'fit' + 'predict' ran separately. Note ---- Sample weights are not supported for this method. Note ---- When using multiple threads, there can be small differences in the predicted scores or average depth or separation/distance between runs due to roundoff error. Parameters ---------- X : array or array-like (n_samples, n_features) Data to which to fit the model. Can pass a NumPy array, Pandas DataFrame, or SciPy sparse CSC matrix. If passing a DataFrame, will assume that columns are: - Numeric, if their dtype is a subtype of NumPy's 'number' or 'datetime64'. - Categorical, if their dtype is 'object', 'Categorical', or 'bool'. Note that, if `Categorical` dtypes are ordered, the order will be ignored here. Other dtypes are not supported. column_weights : None or array(n_features,) Sampling weights for each column in 'X'. Ignored when picking columns by deterministic criterion. If passing None, each column will have a uniform weight. Cannot be used when weighting by kurtosis. Note that, if passing a DataFrame with both numeric and categorical columns, the column names must not be repeated, otherwise the column weights passed here will not end up matching. output_outlierness : None or str in ["score", "avg_depth"] Desired type of outlierness output. If passing "score", will output standardized outlier score. If passing "avg_depth" will output average isolation depth without standardizing. If passing 'None', will skip outlierness calculations. output_distance : None or str in ["dist", "avg_sep"] Type of distance output to produce. If passing "dist", will standardize the average separation depths. If passing "avg_sep", will output the average separation depth without standardizing it (note that lower separation depth means furthest distance). If passing 'None', will skip distance calculations. square_mat : bool Whether to produce a full square matrix with the distances. If passing 'False', will output only the upper triangular part as a 1-d array in which entry (i,j) with 0 <= i < j < n is located at position p(i,j) = (i * (n - (i+1)/2) + j - i - 1). Ignored when passing 'output_distance' = 'None'. output_imputed : bool Whether to output the data with imputed missing values. Model object must have been initialized with 'build_imputer' = 'True'. categ_cols : None or array-like Columns that hold categorical features, when the data is passed as an array or matrix. Categorical columns should contain only integer values with a continuous numeration starting at zero, with negative values and NaN taken as missing, and the array or list passed here should correspond to the column numbers, with numeration starting at zero. The maximum categorical value should not exceed 'INT_MAX' (typically :math:`2^{31}-1`). This might be passed either at construction time or when calling ``fit`` or variations of ``fit``. 
This is ignored when the input is passed as a ``DataFrame`` as then it will consider columns as categorical depending on their dtype. Returns ------- output : array(n_samples,), or dict Requested outputs about isolation depth (outlierness), pairwise separation depth (distance), and/or imputed missing values. If passing either 'output_distance' or 'output_imputed', will return a dictionary with keys "pred" (array(n_samples,)), "dist" (array(n_samples * (n_samples - 1) / 2,) or array(n_samples, n_samples)), "imputed" (array-like(n_samples, n_columns)), according to whether each output type is present. """ self._init(categ_cols) if (self.sample_size is not None) and (self.sample_size != "auto"): raise ValueError("Cannot use 'fit_predict' when the sample size is limited.") if self.sample_with_replacement: raise ValueError("Cannot use 'fit_predict' or 'fit_transform' when sampling with replacement.") if column_weights is not None and self.weigh_by_kurtosis: raise ValueError("Cannot pass column weights when weighting columns by kurtosis.") if (output_outlierness is None) and (output_distance is None): raise ValueError("Must pass at least one of 'output_outlierness' or 'output_distance'.") if output_outlierness is not None: assert output_outlierness in ["score", "avg_depth"] if output_distance is not None: assert output_distance in ["dist", "avg_sep"] if output_imputed: if self.missing_action == "fail": raise ValueError("Cannot impute missing values when using 'missing_action' = 'fail'.") if not self.build_imputer: msg = "Trying to impute missing values from object " msg += "that was initialized with 'build_imputer' = 'False' " msg += "- will force 'build_imputer' to 'True'." warnings.warn(msg) self.build_imputer = True self._reset_obj() X_num, X_cat, ncat, sample_weights, column_weights, nrows = self._process_data(X, None, column_weights) self._check_can_use_imputer(X_cat) if (output_imputed) and (issparse(X_num)): msg = "Imputing missing values from CSC matrix on-the-fly can be very slow, " msg += "it's recommended if possible to fit the model first and then pass the " msg += "same matrix as CSR to 'transform'." 
warnings.warn(msg) if self.max_depth == "auto": max_depth = 0 limit_depth = True elif self.max_depth is None: max_depth = nrows - 1 else: max_depth = self.max_depth limit_depth = False if self.ncols_per_tree is None: ncols_per_tree = 0 elif self.ncols_per_tree <= 1: ncols_tot = 0 if X_num is not None: ncols_tot += X_num.shape[1] if X_cat is not None: ncols_tot += X_cat.shape[1] ncols_per_tree = int(np.ceil(self.ncols_per_tree * ncols_tot)) else: ncols_per_tree = self.ncols_per_tree if isinstance(self.random_state, np.random.RandomState): seed = self.random_state.randint(np.iinfo(np.int32).max) else: seed = self.random_seed depths, tmat, dmat, X_num, X_cat = self._cpp_obj.fit_model(_get_num_dtype(X_num, None, column_weights), _get_int_dtype(X_num), X_num, X_cat, ncat, None, column_weights, ctypes.c_size_t(nrows).value, ctypes.c_size_t(self._ncols_numeric).value, ctypes.c_size_t(self._ncols_categ).value, ctypes.c_size_t(self.ndim).value, ctypes.c_size_t(self.ntry).value, self.coefs, ctypes.c_bool(self.coef_by_prop).value, ctypes.c_bool(self.sample_with_replacement).value, ctypes.c_bool(self.weights_as_sample_prob).value, ctypes.c_size_t(nrows).value, ctypes.c_size_t(self.ntrees).value, ctypes.c_size_t(max_depth).value, ctypes.c_size_t(ncols_per_tree).value, ctypes.c_bool(limit_depth).value, ctypes.c_bool(self.penalize_range).value, ctypes.c_bool(self.standardize_data).value, ctypes.c_bool(output_distance is not None).value, ctypes.c_bool(output_distance == "dist").value, ctypes.c_bool(square_mat).value, ctypes.c_bool(output_outlierness is not None).value, ctypes.c_bool(output_outlierness == "score").value, ctypes.c_bool(self.weigh_by_kurtosis).value, ctypes.c_double(self.prob_pick_avg_gain).value, ctypes.c_double(self.prob_split_avg_gain).value, ctypes.c_double(self.prob_pick_pooled_gain).value, ctypes.c_double(self.prob_split_pooled_gain).value, ctypes.c_double(self.min_gain).value, self.missing_action, self.categ_split_type, self.new_categ_action, ctypes.c_bool(self.build_imputer).value, ctypes.c_size_t(self.min_imp_obs).value, self.depth_imp, self.weigh_imp_rows, ctypes.c_bool(output_imputed).value, ctypes.c_bool(self.all_perm).value, ctypes.c_uint64(seed).value, ctypes.c_int(self.nthreads).value) self.is_fitted_ = True self._ntrees = self.ntrees if (not output_distance) and (not output_imputed): return depths else: outp = {"pred" : depths} if output_distance: if square_mat: outp["dist"] = dmat else: outp["dist"] = tmat if output_imputed: outp["imputed"] = self._rearrange_imputed(X, X_num, X_cat) return outp def _process_data(self, X, sample_weights, column_weights): ### TODO: this needs a refactoring after introducing 'categ_cols' if X.__class__.__name__ == "DataFrame": if self.categ_cols is not None: warnings.warn("'categ_cols' is ignored when passing a DataFrame as input.") self.categ_cols = None ### https://stackoverflow.com/questions/25039626/how-do-i-find-numeric-columns-in-pandas X_num = X.select_dtypes(include = [np.number, np.datetime64]).to_numpy() if X_num.dtype not in [ctypes.c_double, ctypes.c_float]: X_num = X_num.astype(ctypes.c_double) if not _is_col_major(X_num): X_num = np.asfortranarray(X_num) X_cat = X.select_dtypes(include = [pd.CategoricalDtype, "object", "bool"]) if (X_num.shape[1] + X_cat.shape[1]) == 0: raise ValueError("Input data has no columns of numeric or categorical type.") elif (X_num.shape[1] + X_cat.shape[1]) < X.shape[1]: cols_num = np.array(X.select_dtypes(include = [np.number, np.datetime64]).columns.values) cols_cat = np.array(X_cat.columns.values) 
msg = "Only numeric and categorical columns are supported." msg += " Got passed the following types: [" msg += ", ".join([str(X[cl].dtype) for cl in X.columns.values if cl not in cols_num and cl not in cols_cat][:3]) msg += "]\n(Sample problem columns: [" msg += ", ".join([str(cl) for cl in X.columns.values if cl not in cols_num and cl not in cols_cat][:3]) msg += "])" raise ValueError(msg) self._ncols_numeric = X_num.shape[1] self._ncols_categ = X_cat.shape[1] self.cols_numeric_ = np.array(X.select_dtypes(include = [np.number, np.datetime64]).columns.values) self.cols_categ_ = np.array(X.select_dtypes(include = [pd.CategoricalDtype, "object", "bool"]).columns.values) if not self._ncols_numeric: X_num = None else: nrows = X_num.shape[0] if not self._ncols_categ: X_cat = None else: nrows = X_cat.shape[0] has_ordered = False if X_cat is not None: self._cat_mapping = [None for cl in range(X_cat.shape[1])] for cl in range(X_cat.shape[1]): if (X_cat[X_cat.columns[cl]].dtype.name == "category") and (X_cat[X_cat.columns[cl]].dtype.ordered): has_ordered = True if (not self.recode_categ) and (X_cat[X_cat.columns[cl]].dtype.name == "category"): self._cat_mapping[cl] = np.array(X_cat[X_cat.columns[cl]].cat.categories) X_cat = X_cat.assign(**{X_cat.columns[cl] : X_cat[X_cat.columns[cl]].cat.codes}) else: cl, self._cat_mapping[cl] =
pd.factorize(X_cat[X_cat.columns[cl]])
pandas.factorize
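The completion above shows `_process_data` falling back on `pd.factorize` to turn a categorical column into integer codes while keeping the array of categories for later decoding. A minimal, self-contained sketch of that pattern (the column data below is illustrative, not taken from the isotree source):

import pandas as pd

# Hypothetical categorical column; stands in for X_cat[X_cat.columns[cl]].
col = pd.Series(["dog", "cat", "dog", None, "bird"])

# factorize returns integer codes (missing values become -1) plus the unique
# categories, which would play the role of the stored category mapping.
codes, mapping = pd.factorize(col)

print(codes)    # e.g. [ 0  1  0 -1  2]
print(mapping)  # Index(['dog', 'cat', 'bird'], dtype='object')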
import numpy as np import pandas as pd from ggplot import * area = np.array([30, 35, 37, 59, 70, 76, 88, 100]).astype(np.float32) price = np.array([1100, 1423, 1377, 1800, 2304, 2588, 3495, 4839]).astype(np.float32) data_1 = {"area":
pd.Series(area)
pandas.Series
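The snippet above is cut off while building a dict of columns for plotting, and the completion wraps the NumPy array in `pd.Series`. A hedged sketch of how that dict would typically be completed and turned into a DataFrame (the variable names mirror the snippet; the rest is an assumption):

import numpy as np
import pandas as pd

area = np.array([30, 35, 37, 59, 70, 76, 88, 100]).astype(np.float32)
price = np.array([1100, 1423, 1377, 1800, 2304, 2588, 3495, 4839]).astype(np.float32)

# Each column becomes a Series; the dict of Series then becomes a DataFrame
# that can be handed to a plotting library.
data_1 = {"area": pd.Series(area), "price": pd.Series(price)}
df = pd.DataFrame(data_1)
print(df.head())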
#!/usr/bin/env python # coding: utf-8 # ### DSCI 510 Fall 2021 Final Project Submission # 1. **The name of student**: # # <NAME> # 2. **About the project (Motivation):** # # I am a fan of soccer. Therefore, I decided to choose this topic as my first data analysis project. # The analysis I would like to do with the combined data is to find: # # 1) What affects the transfer cost of a soccer player? For example, does the number of goals affect the transfer cost of a soccer player? # # 2) Do players with a high market value influence the results of the match? # 3. **Datasources:** # # **Source 1** = https://www.transfermarkt.us/ - one of the biggest soccer databases and communities in the world. # We will get information about the most valuable players by web-scraping. # # **Source 2** = https://api.football-data.org - an external public API that provides football data and statistics # (live scores, fixtures, tables, squads, lineups/subs, etc.) in a machine-readable way. # We will get information about players and their match results via API requests. # # **Source 3** = https://www.theguardian.com/football - the soccer section of the news portal, with current standings of soccer clubs. # We will get information about soccer clubs in the 5 top European soccer leagues by web-scraping. # 4. **Information about API keys for Source 2:** # # We have to register to receive an API key by email. The free API key has limitations: # we can send no more than 10 requests per minute. # 5. **How to run the code** # # We can get the clean data used in this notebook analysis directly from the data subfolder, where the data sets already exist, or you can run the data_collector.py file to get the data sets from the Internet. # # To do so, use the command line: python .\src\data_collector.py, and the datasets will be stored in the data subfolder. # # Be aware that it takes more than 25 minutes to scrape the datasets from the sources (especially source 2) because the API source has a limitation of 10 calls/minute. # # This project requires the following packages: # # pandas, numpy, seaborn, requests, and beautifulsoup. To run this project, make sure the above packages are installed, and then simply clone the repo at https://github.com/bauyrzha/DSCI510-finalproject and execute this notebook. # # If it cannot run successfully, check the requirements.txt. # # We can also collect data from the sources separately by running Scrapping_source_1.py, Api_request_source_2.py, and Scrapping_source_3.py. # # Analysis performed for combined data sources 1 and 2 # # 1) Before analyzing, let's find out what variables we have. # # **name** - name of players. # # **position** - position of players on the soccer pitch. # # **Age** - age of players. # # **Nat.** - nationality of players. # # **Market value** - the cost of players in the transfer market. # # **club** - the name of clubs where players are playing. # # **Goals** - the number of goals of players in the current season (2021-2022). # # **Assists** - the number of assists of players in the current season (2021-2022). # # **win** - the number of wins in the current season (2021-2022). # # **draw** - the number of draws in the current season (2021-2022). # # **lost** - the number of losses in the current season (2021-2022). # 6. **Create new variables that will be needed for analysis.** # # Below the following variables will be created: # # **player_avg_points** - the average earned points of players in one match in the current season (2021-2022).
# # **continent** - we divided players into two categories: players who are from European countries and who are from other (non-Europe countries) # #import necessary libraries import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np from bs4 import BeautifulSoup import requests import sys import http.client import json import time import os args = sys.argv[1:] def scrape_analysis(): print('it takes more than 35 minutes to scrape datasets from the internet') ##############################################################Source1############################################################################# #load webpage https://www.transfermarkt.co.uk # headers will be used to trick the website that we open it like a browser, not like a scrapping tool. headers = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'} page = 'https://www.transfermarkt.co.uk/spieler-statistik/wertvollstespieler/marktwertetop?ajax=yw1&altersklasse=alle&ausrichtung=alle&jahrgang=0&kontinent_id=0&land_id=0&page=1&plus=1&spielerposition_id=alle' try: content = requests.get(page, headers=headers) except: print('Source 1 unavailable. Check the internet connection') soup = BeautifulSoup(content.content, 'html.parser') #identify columns from content page player_table = soup.find("table", attrs={"class": "items"}) columns = [] for th in player_table.find_all('th'): title = th.text if title !='': columns.append(title) else: columns.append(th.find('div').get('title')) # in the website not all headers have values, it uses icons, therefore we get title value # below when we scrap the data we will get some unnecessary items # we create some extra columns, then we will drop them columns.insert(1, "delete1") columns.insert(2, "delete2") columns.insert(4, "position") columns2 = ["delete3","delete4", "delete5"] columns.extend(columns2) # create DataFrame with necessary columns data_tm = pd.DataFrame(columns = columns) # Because of necessary data are divided into 20 webpages, we will use for loop for i in range(1,21): page = 'https://www.transfermarkt.co.uk/spieler-statistik/wertvollstespieler/marktwertetop?ajax=yw1&altersklasse=alle&ausrichtung=alle&jahrgang=0&kontinent_id=0&land_id=0&page=' + str(i) + '&plus=1&spielerposition_id=alle' try: content = requests.get(page, headers=headers) except: print('Source 1 unavailable. Check the internet connection') soup = BeautifulSoup(content.content, 'html.parser') #scrap data from content page listo = [] player_table = soup.find("table", attrs={"class": "items"}) for tr in player_table.find_all('tr'): data = tr.find_all('td') for tr in data: item = tr.text if item !='': listo.append(item) else: listo.append(tr.find('img').get('alt')) # where images are used instead of item, we get information from images. 
# insert data into DataFrame for i in range(0, len(listo), len(columns)): kalisto = listo[i:i + len(columns)] data_tm_length = len(data_tm) data_tm.loc[data_tm_length] = kalisto # make copy of data_frame df_source1 = data_tm.copy() # rename a column df_source1.rename(columns={'Player': 'name'}, inplace = True) # leave only necessary columns df_source1 = df_source1[['name', 'position', 'Age', 'Nat.', 'Market value', 'club', 'Goals', 'Assists']] # checking our DataFrame df_source1 path = '../data' # Check whether the specified path exists or not isExist = os.path.exists(path) if not isExist: # Create a new directory because it does not exist os.makedirs(path) print("The new directory is created!") # export our data into csv file. df_source1.to_csv('../data/df_source1.csv', index=False) print("df_source1 is exported to '../data/df_source1.csv'") ############################################################################Source2################################################################ #my api_key. You will receive your own if you register on https://api.football-data.org Api_key = '<KEY>' # create connection with https://api.football-data.org # get competitions' id and name connection = http.client.HTTPConnection('api.football-data.org') headers = { 'X-Auth-Token': Api_key } try: connection.request('GET', '/v2/competitions/', None, headers) response = json.loads(connection.getresponse().read().decode()) p = response['competitions'] except: print('Source 2 unavailable or you reached the maximum request in particular time. You must not request more often than 10 calls/minute. Check the internet connection or Api key') competitions = pd.DataFrame() for item in p: competitions = competitions.append(item, ignore_index=True) competitions = competitions[['id', 'name', 'code']] competitions['id'] = competitions['id'].astype('int64') competitions # We use 5 top european soccer ligues # Create a list of ids' of 5 top european soccer ligues listo = [] kalisto = ['PL', 'BL1', 'SA', 'PD','FL1'] # code of 5 european soccer ligues (we can find from source 2) df = competitions.query("code == 'PL'") listo.append(df["id"].iloc[0]) df = competitions.query("code == 'BL1'") listo.append(df["id"].iloc[0]) df = competitions.query("code == 'SA'") listo.append(df["id"].iloc[0]) df = competitions.query("code == 'PD'") listo.append(df["id"].iloc[0]) df = competitions.query("code == 'FL1'") listo.append(df["id"].iloc[0]) # get teams' id, name which participate in 5 top european soccer ligues connection = http.client.HTTPConnection('api.football-data.org') headers = { 'X-Auth-Token': Api_key } teams = pd.DataFrame() for i in listo: time.sleep(8) try: connection.request('GET', '/v2/competitions/' + str(i) + '/teams?season=2021', None, headers) response = json.loads(connection.getresponse().read().decode()) p = response['teams'] except: print('Source 2 unavailable or you reached the maximum request in particular time. You must not request more often than 10 calls/minute. 
Check the internet connection or Api key') for item in p: teams = teams.append(item, ignore_index=True) teams = teams[['id', 'name']] teams['id'] = teams['id'].astype('int64') #teams.info() # Read in the data set from source 1 to retrieve necessary club names df_source1 = pd.read_csv('../data/df_source1.csv') # checking our DataFrame #df_source1.info() # list of clubs from source 1 club_list = df_source1['club'].to_list() #print(club_list) # matching list of clubs from source 1 with source 2 club_df = teams[teams['name'].isin(club_list)] #club_df.info() # we have 35 matches, for a simple analysis they are enough # get list of clubs' id id_clubs = club_df["id"].to_list() #print(id_clubs) #timeout time.sleep(8) # get players' id, name which play for the clubs # we use time.sleep due to this api source has limitation 10 calls/minute # the data will be collected approximately 3-4 minutes # If you do not want to wait, we can import already collected data from csv file, just uncomment the code (the cell) below connection = http.client.HTTPConnection('api.football-data.org') headers = { 'X-Auth-Token': Api_key } players = pd.DataFrame() for i in id_clubs: time.sleep(8) try: connection.request('GET', '/v2/teams/' + str(i), None, headers) response = json.loads(connection.getresponse().read().decode()) p = response['squad'] except: print('Source 2 unavailable or you reached the maximum request in particular time. You must not request more often than 10 calls/minute. Check the internet connection or Api key') for item in p: players = players.append(item, ignore_index=True) players = players[['id', 'name']] players['id'] = players['id'].astype('int64') #players.info() # Read in the data set from source 1 to retrieve necessary player names #players = pd.read_csv('../data/players_api.csv') # export our data into csv file. 
players.to_csv('../data/players_api.csv', index=False) # list of players from source 1 player_list = df_source1['name'].to_list() # matching list of players from source 1 with source 2 player_df = players[players['name'].isin(player_list)] #player_df.info() # we have 202 matches, for a simple analysis they are enough # get list of players' id and list of player names id_players = player_df["id"].to_list() name_players = player_df["name"].to_list() # we leave the players who matched df_source2 = df_source1[df_source1['name'].isin(name_players)] #df_source2.info() # we sort by name player_df = player_df.sort_values(by=['name']) df_source2 = df_source2.sort_values(by=['name']) # we add the "id" column from source2 into source1 df_source2['id'] = player_df['id'].values # checking our DataFrame #df_source2.info() #timeout time.sleep(8) # we get the number of wins, draws, and losts of players between 2021-08-01(start season) and 2021-11-23 (the day when we write this code) # we use time.sleep due to this api source has limitation 10 calls/minute # the data will be collected approximately 20 minutes # If you do not want to wait, we can import already collected data from csv file, just uncomment the code (the cell) below connection = http.client.HTTPConnection('api.football-data.org') headers = { 'X-Auth-Token': Api_key } d = {} win_list = [] draw_list = [] lost_list = [] match_result = pd.DataFrame() for i in df_source2['id'].values: time.sleep(8) try: connection.request('GET', '/v2/players/' + str(i) + '/matches?status=FINISHED&dateFrom=2021-08-01&dateTo=2021-11-23', None, headers ) response = json.loads(connection.getresponse().read().decode()) a = df_source2[(df_source2['id'] == i)] p = response['matches'] except: print('Source 2 unavailable or you reached the maximum request in particular time. You must not request more often than 10 calls/minute. Check the internet connection or Api key') win = 0 draw = 0 lost = 0 for t in p: s = t['score'] h = t['homeTeam'] nh = h['name'] if nh in a['club'].values and s['winner'] == 'HOME_TEAM': win += 1 elif s['winner'] == 'DRAW': draw += 1 else: lost += 1 win_list.append(win) draw_list.append(draw) lost_list.append(lost) d['win'] = win_list d['draw'] = draw_list d['lost'] = lost_list match_result =
pd.DataFrame.from_dict(d)
pandas.DataFrame.from_dict
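The completion builds the match-result table from a dict of equal-length lists. A small standalone illustration of `pd.DataFrame.from_dict` using the same column names (the counts are made up for the example):

import pandas as pd

# Hypothetical win/draw/loss counts for three players.
d = {"win": [8, 5, 10], "draw": [3, 4, 1], "lost": [2, 4, 2]}

# With the default orient="columns", each dict key becomes a column.
match_result = pd.DataFrame.from_dict(d)
print(match_result)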
# coding: utf-8 # --- # # _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._ # # --- # # The Series Data Structure # In[ ]: import pandas as pd # In[ ]: animals = ['Tiger', 'Bear', 'Moose'] pd.Series(animals) # In[ ]: numbers = [1, 2, 3] pd.Series(numbers) # In[ ]: animals = ['Tiger', 'Bear', None] pd.Series(animals) # In[ ]: numbers = [1, 2, None] pd.Series(numbers) # In[ ]: import numpy as np np.nan == None # In[ ]: np.nan == np.nan # In[ ]: np.isnan(np.nan) # In[ ]: sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports) s # In[ ]: s.index # In[ ]: s =
pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])
pandas.Series
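The completion creates a Series with an explicit index, after which lookups can be done by label or by position. A brief sketch (the lookup calls are standard pandas and not part of the notebook itself):

import pandas as pd

s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])

print(s.loc['Canada'])  # label-based access -> 'Moose'
print(s.iloc[0])        # position-based access -> 'Tiger'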
#!/usr/bin/env python # coding: utf-8 import json import pandas as pd from pandas.api.types import is_numeric_dtype import numpy as np from scipy.stats import ks_2samp, chisquare #import matplotlib.pyplot as plt import plotly.graph_objs as go import plotly.express as px from plotly.subplots import make_subplots from evidently.model.widget import BaseWidgetInfo, AlertStats, AdditionalGraphInfo from evidently.widgets.widget import Widget red = "#ed0400" grey = "#4d4d4d" class UnderperformSegmTableWidget(Widget): def __init__(self, title: str): super().__init__() self.title = title def get_info(self) -> BaseWidgetInfo: if self.wi: return self.wi raise ValueError("no widget info provided") def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping): if column_mapping: date_column = column_mapping.get('datetime') id_column = column_mapping.get('id') target_column = column_mapping.get('target') prediction_column = column_mapping.get('prediction') num_feature_names = column_mapping.get('numerical_features') if num_feature_names is None: num_feature_names = [] else: num_feature_names = [name for name in num_feature_names if is_numeric_dtype(reference_data[name])] cat_feature_names = column_mapping.get('categorical_features') if cat_feature_names is None: cat_feature_names = [] else: cat_feature_names = [name for name in cat_feature_names if is_numeric_dtype(reference_data[name])] else: date_column = 'datetime' if 'datetime' in reference_data.columns else None id_column = None target_column = 'target' if 'target' in reference_data.columns else None prediction_column = 'prediction' if 'prediction' in reference_data.columns else None utility_columns = [date_column, id_column, target_column, prediction_column] num_feature_names = list(set(reference_data.select_dtypes([np.number]).columns) - set(utility_columns)) cat_feature_names = list(set(reference_data.select_dtypes([np.object]).columns) - set(utility_columns)) if production_data is not None: production_data.replace([np.inf, -np.inf], np.nan, inplace=True) production_data.dropna(axis=0, how='any', inplace=True) reference_data.replace([np.inf, -np.inf], np.nan, inplace=True) reference_data.dropna(axis=0, how='any', inplace=True) ref_error = reference_data[prediction_column] - reference_data[target_column] prod_error = production_data[prediction_column] - production_data[target_column] ref_quntile_5 = np.quantile(ref_error, .05) ref_quntile_95 = np.quantile(ref_error, .95) prod_quntile_5 = np.quantile(prod_error, .05) prod_quntile_95 = np.quantile(prod_error, .95) #create subplots reference_data['dataset'] = 'Reference' reference_data['Error bias'] = list(map(lambda x : 'Underestimation' if x <= ref_quntile_5 else 'Majority' if x < ref_quntile_95 else 'Overestimation', ref_error)) production_data['dataset'] = 'Current' production_data['Error bias'] = list(map(lambda x : 'Underestimation' if x <= prod_quntile_5 else 'Majority' if x < prod_quntile_95 else 'Overestimation', prod_error)) merged_data =
pd.concat([reference_data, production_data])
pandas.concat
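The completion stacks the labelled reference and current frames so that both can be summarized or plotted together. A minimal sketch of the same `pd.concat` pattern with toy frames (column contents are illustrative):

import pandas as pd

reference_data = pd.DataFrame({"target": [1.0, 2.0], "prediction": [1.1, 1.9]})
reference_data["dataset"] = "Reference"

production_data = pd.DataFrame({"target": [3.0, 4.0], "prediction": [2.7, 4.4]})
production_data["dataset"] = "Current"

# Row-wise concatenation; ignore_index gives the merged frame a fresh index.
merged_data = pd.concat([reference_data, production_data], ignore_index=True)
print(merged_data)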
''' ... ''' import os import numpy as np import pandas as pd import datetime as dt from tqdm import tqdm import lib.utils as utils import lib.db_utils as dutils from datetime import timedelta from collections import defaultdict from dateutil.relativedelta import relativedelta class DefineCohortSettings: def __init__(self, vacineja2plus_df, init_cohort, final_cohort): ''' Description. Args: vacineja2plus_df: ''' self.vacineja2plus_df = vacineja2plus_df.copy() self.init_cohort = init_cohort self.final_cohort = final_cohort def define_eligibility(self, partial=14, fully=14, return_=True): ''' ''' subset = ["DATA D1(VACINADOS)", "DATA D2(VACINADOS)"] self.vacineja2plus_df["VACINA STATUS - COORTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_when_vaccine(x, self.init_cohort, self.final_cohort), axis=1) self.vacineja2plus_df["IMUNIZACAO MAXIMA ATE FIM DA COORTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_immunization(x, self.init_cohort, self.final_cohort, partial, fully), axis=1) # --> Eligibility by tests subset = ["DATA SOLICITACAO(TESTES)", "DATA COLETA(TESTES)", "RESULTADO FINAL GAL-INTEGRASUS"] self.vacineja2plus_df["ELIGIBILIDADE TESTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_eligible_test(x, self.init_cohort, self.final_cohort), axis=1) subset = "IMUNIZACAO MAXIMA ATE FIM DA COORTE" aptos = ["NAO VACINADO", "PARCIALMENTE IMUNIZADO", "TOTALMENTE IMUNIZADO", "VACINADO SEM IMUNIZACAO"] self.vacineja2plus_df["ELIGIBILIDADE COORTE GERAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x in aptos else "NAO APTO") # --> Eligibility for cases partial self.vacineja2plus_df["ELIGIBILIDADE EXPOSTO PARCIAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x=="PARCIALMENTE IMUNIZADO" else "NAO APTO") # --> Eligibility for cases fully self.vacineja2plus_df["ELIGIBILIDADE EXPOSTO TOTAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x=="TOTALMENTE IMUNIZADO" else "NAO APTO") # --> Create column with age based on the final of cohort. self.vacineja2plus_df["IDADE"] = self.vacineja2plus_df["DATA NASCIMENTO(VACINEJA)"].apply(lambda x: relativedelta(self.final_cohort, x.date()).years) self.vacineja2plus_df = self.vacineja2plus_df.drop_duplicates(subset=["CPF"], keep="first") if return_: return self.vacineja2plus_df def dynamical_matching(self, vaccine="CORONAVAC", return_=True, verbose=False, age_thr=18, seed=0): ''' Description. Args: return_: Return: ''' if "ELIGIBILIDADE TESTE" not in self.vacineja2plus_df.columns: return -1 datelst = utils.generate_date_list(self.init_cohort, self.final_cohort) # --> Apply essential filters # First, consider only people with age older or equal to 18 years old. df = self.vacineja2plus_df[self.vacineja2plus_df["IDADE"]>=age_thr] df = df[df["OBITO INCONSISTENCIA"]!="S"] df = df[df["DATA VACINA CONSISTENCIA"]!="N"] # Filter by eligibility df = df[(df["ELIGIBILIDADE TESTE"]=="APTO") & (df["ELIGIBILIDADE COORTE GERAL"]=="APTO")] # Obtain set of vaccinated and unvaccinated. 
df_vaccinated = df[df["VACINA(VACINADOS)"]==vaccine] df_vaccinated = df_vaccinated.dropna(subset=["DATA D1(VACINADOS)"], axis=0) df_unvaccinated = df[pd.isna(df["VACINA(VACINADOS)"])] if verbose: print(f"Dimensão de elegíveis após aplicacão das condições: {df.shape}") print(f"Número restante de óbitos: {df['DATA OBITO'].notnull().sum()}") print(f"Número restante de hospitalizados: {df['DATA HOSPITALIZACAO'].notnull().sum()}") print(f"Número restante de testes: {df['DATA SOLICITACAO(TESTES)'].notnull().sum()}") print(f"Número de vacinados elegíveis para {vaccine}: {df_vaccinated.shape[0]}") #condition_exposed1 = df_vaccinated["ELIGIBILIDADE TESTE"]=="APTO" #condition_exposed2 = df_vaccinated["ELIGIBILIDADE COORTE GERAL"]=="APTO" #df_vaccinated = df_vaccinated[(condition_exposed1) & (condition_exposed2)] #condition_unexposed1 = df_unvaccinated["ELIGIBILIDADE TESTE"]=="APTO" #condition_unexposed2 = df_unvaccinated["ELIGIBILIDADE COORTE GERAL"]=="APTO" #df_unvaccinated = df_unvaccinated[(condition_unexposed1) & (condition_unexposed2)] # -- CREATE CONTROL RESERVOIR -- control_dates = { "D1": defaultdict(lambda:-1), "DEATH": defaultdict(lambda:-1), "HOSPITAL": defaultdict(lambda:-1) } control_reservoir = defaultdict(lambda:[]) control_used = defaultdict(lambda: False) df_join = pd.concat([df_vaccinated, df_unvaccinated]) print("Criando reservatório de controles ...") for j in tqdm(range(0, df_join.shape[0])): cpf = df_join["CPF"].iat[j] age = df_join["IDADE"].iat[j] sex = df_join["SEXO(VACINEJA)"].iat[j] d1 = df_join["DATA D1(VACINADOS)"].iat[j] dt_death = df_join["DATA OBITO"].iat[j] dt_hospt = df_join["DATA HOSPITALIZACAO"].iat[j] control_reservoir[(age,sex)].append(cpf) if not pd.isna(d1): control_dates["D1"][cpf] = d1.date() if not pd.isna(dt_death): control_dates["DEATH"][cpf] = dt_death.date() if not pd.isna(dt_hospt): control_dates["HOSPITAL"][cpf] = dt_hospt.date() if seed!=0: np.random.seed(seed) for key in control_reservoir.keys(): np.random.shuffle(control_reservoir[key]) matchings = defaultdict(lambda:-1) print("Executando pareamento ...") for cur_date in tqdm(datelst): # Select all people who was vaccinated at the current date df_vaccinated["compare_date"] = df_vaccinated["DATA D1(VACINADOS)"].apply(lambda x: "TRUE" if x.date()==cur_date else "FALSE") current_vaccinated = df_vaccinated[df_vaccinated["compare_date"]=="TRUE"] #print(current_vaccinated.shape) cpf_list = current_vaccinated["CPF"].tolist() age_list = current_vaccinated["IDADE"].tolist() sex_list = current_vaccinated["SEXO(VACINEJA)"].tolist() date_list = current_vaccinated["DATA D1(VACINADOS)"].tolist() # For each person vaccinated at the current date, check if there is a control for he/she. for j in range(0, len(cpf_list)): pair = find_pair(cur_date, age_list[j], sex_list[j], control_reservoir, control_used, control_dates) if pair!=-1: matchings[cpf_list[j]] = pair items_matching = matchings.items() pareados = pd.DataFrame({"CPF CASO": [ x[0] for x in items_matching ], "CPF CONTROLE": [ x[1] for x in items_matching ]}) events_df = self.get_intervals(pareados, df_vaccinated, df_unvaccinated) matched = defaultdict(lambda:False) for cpf in [ x[0] for x in items_matching ]+[ x[1] for x in items_matching ]: matched[cpf]=True df_join["PAREADO"] = df_join["CPF"].apply(lambda x: "SIM" if matched[x] else "NAO") return events_df, df_join def get_intervals(self, df_pairs, df_vac, df_unvac): ''' Description. 
Args: df_pairs: df_vac: df_unvac: ''' pareado = defaultdict(lambda: False) matched_cpfs = df_pairs["CPF CASO"].tolist()+df_pairs["CPF CONTROLE"].tolist() [ pareado.update({cpf:True}) for cpf in matched_cpfs ] data_teste = defaultdict(lambda: np.nan) data_hospitalizado = defaultdict(lambda:np.nan) data_obito = defaultdict(lambda:np.nan) data_d1 = defaultdict(lambda:np.nan) data_d2 = defaultdict(lambda:np.nan) df_join = pd.concat([df_vac, df_unvac]) for j in range(0, df_join.shape[0]): cpf = df_join["CPF"].iat[j] obito = df_join["DATA OBITO"].iat[j] teste = df_join["DATA SOLICITACAO(TESTES)"].iat[j] hospitalizacao = df_join["DATA HOSPITALIZACAO"].iat[j] d1_dt = df_join["DATA D1(VACINADOS)"].iat[j] d2_dt = df_join["DATA D2(VACINADOS)"].iat[j] if not pd.isna(obito): data_obito[cpf] = obito if not pd.isna(d1_dt): data_d1[cpf] = d1_dt if not pd.isna(d2_dt): data_d2[cpf] = d2_dt if not
pd.isna(teste)
pandas.isna
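The "if not pd.isna(...)" guards above rely on pd.isna treating NaN, None and NaT uniformly; a quick stand-alone sketch of that behaviour:

import numpy as np
import pandas as pd

print(pd.isna(np.nan), pd.isna(None), pd.isna(pd.NaT))  # True True True
print(pd.isna(pd.Timestamp("2021-06-01")))              # False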
### ### load libraries ### import cProfile import random import numpy.random as rand import warnings import scipy.io as sio import os import sys import argparse import numpy as np import pandas as pd import geopandas as gpd import sys from IPython.display import Image from shapely.geometry import Point, Polygon from math import factorial import datetime import time import scipy from statsmodels.sandbox.regression.predstd import wls_prediction_std from sklearn.linear_model import LinearRegression from patsy import cr from pprint import pprint import matplotlib.pyplot as plt import seaborn as sb import sys # search path for modules # look @ https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path sys.path.append('/Users/hn/Documents/00_GitHub/Ag/remote_sensing/python/') import remote_sensing_core as remote_core """ # Plan for rough estimate: 1. perennials, and grasses and non irrigated (Why google slide doess not say annuals) """ ################################################################ ##### ##### directories ##### ################################################################ data_dir = "/Users/hn/Documents/01_research_data/Ag_check_point/remote_sensing/01_NDVI_TS/Grant/" ################################################################ ##### ##### Data Reading ##### ################################################################ # # See how you can list all files in the given directory to produce # file_names # file_names = ["Grant_2018_TS.csv"] file_N = file_names[0] a_df =
pd.read_csv(data_dir + file_N)
pandas.read_csv
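A self-contained sketch of the pd.read_csv call above, using an in-memory buffer and made-up NDVI columns instead of the real Grant_2018_TS.csv:

import io
import pandas as pd

csv_text = "ID,NDVI\n1,0.42\n2,0.57\n"
a_df = pd.read_csv(io.StringIO(csv_text))
print(a_df.dtypes)  # ID int64, NDVI float64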
#!/usr/bin/env python from __future__ import absolute_import, print_function # standard library import pathlib # third party import pandas as pd import numpy as np # project specific # from waldo.conf import settings from waldo import wio from waldo.wio import paths import multiworm import waldo.metrics.report_card as report_card class OutputWriter(object): """ """ def __init__(self, ex_id, graph=None, data_dir=None, output_dir=None): self.ex_id = ex_id if data_dir is None: self.data_dir = paths.experiment(ex_id) else: self.data_dir = pathlib.Path(data_dir) if output_dir is None: self.output_dir = paths.output(ex_id) else: self.output_dir = pathlib.Path(output_dir) self.experiment = wio.Experiment(fullpath=self.data_dir) if graph is None: graph_orig = self.experiment.graph.copy() graph, _ = report_card.iterative_solver(self.experiment, graph_orig) self.graph = graph def export(self, interpolate=False, callback1=None, callback2=None): """ """ ex_id = self.ex_id print('loading summary data') basename, summary_df = self._load_summary() lost_and_found = self._create_lost_and_found() # json.dump(lost_and_found, open('lost_and_found.json', 'w')) # blob_basename = '{bn}'.format(bn=basename) # make this run as last step before saving paths.mkdirp(self.output_dir) print('writing blobs files') file_management = self._write_blob_files(basename=basename, summary_df=summary_df, callback=callback1, interpolate=interpolate) # lost_and_found = json.load(open('lost_and_found.json', 'r')) # file_management = json.load(open('file_m.json', 'r')) print('recreating summary file') summary_lines = self._recreate_summary_file(summary_df, lost_and_found, file_management, callback=callback2) # def _recreate_summary_file(self, summary_df, lost_and_found, file_management, basename='chore'): sum_name = self.output_dir / self.experiment.summary_file.name self._write_summary_lines(sum_name, summary_lines) def _write_summary_lines(self, sum_name, lines): """ Keyword Arguments: lines -- """ print(sum_name, 'is writing') with open(str(sum_name), 'w') as f: for line in lines: f.write(line) def format_number_string(self, num, ndigits=5): s = str(int(num)) for i in range(ndigits): if len(s) < ndigits: s = '0' + s return s def _write_blob_files(self, basename='chore', interpolate='False', summary_df=None, callback=None): experiment = self.experiment graph = self.graph # all_frames = [int(f) for f in summary_df.index] # sdf = summary_df.reindex(all_frames) sdf = summary_df.set_index(0) file_management = {} mashup_history = [] file_counter = 0 # increments every time file is sucessfully written node_length = len(self.graph) for current_node_index, node in enumerate(self.graph): # get save file name in order. # print(node) # print(node_file) if callback: callback(current_node_index / float(node_length)) # start storing blob index into file_manager dict. 
node_data = graph.node[node] died_f = node_data['died_f'] components = node_data.get('components', []) if not components: components = [node] # print(components) line_data = [] component_record = [] for bid in components: try: lines = [l for l in experiment._blob_lines(int(bid))] except multiworm.core.MWTDataError: continue except ValueError: continue for l in lines: l = l.strip() parts = l.split() if len(parts) < 5: continue # line_data.append({'frame': int(parts[0]), # 'x': float(parts[2]), # 'y': float(parts[3]), # 'bid': bid, # 'line':l}) parts[0] = int(parts[0]) parts[2] = float(parts[2]) parts[3] = float(parts[3]) line_data.append(parts) component_record.append(bid) if not line_data: continue compiled_lines = pd.DataFrame(line_data) compiled_lines.fillna(' ', inplace=True) compiled_lines.rename(columns={0: 'frame', 1: 'time', 2: 'x', 3: 'y'}, inplace=True) mashup_df = compiled_lines[['frame', 'time', 'x', 'y']] mashup_df['node'] = node mashup_df['bid'] = component_record mashup_df = mashup_df[mashup_df.duplicated('frame')] mashup_history.append(mashup_df) compiled_lines.sort('frame', inplace=True) compiled_lines.drop_duplicates('frame', take_last=False, inplace=True) existing_frames = list(compiled_lines['frame']) all_frames = np.arange(int(existing_frames[0]), int(existing_frames[-1]) + 1) all_frames = [int(i) for i in all_frames] # print(all_frames[0], type(all_frames[0])) # If interpolate == True, # this section inserts values into otherwise empty rows if interpolate and len(existing_frames) != len(all_frames): # print(len(existing_frames), 'existing') # print(len(all_frames), 'with gaps filled') # print(compiled_lines.head(2)) compiled_lines.set_index('frame', inplace=True) compiled_lines = compiled_lines.reindex(all_frames) # print(compiled_lines.head()) for f in compiled_lines[compiled_lines['time'].isnull()].index: t = round(sdf.loc[f][1], ndigits=3) # print(f, t, 'fill time') compiled_lines['time'].loc[f] = t compiled_lines['x'] = compiled_lines['x'].interpolate() compiled_lines['y'] = compiled_lines['y'].interpolate() for i in range(10, len(compiled_lines.columns)): compiled_lines[i] = compiled_lines[i].fillna(' ') cl = compiled_lines.fillna(method='ffill') compiled_lines = cl.reset_index() # print(compiled_lines.head()) # Now that actual lines of data found for blob # Store the data and if died_f not in file_management: file_management[died_f] = [] location = '{f}.{pos}'.format(f=file_counter, pos=0) file_management[died_f].extend([[node, location]]) file_number = self.format_number_string(file_counter) node_file = self.output_dir / '{p}_{n}k.blobs'.format(p=self.experiment.basename, n=file_number) with open(str(node_file), 'w') as f: f.write('% {n}\n'.format(n=node)) for j, row in compiled_lines.iterrows(): # print(row) # print(row['line']) row = ' '.join(['{i}'.format(i=i) for i in row]) row = row + '\n' f.write(row) file_counter += 1 # increments every time file is sucessfully written all_mashups =
pd.concat(mashup_history)
pandas.concat
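The pattern above collects per-node frames in mashup_history and concatenates once at the end, which is cheaper than growing a frame inside the loop; a small sketch with invented rows:

import pandas as pd

mashup_history = []
for node in ("node_a", "node_b"):
    mashup_history.append(pd.DataFrame({"node": node, "frame": [0, 1]}))

all_mashups = pd.concat(mashup_history)  # one frame built from the accumulated list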
#!/usr/bin/env python3.6 import pandas as pd from collections import defaultdict, Counter import argparse import sys import os import subprocess import re import numpy as np from datetime import datetime from itertools import chain from pyranges import PyRanges from SV_modules import * pd.set_option('display.max_columns', None) pd.set_option('display.expand_frame_repr', False) pd.set_option('max_colwidth', None) pd.options.display.max_rows = 999 class Namespace: def __init__(self, **kwargs): self.__dict__.update(kwargs) def createGeneSyndromeDict(database_df): dict = defaultdict(list) for var, hpo in database_df.itertuples(index=False): # var can either be gene or syndrome dict[var].append(hpo) return(dict) def createWeightDict(weights): try: w_df = pd.read_csv(weights, sep = ' ', names=["HPO_id", "weight"], comment = '#') except OSError: print("Count not open/read the input file:" + weights) sys.exit() weightDict = dict(zip(w_df.HPO_id, w_df.weight)) return(weightDict) def getClinicalPhenome(args): # Get the clinical phenome and store as a set try: clinical_phenome = set(open("./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt").read().splitlines()) except OSError: print("Count not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt") sys.exit() return(clinical_phenome) def calculateGeneSumScore(args, hpo_gene_dict, weightDict, clinical_phenome, omim_gene): # Go through genes in genelist found in the patients try: genes = open("./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt", 'r') except OSError: print("Count not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt") sys.exit() with genes: gene = genes.read().splitlines() gene_sum_score = 0 gene_score_result = pd.DataFrame(columns=['gene', 'score']) for query in gene: #print(query) hpo_pheno = set(hpo_gene_dict[query]) # To get the phenotypic features for a given gene overlap = hpo_pheno.intersection(clinical_phenome) # overlap all the phenotypic features with the clinical phenomes for term in overlap: gene_sum_score += weightDict[term] gene_score_result = gene_score_result.append({'gene':query, 'score':gene_sum_score}, ignore_index=True) gene_score_result_r = gene_score_result.iloc[::-1] gene_score_result_r = pd.concat([gene_score_result_r, omim_gene]) gene_score_result_r = normalizeRawScore(args, gene_score_result_r, 'gene') return(gene_score_result_r) def getParentsGeno(filtered_intervar, inheritance_mode, ov_allele): # Create two new columns and initialize to 0 filtered_intervar[inheritance_mode] = 0 filtered_intervar = filtered_intervar.reset_index(drop=True) for idx, row in enumerate(filtered_intervar.itertuples(index=False)): if int(getattr(row, 'Start')) in set(ov_allele['Start']): #parents_geno = ov_allele.loc[ov_allele['Start'] == getattr(row, 'Start'), 'geno'].head(1) #print(parents_geno) parents_geno = ov_allele.loc[ov_allele['Start']==getattr(row,'Start'),'geno'].head(1).item() filtered_intervar.loc[idx, inheritance_mode] = parents_geno return(filtered_intervar) def rerankSmallVariant(df): df['Clinvar_idx'] = df.Clinvar.str[9:-1] df['InterVar_idx'] = df.InterVar_InterVarandEvidence.str[10:].str.split('PVS1').str[0] df[['Clinvar_idx', 'InterVar_idx']] = df[['Clinvar_idx', 'InterVar_idx']].apply(lambda x:x.astype(str).str.lower()) df['Clinvar_score'], df['InterVar_score'] = 3, 3 # Calculate Clinvar score df.loc[(df['Clinvar_idx'].str.contains('benign')), 'Clinvar_score'] = 1 
df.loc[((df['Clinvar_idx'].str.contains('benign')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 2 df.loc[(df['Clinvar_idx'].str.contains('pathogenic')), 'Clinvar_score'] = 5 df.loc[((df['Clinvar_idx'].str.contains('pathogenic')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 4 df.loc[(df['Clinvar_idx'].str.contains('conflicting')), 'Clinvar_score'] = 3 # Calculate Intervar score df.loc[(df['InterVar_idx'].str.contains('benign')), 'InterVar_score'] = 1 df.loc[((df['InterVar_idx'].str.contains('benign')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 2 df.loc[(df['InterVar_idx'].str.contains('pathogenic')), 'InterVar_score'] = 5 df.loc[((df['InterVar_idx'].str.contains('pathogenic')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 4 # Add them up df['Patho_score'] = df['Clinvar_score'] + df['InterVar_score'] # Sort by the total patho_score df = df.sort_values(by=['Patho_score', 'score'], ascending=False) df = df.drop(['Clinvar_idx', 'InterVar_idx', 'Clinvar_score', 'InterVar_score', 'Patho_score'], axis=1) return df def smallVariantGeneOverlapCheckInheritance(args, smallVariantFile, interVarFinalFile, gene_score_result_r, famid): # Overlap gene_score_result_r with small variants genes found in the proband gene_score_result_r = gene_score_result_r[gene_score_result_r.gene.isin(smallVariantFile.gene)] # Subset the intervar files further to store entries relevant to these set of genes filtered_intervar = pd.merge(interVarFinalFile, gene_score_result_r, left_on='Ref_Gene', right_on='gene',how='inner') # Remove common artifacts try: artifacts = pd.read_csv("./common_artifacts_20.txt", names = ["gene"]) filtered_intervar = filtered_intervar.loc[~filtered_intervar['Ref_Gene'].isin(artifacts['gene'])] except OSError: print("Could not open/read the input file: common_artifacts_20.txt") sys.exit() # If custom artifact bed file is provided, filter dataframe if os.path.exists(args.artifact): #print(filtered_intervar) custom_artifact = pd.read_csv(args.artifact, sep='\t', usecols=[0, 2] ,names=["Chr", "End"]) keys = list(custom_artifact.columns.values) i1 = filtered_intervar.set_index(keys).index i2 = custom_artifact.set_index(keys).index filtered_intervar = filtered_intervar.loc[~i1.isin(i2)] # Create a bed file and write it out pd.DataFrame(filtered_intervar).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_smallVariant_candidates.txt', index=False, sep='\t',header=False) # Write out a subset of the variant first filtered_intervar_bed = filtered_intervar[['Chr', 'Start', 'End']] filtered_intervar_bed.loc[:,'Chr'] = 'chr' + filtered_intervar_bed.loc[:,'Chr'].astype(str) filtered_intervar_bed.loc[:,'Start'] -= 1 pd.DataFrame(filtered_intervar_bed).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_target.bed', index=False, sep='\t', header=False) # Create two new columns and initialize to -1 # will later get overwritten to 0/1/2 if parents vcf files are provided filtered_intervar['paternal'] = -1 filtered_intervar['maternal'] = -1 if args.type != 'singleton': # Get overlapping variants from the parents so we know which variants are inherited print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Comparing small variants (SNPs/indels) inheritance') cmd1 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.fathervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf" cmd2 = 
"bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.mothervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf" if args.type == 'duo': if args.father_duo: cmds = [cmd1] else: cmds = [cmd2] else: cmds = [cmd1, cmd2] for cmd in cmds: p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: raise Exception(stderr) # Go through every row in filtered_intervar and see if the same variant is found in either of the parents # We will only compare allele start position (we always assume the alt allele is the same) if args.type=='trio' or args.father_duo: try: paternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#') paternal_ov_allele['geno'] = paternal_ov_allele['geno'].str[:1].astype(int) + paternal_ov_allele['geno'].str[2:3].astype(int) filtered_intervar = getParentsGeno(filtered_intervar, 'paternal', paternal_ov_allele) except OSError: print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf") sys.exit() if args.type=="trio" or args.mother_duo: try: maternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#') maternal_ov_allele['geno'] = maternal_ov_allele['geno'].str[:1].astype(int) + maternal_ov_allele['geno'].str[2:3].astype(int) filtered_intervar = getParentsGeno(filtered_intervar, 'maternal', maternal_ov_allele) except OSError: print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf") sys.exit() # Rerank variants based on reported or predicted pathogeneicity filtered_intervar = rerankSmallVariant(filtered_intervar) if args.type=='trio': # Divide the dataset into recessive, dominant, de novo, compound het ## Recessive recessive = filtered_intervar[(filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 1) & (filtered_intervar['Otherinfo'] == 'hom')] ## Dominant dominant_inherited = filtered_intervar[((filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 0)) | ((filtered_intervar['maternal'] == 1) & (filtered_intervar['paternal'] == 0))] ## De novo denovo = filtered_intervar[(filtered_intervar['paternal'] == 0) & (filtered_intervar['maternal'] == 0)] #Compound het filtered_intervar_compoundhet = filtered_intervar[(filtered_intervar['Otherinfo'] == 'het')] filtered_intervar_compoundhet = filtered_intervar_compoundhet[(filtered_intervar_compoundhet['maternal'] != 2) & (filtered_intervar_compoundhet['paternal'] != 2) & ((filtered_intervar_compoundhet['paternal'] == 1) & (filtered_intervar_compoundhet['maternal'] == 0)) | ((filtered_intervar_compoundhet['maternal'] == 1) & (filtered_intervar_compoundhet['paternal'] == 0)) | ((filtered_intervar_compoundhet['maternal'] == 0) & (filtered_intervar_compoundhet['paternal'] == 0))] count = Counter(filtered_intervar_compoundhet['Ref_Gene']) compoundhet_genes = [x for x, cnt in count.items() if cnt > 1] compoundhet = filtered_intervar_compoundhet[filtered_intervar_compoundhet['Ref_Gene'].isin(compoundhet_genes)] discard = [] for gene in compoundhet_genes: df = compoundhet[compoundhet['Ref_Gene'].str.contains(gene)] row_count = len(df.index) 
col_list = ['paternal', 'maternal'] res = df[col_list].sum(axis=0) if ((res[0] == 0) & (res[1] == row_count)) or ((res[1] == 0) & (res[0] == row_count)): discard.append(gene) compoundhet = compoundhet[~compoundhet['Ref_Gene'].isin(discard)] # Print all the variants according to inheritance mode # Recessive
pd.DataFrame(recessive)
pandas.DataFrame
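A tiny sketch of building a frame from parallel lists, mirroring the pareados table constructed above (the CPF values are made up):

import pandas as pd

items_matching = [("111", "333"), ("222", "444")]  # (case CPF, control CPF), illustrative only
pareados = pd.DataFrame({"CPF CASO": [x[0] for x in items_matching],
                         "CPF CONTROLE": [x[1] for x in items_matching]})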
from datetime import datetime import numpy as np from pandas.tseries.frequencies import get_freq_code as _gfc from pandas.tseries.index import DatetimeIndex, Int64Index from pandas.tseries.tools import parse_time_string import pandas.tseries.frequencies as _freq_mod import pandas.core.common as com import pandas.core.datetools as datetools from pandas._tseries import Timestamp import pandas._tseries as lib #--------------- # Period logic def to_period(arg, freq=None): """ Attempts to convert arg to timestamp """ if arg is None: return arg if type(arg) == float: raise TypeError("Cannot convert a float to period") return Period(arg, freq=freq) class Period(object): def __init__(self, value=None, freq=None, year=None, month=1, quarter=None, day=1, hour=0, minute=0, second=0): """ Represents an period of time Parameters ---------- value : Period or basestring, default None The time period represented (e.g., '4Q2005') freq : str, default None e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes year : int, default None month : int, default 1 quarter : int, default None day : int, default 1 hour : int, default 0 minute : int, default 0 second : int, default 0 """ # freq points to a tuple (base, mult); base is one of the defined # periods such as A, Q, etc. Every five minutes would be, e.g., # ('T', 5) but may be passed in as a string like '5T' self.freq = None # ordinal is the period offset from the gregorian proleptic epoch self.ordinal = None if value is None: if freq is None: raise ValueError("If value is None, freq cannot be None") if year is None: raise ValueError("If value is None, year cannot be None") if quarter is not None: month = (quarter - 1) * 3 + 1 base, mult = _gfc(freq) self.ordinal = lib.period_ordinal(year, month, day, hour, minute, second, base, mult) elif isinstance(value, Period): other = value if freq is None or _gfc(freq) == _gfc(other.freq): self.ordinal = other.ordinal freq = other.freq else: converted = other.asfreq(freq) self.ordinal = converted.ordinal elif isinstance(value, basestring): value = value.upper() dt, parsed, reso =
parse_time_string(value)
pandas.tseries.tools.parse_time_string
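parse_time_string is a private pandas helper that this older snippet unpacks directly; as a hedged illustration only, the public pd.Period constructor handles the '4Q2005' example from the docstring above:

import pandas as pd

p = pd.Period("4Q2005")   # Period('2005Q4', 'Q-DEC')
print(p.year, p.quarter)  # 2005 4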
# coding=utf-8 # <NAME> # <EMAIL> # 2022-04-30 # 100 Days of Code: The Complete Python Pro Bootcamp for 2022 # Day 74 - Google Trends Data import pandas import matplotlib.pyplot as plt # Import data bitcoin_search_df = pandas.read_csv("data/Bitcoin Search Trend.csv") bitcoin_search_df.name = "bitcoin_search" bitcoin_price_df = pandas.read_csv("data/Daily Bitcoin Price.csv") bitcoin_price_df.name = "bitcoin_price" tesla_search_df = pandas.read_csv("data/TESLA Search Trend vs Price.csv") tesla_search_df.name = "tesla_search" ue_benefit_search_2004_19_df = pandas.read_csv("data/UE Benefits Search vs UE Rate 2004-19.csv") ue_benefit_search_2004_19_df.name = "ue_benefit_search_2004_19" ue_benefit_search_2004_20_df = pandas.read_csv("data/UE Benefits Search vs UE Rate 2004-20.csv") ue_benefit_search_2004_20_df.name = "ue_benefit_search_2004_20" df_list = [ bitcoin_search_df, bitcoin_price_df, tesla_search_df, ue_benefit_search_2004_19_df, ue_benefit_search_2004_19_df ] # What are the shapes of the DataFrames? for i in df_list: print("\nShape for " + str(i.name) + ": (rows, columns)") print(i.shape) # How many rows & columns do they have? # Ditto above # What are the column names? for i in df_list: print("\nColumns for " + str(i.name) + ":") print(i.columns) # What is the largest number in the search data column? Try using the .describe() function. # BTC print("\nLargest number in search data column for Bitcoin:") print(bitcoin_search_df['BTC_NEWS_SEARCH'].max()) # Tesla print("\nLargest number in search data column for Tesla:") print(tesla_search_df['TSLA_WEB_SEARCH'].max()) # What is the periodicity of the time series data (daily, weekly, monthly)? for i in df_list: print("\nPeriodicity for " + str(i.name) + ":") print(i.columns[0]) # Can you investigate all 4 DataFrames and find if there are any missing values? # If yes, find how many missing or NaN values there are. Then, find the row where the missing values occur. for i in df_list: print("\nChecking for missing values in " + str(i.name) + ":") print(i.isna().values.any()) print("Sum: " + str(i.isna().values.sum())) # Finally, remove any rows that contain missing values. for i in df_list: print("\nDropping rows with missing values in " + str(i.name) + "...") i.dropna(inplace=True) # Convert any strings you find into Datetime objects. Do this for all 4 DataFrames. Double-check if your type # conversion was successful. for i in (bitcoin_search_df, tesla_search_df, ue_benefit_search_2004_19_df, ue_benefit_search_2004_20_df): i["ORIGINAL_DATE"] =
pandas.to_datetime(i["MONTH"])
pandas.to_datetime
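A minimal sketch of the month-string conversion above, with invented MONTH values:

import pandas as pd

df = pd.DataFrame({"MONTH": ["2020-01", "2020-02", "2020-03"]})
df["ORIGINAL_DATE"] = pd.to_datetime(df["MONTH"])
print(df["ORIGINAL_DATE"].dtype)  # datetime64[ns]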
import pandas as pd from datetime import datetime from requests import get from bs4 import BeautifulSoup def get_schedule(season, playoffs=False): months = ['October', 'November', 'December', 'January', 'February', 'March', 'April', 'May', 'June'] if season==2020: months = ['October-2019', 'November', 'December', 'January', 'February', 'March', 'July', 'August', 'September', 'October-2020'] df = pd.DataFrame() for month in months: r = get(f'https://www.basketball-reference.com/leagues/NBA_{season}_games-{month.lower()}.html') if r.status_code==200: soup = BeautifulSoup(r.content, 'html.parser') table = soup.find('table', attrs={'id': 'schedule'}) month_df = pd.read_html(str(table))[0] df = df.append(month_df) df = df.reset_index() cols_to_remove = [i for i in df.columns if 'Unnamed' in i] cols_to_remove += [i for i in df.columns if 'Notes' in i] cols_to_remove += [i for i in df.columns if 'Start' in i] cols_to_remove += [i for i in df.columns if 'Attend' in i] cols_to_remove += ['index'] df = df.drop(cols_to_remove, axis=1) df.columns = ['DATE', 'VISITOR', 'VISITOR_PTS', 'HOME', 'HOME_PTS'] if season==2020: df = df[df['DATE']!='Playoffs'] df['DATE'] = df['DATE'].apply(lambda x: pd.to_datetime(x)) df = df.sort_values(by='DATE') df = df.reset_index().drop('index', axis=1) playoff_loc = df[df['DATE']==
pd.to_datetime('2020-08-17')
pandas.to_datetime
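The playoff split above compares a parsed datetime column against a single pd.to_datetime scalar; a small stand-alone version with dummy dates:

import pandas as pd

df = pd.DataFrame({"DATE": pd.to_datetime(["2020-08-16", "2020-08-17", "2020-08-18"])})
cutoff = pd.to_datetime("2020-08-17")
playoffs = df[df["DATE"] >= cutoff]  # rows on or after the cutoff date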
import pandas as pd import hashlib import matplotlib.pyplot as plt MB = 1024 * 1024 GB = 1024 * MB TB = 1024 * GB PB = 1024 * TB PER_FILE_RATE = GB / 8 / 3 # one Gbps THROUGHPUT_BIN = 1800 # in seconds PER_FILE_RATE *= THROUGHPUT_BIN def load_data(sites, periods, kinds, skipFiles=[]): all_data = pd.DataFrame() counts = [] for site in sites: for month in periods: for kind in kinds: site_data = pd.read_hdf("/data/gduck/" + month + '/' + site + '_' + kind + '_' + month + '.h5', key=site, mode='r') site_data = site_data.astype({"transfer_start": float}) site_data['site'] = 'xc_' + site nfiles = site_data.filesize.count() print(site, month, kind, nfiles) ufiles = site_data.index.unique().shape[0] totsize = site_data.filesize.sum() / PB avgfilesize = site_data.filesize.mean() / GB all_data = pd.concat([all_data, site_data]) counts.append([site, month, kind, nfiles, ufiles, totsize, avgfilesize]) df = pd.DataFrame(counts, columns=['site', 'month', 'kind', 'files', 'unique files', 'total size [PB]', 'avg. filesize [GB]']) print(df) if len(counts) == 1: return all_data print('---------- merged data -----------') print(all_data.shape[0], 'files\t', all_data.index.unique().shape[0], 'unique\t', all_data.filesize.sum() / PB, "PB\t", all_data.filesize.mean() / GB, "GB avg. file size") all_data = all_data.sort_values('transfer_start') if len(skipFiles) == 0: return all_data for rem in skipFiles: print('removing: ', rem) all_data = all_data[~all_data.index.str.contains(rem)] print('---------- after removing files not to cache -----------') print(all_data.shape[0], 'files\t', all_data.index.unique().shape[0], 'unique\t', all_data.filesize.sum() / PB, "PB\t", all_data.filesize.mean() / GB, "GB avg. file size") return all_data class XCacheServer(object): def __init__(self, size=TB, lwm=0.85, hwm=0.95): self.size = size self.lwm_bytes = size * lwm self.hwm_bytes = size * hwm self.lwm = lwm self.hwm = hwm self.cleanups = 0 self.files = {} self.used = 0 def add_request(self, fn, fs, ts): if fn in self.files: self.files[fn][1] += 1 self.files[fn][2] = ts return True else: if self.used + fs > self.hwm_bytes: self.clean() self.files[fn] = [fs, 1, ts] self.used += fs return False def clean(self): # print("cleaning...") self.cleanups += 1 df = pd.DataFrame.from_dict(self.files, orient='index') df.columns = ['filesize', 'accesses', 'access_time'] # here access time is last time file was accessed, sort it in ascending order. 
df.sort_values(['access_time'], ascending=[True], inplace=True) df['cum_sum'] = df.filesize.cumsum() # print('files in cache:', df.shape[0], end=' ') df = df[df.cum_sum < (self.hwm_bytes - self.lwm_bytes)] # print('files to flush:', df.shape[0]) for fn in df.index.values: cr = self.files.pop(fn) self.used -= cr[0] def get_stats(self): df = pd.DataFrame.from_dict(self.files, orient='index') df.columns = ['filesize', 'accesses', 'access_time'] return [self.cleanups, df.filesize.mean(), df.accesses.mean(), df.access_time.max() - df.access_time.mean()] class XCacheSite(object): def __init__(self, name, upstream='Origin', servers=1, size=TB, lwm=0.85, hwm=0.95): """ cache size is in bytes """ self.name = name self.upstream = upstream self.nservers = servers self.server_size = size self.lwm = lwm self.hwm = hwm self.hits = 0 self.requests = 0 self.data_from_cache = 0 self.data_asked_for = 0 self.servers = [] self.throughput = {} self.init() def init(self): for s in range(self.nservers): self.servers.append(XCacheServer(self.server_size, self.lwm, self.hwm)) def add_request(self, fn, fs, ts): # determine server self.requests += 1 self.data_asked_for += fs # add egress sizebin = fs timebin = ts // THROUGHPUT_BIN while True: if timebin not in self.throughput: self.throughput[timebin] = [0, 0] if sizebin - PER_FILE_RATE > 0: self.throughput[timebin][0] += PER_FILE_RATE sizebin -= PER_FILE_RATE timebin += 1 else: self.throughput[timebin][0] += sizebin break if self.name == 'Origin': self.hits += 1 self.data_from_cache += fs return True server = int(hashlib.md5(fn.encode('utf-8')).hexdigest(), 16) % self.nservers found = self.servers[server].add_request(fn, fs, ts) if found: self.hits += 1 self.data_from_cache += fs else: # add ingress sizebin = fs timebin = ts // THROUGHPUT_BIN while True: if sizebin - PER_FILE_RATE > 0: self.throughput[timebin][1] += PER_FILE_RATE sizebin -= PER_FILE_RATE timebin += 1 else: self.throughput[timebin][1] += sizebin break return found def get_servers_stats(self): data = [] for s in self.servers: data.append(s.get_stats()) df =
pd.DataFrame(data)
pandas.DataFrame
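get_servers_stats above wraps a list of per-server stat lists in pd.DataFrame; in this sketch the numbers and the column labels are assumptions added only for readability:

import pandas as pd

data = [[2, 1.5e9, 3.2, 86400.0],
        [1, 2.1e9, 2.8, 43200.0]]
df = pd.DataFrame(data, columns=["cleanups", "mean_filesize", "mean_accesses", "age"])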
import ast import numpy as np import pandas as pd from pathlib import Path from itertools import zip_longest def from_np_array(array_string): array_string = ','.join(array_string.replace('[ ', '[').split()) return np.array(ast.literal_eval(array_string)) class Dataset: EPISODE = 'episode' REWARD = 'reward' STATE = 'state' ACTION = 'action' NEW = 'new_state' FAILED = 'failed' DONE = 'done' _INDEX = 'index' DEFAULT_COLUMNS = [EPISODE, REWARD, STATE, ACTION, NEW, FAILED, DONE] DEFAULT_COLUMNS_WO_EPISODE = [REWARD, STATE, ACTION, NEW, FAILED, DONE] DEFAULT_ARRAY_CAST = [STATE, ACTION, NEW] def __init__(self, *columns, group_name=None, name=None): self.group_name = group_name if group_name is not None\ else self.EPISODE self.columns = self.DEFAULT_COLUMNS if len(columns) == 0\ else list(columns) self.columns_wo_group = [cname for cname in self.columns if cname != self.group_name] self.columns = [self.group_name] + self.columns_wo_group self.df = pd.DataFrame(columns=self.columns) self.df.index.name = Dataset._INDEX self.name = name def __getattr__(self, item): if item in self.__dict__: return getattr(self, item) return getattr(self.df, item) def _complete_args(self, args): return [[arg] for _, arg in zip_longest(self.columns, args)] def _list_wrap(self, args): if isinstance(args, dict): return {argname: [arg] for argname, arg in args.items()} else: return [[arg] for arg in args] def add_entry(self, *args, **kwargs): entry = {kw: [arg] for kw, arg in zip(self.columns, args)} entry.update({kw: [arg] for kw, arg in kwargs.items()}) self.df = self.df.append(pd.DataFrame(entry), ignore_index=True) def add_group(self, group, group_number=None): if group.get(self.group_name) is None: if group_number is None: group_number = self.df[self.group_name].max() + 1 if pd.isna(group_number): group_number = 0 group_length = len(group[list(group.keys())[0]]) group = group.copy() group.update({ self.group_name: [group_number]*group_length }) self.df = self.df.append(pd.DataFrame(group), ignore_index=True) def save(self, filepath): filepath = Path(filepath) if filepath.is_dir(): filepath = filepath / (self.name + '.csv') with filepath.open('w') as f: self.df.to_csv(f) @staticmethod def _get_index_key(df): dflt_idx = 'Unnamed: 0' return dflt_idx if dflt_idx in list(df.columns) else Dataset._INDEX @staticmethod def load(filepath, *array_cast, group_name=None): if len(array_cast) == 0: array_cast = Dataset.DEFAULT_ARRAY_CAST filepath = Path(filepath) converters = {cname: from_np_array for cname in array_cast} with filepath.open('r') as f: df =
pd.read_csv(f, converters=converters)
pandas.read_csv
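Dataset.load above passes converters to pd.read_csv so array-like strings come back as numpy arrays; a runnable sketch with an invented two-column CSV and a simplified parser:

import io
import numpy as np
import pandas as pd

def parse_array(s):
    # "[1 2]" -> np.array([1., 2.])
    return np.array(s.strip("[]").split(), dtype=float)

csv_text = "state,reward\n[1 2],0.5\n[3 4],1.0\n"
df = pd.read_csv(io.StringIO(csv_text), converters={"state": parse_array})
print(type(df.loc[0, "state"]))  # <class 'numpy.ndarray'>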
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jun 25 21:13:58 2020 @author: sarakohnke """ #Set working directory import os path="/Users/sarakohnke/Desktop/data_type_you/interim-tocsv" os.chdir(path) os.getcwd() #Import required packages import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import seaborn as sns #Import data for files with demographic information #(This is after converted .xpt to .csv) #import 7 demo files demo05=pd.read_csv('DEMO_D_NHANES_Demographics_2005.csv') demo07=pd.read_csv('DEMO_E_NHANES_Demographics_2007.csv') demo09=pd.read_csv('DEMO_F_NHANES_Demographics_2009.csv') demo11=pd.read_csv('DEMO_G_NHANES_Demographics_2011.csv') demo13=pd.read_csv('DEMO_H_NHANES_Demographics_2013.csv') demo15=pd.read_csv('DEMO_I_NHANES_Demographics_2015.csv') demo17=pd.read_csv('DEMO_J_NHANES_Demographics_2017.csv') #add year as a column demo05['Year']=2005 demo07['Year']=2007 demo09['Year']=2009 demo11['Year']=2011 demo13['Year']=2013 demo15['Year']=2015 demo17['Year']=2017 #append all dfs together demographics_allyears=demo05.append(demo07, ignore_index = True) demographics_allyears=demographics_allyears.append(demo09, ignore_index = True) demographics_allyears=demographics_allyears.append(demo11, ignore_index = True) demographics_allyears=demographics_allyears.append(demo13, ignore_index = True) demographics_allyears=demographics_allyears.append(demo15, ignore_index = True) demographics_allyears=demographics_allyears.append(demo17, ignore_index = True) #select only desired cols demographics_allyears2=demographics_allyears[['SEQN','RIAGENDR','RIDAGEYR']].copy() #rename cols demographics_allyears2.rename(columns={'SEQN':'Patient ID', 'RIAGENDR':'Male', 'RIDAGEYR':'Age (years)'}, inplace=True) #see if there are unknowns (eg 777) demographics_allyears2['Age (years)'].value_counts().sort_index() #replace 2 with 0 for female demographics_allyears2['Male'].replace(2,0,inplace=True) #drop rows with nas demographics_allyears3=demographics_allyears2.dropna(axis=0) #filter for adults demographics_allyears4=demographics_allyears3[demographics_allyears3['Age (years)']>=18] #Import data for files with blood pressure information #import 7 bp files bp05=pd.read_csv('BPX_D_NHANES_Blood_Pressure_2005.csv') bp07=pd.read_csv('BPX_E_NHANES_Blood_Pressure_2007.csv') bp09=pd.read_csv('BPX_F_NHANES_Blood_Pressure_2009.csv') bp11=pd.read_csv('BPX_G_NHANES_Blood_Pressure_2011.csv') bp13=pd.read_csv('BPX_H_NHANES_Blood_Pressure_2013.csv') bp15=pd.read_csv('BPX_I_NHANES_Blood_Pressure_2015.csv') bp17=pd.read_csv('BPX_J_NHANES_Blood_Pressure_2017.csv') #add year as a column bp05['Year']=2005 bp07['Year']=2007 bp09['Year']=2009 bp11['Year']=2011 bp13['Year']=2013 bp15['Year']=2015 bp17['Year']=2017 #append all dfs together bp_allyears=bp05.append(bp07, ignore_index = True) bp_allyears=bp_allyears.append(bp09, ignore_index = True) bp_allyears=bp_allyears.append(bp11, ignore_index = True) bp_allyears=bp_allyears.append(bp13, ignore_index = True) bp_allyears=bp_allyears.append(bp15, ignore_index = True) bp_allyears=bp_allyears.append(bp17, ignore_index = True) #select only desired cols bp_allyears2=bp_allyears[['SEQN','BPXPLS','BPXSY1','BPXDI1']].copy() #rename cols bp_allyears2.rename(columns={'SEQN':'Patient ID', 'BPXPLS':'Pulse (60sec)', 'BPXSY1':'Systolic pressure (mmHg)', 'BPXDI1':'Diastolic pressure (mmHg)'}, inplace=True) #see if there are unknowns (eg 777) bp_allyears2['Systolic pressure (mmHg)'].value_counts().sort_index() #replace values that don't 
make sense with NaNs bp_allyears2['Pulse (60sec)'].replace(0,np.nan,inplace=True) bp_allyears2['Pulse (60sec)'].value_counts().sort_index() bp_allyears2['Diastolic pressure (mmHg)'].replace([0,2,4,6,8,10,12,14,16,18],np.nan,inplace=True) bp_allyears2['Diastolic pressure (mmHg)'].value_counts().sort_index() #drop rows with nas bp_allyears3=bp_allyears2.dropna(axis=0) #Import data for files with body measure information #import 7 body measure files bm05=pd.read_csv('BMX_D_NHANES_Body_Measures_2005.csv') bm07=pd.read_csv('BMX_E_NHANES_Body_Measures_2007.csv') bm09=pd.read_csv('BMX_F_NHANES_Body_Measures_2009.csv') bm11=pd.read_csv('BMX_G_NHANES_Body_Measures_2011.csv') bm13=pd.read_csv('BMX_H_NHANES_Body_Measures_2013.csv') bm15=pd.read_csv('BMX_I_NHANES_Body_Measures_2015.csv') bm17=pd.read_csv('BMX_J_NHANES_Body_Measures_2017.csv') #add year as a column bm05['Year']=2005 bm07['Year']=2007 bm09['Year']=2009 bm11['Year']=2011 bm13['Year']=2013 bm15['Year']=2015 bm17['Year']=2017 #append all dfs together bm_allyears=bm05.append(bm07, ignore_index = True) bm_allyears=bm_allyears.append(bm09, ignore_index = True) bm_allyears=bm_allyears.append(bm11, ignore_index = True) bm_allyears=bm_allyears.append(bm13, ignore_index = True) bm_allyears=bm_allyears.append(bm15, ignore_index = True) bm_allyears=bm_allyears.append(bm17, ignore_index = True) #select only desired cols bm_allyears2=bm_allyears[['SEQN','BMXBMI']].copy() #rename cols bm_allyears2.rename(columns={'SEQN':'Patient ID', 'BMXBMI':'BMI (kg/m2)'}, inplace=True) #see if there are unknowns (eg 777) bm_allyears2['BMI (kg/m2)'].value_counts().sort_index() #drop rows with nas bm_allyears3=bm_allyears2.dropna(axis=0) #Import data for files with total cholesterol information #import 7 chol files chol05=pd.read_csv('TCHOL_D_NHANES_Total_Cholesterol_2005.csv') chol07=pd.read_csv('TCHOL_E_NHANES_Total_Cholesterol_2007.csv') chol09=pd.read_csv('TCHOL_F_NHANES_Total_Cholesterol_2009.csv') chol11=pd.read_csv('TCHOL_G_NHANES_Total_Cholesterol_2011.csv') chol13=pd.read_csv('TCHOL_H_NHANES_Total_Cholesterol_2013.csv') chol15=pd.read_csv('TCHOL_I_NHANES_Total_Cholesterol_2015.csv') chol17=pd.read_csv('TCHOL_J_NHANES_Total_Cholesterol_2017.csv') #add year as a column chol05['Year']=2005 chol07['Year']=2007 chol09['Year']=2009 chol11['Year']=2011 chol13['Year']=2013 chol15['Year']=2015 chol17['Year']=2017 #append all dfs together chol_allyears=chol05.append(chol07, ignore_index = True) chol_allyears=chol_allyears.append(chol09, ignore_index = True) chol_allyears=chol_allyears.append(chol11, ignore_index = True) chol_allyears=chol_allyears.append(chol13, ignore_index = True) chol_allyears=chol_allyears.append(chol15, ignore_index = True) chol_allyears=chol_allyears.append(chol17, ignore_index = True) #select only desired cols chol_allyears2=chol_allyears[['SEQN','LBXTC']].copy() #rename cols chol_allyears2.rename(columns={'SEQN':'Patient ID', 'LBXTC':'Total cholesterol (mg/dl)'}, inplace=True) #see if there are unknowns (eg 777) chol_allyears2['Total cholesterol (mg/dl)'].value_counts().sort_index() #drop rows with nas chol_allyears3=chol_allyears2.dropna(axis=0) #Import data for files with blood count information #import 7 blood count files cbc05=pd.read_csv('CBC_D_NHANES_Complete_Blood_Count_2005.csv') cbc07=pd.read_csv('CBC_E_NHANES_Complete_Blood_Count_2007.csv') cbc09=pd.read_csv('CBC_F_NHANES_Complete_Blood_Count_2009.csv') cbc11=pd.read_csv('CBC_G_NHANES_Complete_Blood_Count_2011.csv') cbc13=pd.read_csv('CBC_H_NHANES_Complete_Blood_Count_2013.csv') 
cbc15=pd.read_csv('CBC_I_NHANES_Complete_Blood_Count_2015.csv') cbc17=pd.read_csv('CBC_J_NHANES_Complete_Blood_Count_2017.csv') #add year as a column cbc05['Year']=2005 cbc07['Year']=2007 cbc09['Year']=2009 cbc11['Year']=2011 cbc13['Year']=2013 cbc15['Year']=2015 cbc17['Year']=2017 #append all dfs together cbc_allyears=cbc05.append(cbc07, ignore_index = True) cbc_allyears=cbc_allyears.append(cbc09, ignore_index = True) cbc_allyears=cbc_allyears.append(cbc11, ignore_index = True) cbc_allyears=cbc_allyears.append(cbc13, ignore_index = True) cbc_allyears=cbc_allyears.append(cbc15, ignore_index = True) cbc_allyears=cbc_allyears.append(cbc17, ignore_index = True) #select only desired cols cbc_allyears2=cbc_allyears[['SEQN','LBXMPSI','LBXPLTSI','LBXRBCSI','LBDEONO', 'LBDLYMNO','LBDBANO','LBDMONO']].copy() #rename cols cbc_allyears2.rename(columns={'SEQN':'Patient ID', 'LBXMPSI':'Mean platelet volume (fL)', 'LBXPLTSI':'Platelet count (1000 cells/uL)', 'LBXRBCSI':'Red blood cell count (million cells/uL)', 'LBDEONO':'Eosinophils number (1000 cells/uL)', 'LBDLYMNO':'Lymphocyte number (1000 cells/uL)', 'LBDBANO':'Basophils number (1000 cells/uL)', 'LBDMONO':'Monocyte number (1000 cells/uL)'}, inplace=True) #see if there are unknowns (eg 777) cbc_allyears2['Monocyte number (1000 cells/uL)'].value_counts().sort_index() #drop rows with nas cbc_allyears3=cbc_allyears2.dropna(axis=0) #Import data for files with A1c/glycohemoglobin information #import 7 a1c files a1c05=pd.read_csv('GHB_D_NHANES_A1C_2005.csv') a1c07=pd.read_csv('GHB_E_NHANES_A1C_2007.csv') a1c09=pd.read_csv('GHB_F_NHANES_A1C_2009.csv') a1c11=pd.read_csv('GHB_G_NHANES_A1C_2011.csv') a1c13=pd.read_csv('GHB_H_NHANES_A1C_2013.csv') a1c15=pd.read_csv('GHB_I_NHANES_A1C_2015.csv') a1c17=pd.read_csv('GHB_J_NHANES_A1C_2017.csv') #add year as a column a1c05['Year']=2005 a1c07['Year']=2007 a1c09['Year']=2009 a1c11['Year']=2011 a1c13['Year']=2013 a1c15['Year']=2015 a1c17['Year']=2017 #append all dfs together a1c_allyears=a1c05.append(a1c07, ignore_index = True) a1c_allyears=a1c_allyears.append(a1c09, ignore_index = True) a1c_allyears=a1c_allyears.append(a1c11, ignore_index = True) a1c_allyears=a1c_allyears.append(a1c13, ignore_index = True) a1c_allyears=a1c_allyears.append(a1c15, ignore_index = True) a1c_allyears=a1c_allyears.append(a1c17, ignore_index = True) #rename cols a1c_allyears.rename(columns={'SEQN':'Patient ID', 'LBXGH':'A1C (%)'}, inplace=True) #see if there are unknowns (eg 777) a1c_allyears['A1C (%)'].value_counts().sort_index() #drop rows with nas a1c_allyears2=a1c_allyears.dropna(axis=0) #Import data for files with standard bio information #import 7 standard bio files sb05=pd.read_csv('BIOPRO_D_NHANES_Standard_Biochemistry_Profile_2005.csv') sb07=pd.read_csv('BIOPRO_E_NHANES_Standard_Biochemistry_Profile_2007.csv') sb09=pd.read_csv('BIOPRO_F_NHANES_Standard_Biochemistry_Profile_2009.csv') sb11=pd.read_csv('BIOPRO_G_NHANES_Standard_Biochemistry_Profile_2011.csv') sb13=pd.read_csv('BIOPRO_H_NHANES_Standard_Biochemistry_Profile_2013.csv') sb15=pd.read_csv('BIOPRO_I_NHANES_Standard_Biochemistry_Profile_2015.csv') sb17=pd.read_csv('BIOPRO_J_NHANES_Standard_Biochemistry_Profile_2017.csv') #add year as a column sb05['Year']=2005 sb07['Year']=2007 sb09['Year']=2009 sb11['Year']=2011 sb13['Year']=2013 sb15['Year']=2015 sb17['Year']=2017 #append all dfs together sb_allyears=sb05.append(sb07, ignore_index = True) sb_allyears=sb_allyears.append(sb09, ignore_index = True) sb_allyears=sb_allyears.append(sb11, ignore_index = True) 
sb_allyears=sb_allyears.append(sb13, ignore_index = True) sb_allyears=sb_allyears.append(sb15, ignore_index = True) sb_allyears=sb_allyears.append(sb17, ignore_index = True) #select only desired cols sb_allyears2=sb_allyears[['SEQN','LBXSKSI','LBXSNASI','LBXSGB','LBXSPH', 'LBXSUA','LBXSTR','LBXSTB','LBXSLDSI','LBXSIR','LBXSGTSI', 'LBXSC3SI','LBXSCA','LBXSBU','LBXSAPSI','LBXSATSI','LBXSAL']].copy() #rename cols sb_allyears2.rename(columns={'SEQN':'Patient ID', 'LBXSKSI':'Potassium (mmol/L)', 'LBXSNASI':'Sodium (mmol/L)', 'LBXSGB':'Globulin (g/dL)', 'LBXSPH':'Phosphorus (mg/dL)', 'LBXSUA':'Uric acid (mg/dL)', 'LBXSTR':'Triglycerides (mg/dL)', 'LBXSTB':'Bilirubin (mg/dL)', 'LBXSLDSI':'Lactate dehydrogenase (IU/L)', 'LBXSIR':'Iron (ug/dL)', 'LBXSGTSI':'Gamma glutamyl transferase (IU/L)', 'LBXSC3SI':'Bicarbonate (mmol/L)', 'LBXSCA':'Calcium (mg/dL)', 'LBXSBU':'Blood Urea Nitrogen (mg/dL)', 'LBXSAPSI':'Alkaline phosphatase (IU/L)', 'LBXSATSI':'Alanine aminotransferase (IU/L)', 'LBXSAL':'Albumin (g/dL)'}, inplace=True) #see if there are unknowns (eg 777) sb_allyears2['Albumin (g/dL)'].value_counts().sort_index() #replace values that don't make sense with NaNs sb_allyears2['Triglycerides (mg/dL)'].replace(6057,np.nan,inplace=True) sb_allyears2['Triglycerides (mg/dL)'].value_counts().sort_index() sb_allyears2['Alanine aminotransferase (IU/L)'].replace(1363,np.nan,inplace=True) sb_allyears2['Alanine aminotransferase (IU/L)'].value_counts().sort_index() #drop rows with nas sb_allyears3=sb_allyears2.dropna(axis=0) #Import data for files with diet information #import 7 diet files diet05=pd.read_csv('DBQ_D_NHANES_Diet_Behavior_and_Nutrition_2005.csv') diet07=pd.read_csv('DBQ_E_NHANES_Diet_Behavior_and_Nutrition_2007.csv') diet09=pd.read_csv('DBQ_F_NHANES_Diet_Behavior_and_Nutrition_2009.csv') diet11=pd.read_csv('DBQ_G_NHANES_Diet_Behavior_and_Nutrition_2011.csv') diet13=pd.read_csv('DBQ_H_NHANES_Diet_Behavior_and_Nutrition_2013.csv') diet15=pd.read_csv('DBQ_I_NHANES_Diet_Behavior_and_Nutrition_2015.csv') diet17=pd.read_csv('DBQ_J_NHANES_Diet_Behavior_and_Nutrition_2017.csv') #add year as a column diet05['Year']=2005 diet07['Year']=2007 diet09['Year']=2009 diet11['Year']=2011 diet13['Year']=2013 diet15['Year']=2015 diet17['Year']=2017 #append all dfs together diet_allyears=diet05.append(diet07, ignore_index = True) diet_allyears=diet_allyears.append(diet09, ignore_index = True) diet_allyears=diet_allyears.append(diet11, ignore_index = True) diet_allyears=diet_allyears.append(diet13, ignore_index = True) diet_allyears=diet_allyears.append(diet15, ignore_index = True) diet_allyears=diet_allyears.append(diet17, ignore_index = True) #select only desired COLUMNS diet_allyears2=diet_allyears[['SEQN','DBQ700']].copy() #rename COLS diet_allyears2.rename(columns={'SEQN':'Patient ID', 'DBQ700':'How healthy diet (1 is best)' }, inplace=True) diet_allyears2['How healthy diet (1 is best)'].replace([7,9],np.nan,inplace=True) diet_allyears2['How healthy diet (1 is best)'].value_counts().sort_index() #drop rows with nas diet_allyears3=diet_allyears2.dropna(axis=0) #Import data for files with medication information #import 7 medication files drug05=pd.read_csv('RXQ_RX_D_NHANES_Prescription_Medications_2005.csv') drug07=pd.read_csv('RXQ_RX_E_NHANES_Prescription_Medications_2007.csv') drug09=pd.read_csv('RXQ_RX_F_NHANES_Prescription_Medications_2009.csv') drug11=pd.read_csv('RXQ_RX_G_NHANES_Prescription_Medications_2011.csv') drug13=pd.read_csv('RXQ_RX_H_NHANES_Prescription_Medications_2013.csv',encoding='windows-1252') 
drug15=
pd.read_csv('RXQ_RX_I_NHANES_Prescription_Medications_2015.csv')
pandas.read_csv
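The 2013 prescription file above needs encoding='windows-1252'; a self-contained sketch of that kind of read, with a made-up two-column payload:

import io
import pandas as pd

raw = "SEQN,DRUG\n1,ACETAMINOPHEN\n".encode("windows-1252")
drug = pd.read_csv(io.BytesIO(raw), encoding="windows-1252")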
from datetime import timedelta import numpy as np import pytest from pandas._libs import iNaT import pandas as pd from pandas import ( Categorical, DataFrame, Index, IntervalIndex, NaT, Series, Timestamp, date_range, isna, ) import pandas._testing as tm class TestSeriesMissingData: def test_categorical_nan_equality(self): cat = Series(Categorical(["a", "b", "c", np.nan])) exp = Series([True, True, True, False]) res = cat == cat tm.assert_series_equal(res, exp) def test_categorical_nan_handling(self): # NaNs are represented as -1 in labels s = Series(Categorical(["a", "b", np.nan, "a"])) tm.assert_index_equal(s.cat.categories, Index(["a", "b"])) tm.assert_numpy_array_equal( s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8) ) def test_fillna_nat(self): series = Series([0, 1, 2, iNaT], dtype="M8[ns]") filled = series.fillna(method="pad") filled2 = series.fillna(value=series.values[2]) expected = series.copy() expected.values[3] = expected.values[2] tm.assert_series_equal(filled, expected) tm.assert_series_equal(filled2, expected) df = DataFrame({"A": series}) filled = df.fillna(method="pad") filled2 = df.fillna(value=series.values[2]) expected = DataFrame({"A": expected}) tm.assert_frame_equal(filled, expected) tm.assert_frame_equal(filled2, expected) series = Series([iNaT, 0, 1, 2], dtype="M8[ns]") filled = series.fillna(method="bfill") filled2 = series.fillna(value=series[1]) expected = series.copy() expected[0] = expected[1] tm.assert_series_equal(filled, expected) tm.assert_series_equal(filled2, expected) df = DataFrame({"A": series}) filled = df.fillna(method="bfill") filled2 = df.fillna(value=series[1]) expected = DataFrame({"A": expected}) tm.assert_frame_equal(filled, expected) tm.assert_frame_equal(filled2, expected) def test_isna_for_inf(self): s = Series(["a", np.inf, np.nan, pd.NA, 1.0]) with pd.option_context("mode.use_inf_as_na", True): r = s.isna() dr = s.dropna() e = Series([False, True, True, True, False]) de = Series(["a", 1.0], index=[0, 4]) tm.assert_series_equal(r, e) tm.assert_series_equal(dr, de) def test_isnull_for_inf_deprecated(self): # gh-17115 s = Series(["a", np.inf, np.nan, 1.0]) with pd.option_context("mode.use_inf_as_null", True): r = s.isna() dr = s.dropna() e = Series([False, True, True, False]) de = Series(["a", 1.0], index=[0, 3]) tm.assert_series_equal(r, e) tm.assert_series_equal(dr, de) def test_timedelta64_nan(self): td = Series([timedelta(days=i) for i in range(10)]) # nan ops on timedeltas td1 = td.copy() td1[0] = np.nan assert isna(td1[0]) assert td1[0].value == iNaT td1[0] = td[0] assert not isna(td1[0]) # GH#16674 iNaT is treated as an integer when given by the user td1[1] = iNaT assert not isna(td1[1]) assert td1.dtype == np.object_ assert td1[1] == iNaT td1[1] = td[1] assert not isna(td1[1]) td1[2] = NaT assert
isna(td1[2])
pandas.isna
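The assertion above checks that isna recognises NaT stored in timedelta data; a short stand-alone version:

import pandas as pd

td = pd.Series(pd.to_timedelta(["1 day", "2 days", "3 days"]))
td[2] = pd.NaT
print(pd.isna(td).tolist())  # [False, False, True]
print(pd.isna(td[2]))        # True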
import numba import numpy as np import pandas as pd import itertools from scipy import stats __all__ = ['crosscorr'] def _mat_stat(mat, indices, stat_func, min_n, method): results = np.zeros((indices.shape[0], 3)) for pairi in range(indices.shape[0]): i, j = indices[pairi, :] notnan = (~np.isnan(mat[:, i]) & ~np.isnan(mat[:, j])) n = np.sum(notnan) if n >= min_n: rho, pvalue = stat_func(mat[notnan, i], mat[notnan, j], method) else: rho, pvalue = np.nan, np.nan results[pairi, 0] = rho results[pairi, 1] = pvalue results[pairi, 2] = n return results _mat_stat_nb = numba.jit(_mat_stat, nopython=True, parallel=True, error_model='numpy') def crosscorr(data, left_cols=None, right_cols=None, method='spearman', min_n=5): """Compute correlation among columns of data, ignoring NaNs. Uses numba for speedup and parallelization. Results have been tested against scipy.stats.spearmanr and scipy.stats.pearsonr Parameters ---------- left_cols, right_cols : list of columns in data If both are present then return correlations between columns in left vs. right. If only left is not None then limit correlations to left vs. left. If neither are not None then compute all correlations. method : str Method can be spearman or pearson. min_n : int Minimum number of observation required to compute the correlation (otherwise NaN is returned) Returns ------- results : pd.DataFrame Results with columns: Left, Right, rho, pvalue, N""" if left_cols is None: left_cols = data.columns if right_cols is None: right_cols = left_cols columns = data.columns.tolist() l_coli = [columns.index(c) for c in left_cols] r_coli = [columns.index(c) for c in right_cols] column_pairs = np.array([ij for ij in itertools.product(left_cols, right_cols)]) indices = np.array([ij for ij in itertools.product(l_coli, r_coli)]) res = _mat_stat_nb(np.asarray(data), indices, _numba_corr, min_n, method) """SAS and pearsonr look the statistic up in a t distribution while R uses the normnal""" res[:, 1] = 2 * stats.distributions.t.sf(np.abs(res[:, 1]), res[:, 2]) # res[:, 1] = 2 * stats.norm.cdf(-np.abs(res[:, 1])) results =
pd.DataFrame(res, columns=['rho', 'pvalue', 'N'])
pandas.DataFrame
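crosscorr above wraps the numba result matrix in a labelled frame; a minimal sketch with dummy rho/pvalue/N rows:

import numpy as np
import pandas as pd

res = np.array([[0.80, 0.01, 30.0],
                [-0.10, 0.60, 28.0]])  # placeholder statistics, not real correlations
results = pd.DataFrame(res, columns=["rho", "pvalue", "N"])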