| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19-1.03M | stringlengths 4-2.12k | stringlengths 8-90 |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
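# Illustrative sketch (not part of the original test module): item assignment
# on a Categorical only accepts values that are already categories, which is
# why the test above calls add_categories([-1000]) before assigning. Variable
# names below are invented for the demo.
import pandas as pd

demo = pd.Categorical(['a', 'b', 'a'])
try:
    demo[0] = 'z'  # 'z' is not a category -> raises (ValueError/TypeError by version)
except (ValueError, TypeError):
    pass
demo = demo.add_categories(['z'])
demo[0] = 'z'  # now allowed; codes become [2, 1, 0]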
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise, as it cannot be sorted (on PY3 or older
# numpy versions)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
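# Illustrative sketch (not part of the original test module): from_codes builds
# a Categorical directly from integer codes plus a category list, with -1
# standing for a missing value; out-of-range codes raise ValueError, as the
# assertions above check. Names below are invented for the demo.
import pandas as pd

codes_demo = pd.Categorical.from_codes([0, 1, 2, 0, -1], categories=["a", "b", "c"])
# values: ['a', 'b', 'c', 'a', NaN]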
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categoricals with the same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following works via '__array_priority__ = 1000',
# but only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that inequality comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
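# Illustrative sketch (not part of the original test module): with ordered=True
# the comparison operators follow the *categories* order rather than lexical
# order, which is what the reversed-categories assertions above rely on.
import pandas as pd

rev_demo = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
mask_demo = rev_demo > "b"  # array([True, False, False]): 'a' ranks above 'b' here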
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect Categorical, as it doesn't care
# about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
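# Illustrative sketch (not part of the original test module): set_categories
# re-maps the codes so the *values* stay the same, while rename_categories
# keeps the codes and changes what each code means. Names are invented.
import pandas as pd

sc_demo = pd.Categorical(["a", "b", "a"])
same_values = sc_demo.set_categories(["b", "a"])    # values still a, b, a; codes [1, 0, 1]
new_values = sc_demo.rename_categories(["x", "y"])  # values become x, y, x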
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
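# Illustrative sketch (not part of the original test module): add_categories
# appends new (unused) categories, while remove_categories drops them and turns
# any values that used them into NaN, matching the expectations above.
import pandas as pd

ar_demo = pd.Categorical(["a", "b", "c", "a"])
ar_demo = ar_demo.add_categories(["d"])     # categories: a, b, c, d
ar_demo = ar_demo.remove_categories(["c"])  # the 'c' value becomes NaN
# list(ar_demo) -> ['a', 'b', nan, 'a']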
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
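# Illustrative sketch (not part of the original test module):
# remove_unused_categories drops categories no value references; NaN entries
# keep their -1 code, as the GH11599 case above expects.
import numpy as np
import pandas as pd

ru_demo = pd.Categorical(["a", np.nan, "a"], categories=["a", "b", "c"])
ru_demo = ru_demo.remove_unused_categories()  # categories now just ['a']; codes [0, -1, 0]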
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
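# Illustrative sketch (not part of the original test module): .codes hands back
# a read-only view of the underlying integer array, so mutating it raises,
# exactly as asserted above; the Categorical itself stays writeable.
import pandas as pd

ro_demo = pd.Categorical(["a", "b", "a"])
try:
    ro_demo.codes[0] = 1  # read-only ndarray -> ValueError
except ValueError:
    pass
ro_demo[0] = "b"  # writing through the Categorical is still fine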
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
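# Illustrative sketch (not part of the original test module): min/max are only
# defined for ordered categoricals and follow the categories order, so with
# categories ['d', 'c', 'b', 'a'] the minimum of the data is 'd'.
import pandas as pd

mm_demo = pd.Categorical(["a", "b", "c", "d"],
                         categories=["d", "c", "b", "a"], ordered=True)
lowest, highest = mm_demo.min(), mm_demo.max()  # 'd' and 'a'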
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
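# Illustrative sketch (not part of the original test module), mirroring the
# calls used above: nbytes counts the codes plus the categories, while
# memory_usage(deep=True) additionally sizes object categories (e.g. strings),
# which is why it exceeds nbytes for string data.
import pandas as pd

mu_demo = pd.Categorical(["foo", "foo", "bar"])
shallow = mu_demo.memory_usage()        # equals mu_demo.nbytes
deep = mu_demo.memory_usage(deep=True)  # larger for object categories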
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and the following discussion: comparisons with scalars not in categories
# should raise for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
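# Illustrative sketch (not part of the original test module): the keyword form
# astype('category', categories=..., ordered=...) used above is the older API;
# on newer pandas (0.21+) the equivalent spelling goes through a
# CategoricalDtype. Shown here under that assumption.
import pandas as pd
from pandas.api.types import CategoricalDtype

s_demo = pd.Series(["a", "b", "c", "a"])
res_demo = s_demo.astype(CategoricalDtype(categories=list("abcdef"), ordered=True))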
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
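# Illustrative sketch (not part of the original test module): the .cat accessor
# exposes the underlying Categorical of a Series; its methods return a new
# Series by default (the inplace= form used above is the older API). Names are
# invented for the demo.
import numpy as np
import pandas as pd

acc_demo = pd.Series(pd.Categorical(["a", "b", np.nan, "a"]))
cats_demo = acc_demo.cat.categories                  # Index(['a', 'b'], dtype='object')
reordered = acc_demo.cat.set_categories(["b", "a"])  # new Series, same values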
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
    # and the methods '.set_categories()' and '.remove_unused_categories()' to
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
        s = Series(Categorical(["a", "b", "b", "a"],
                               categories=["a", "b", "c"]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
        # This method is easily confused with the .cat accessor version, so test
        # that calling it directly on the Series raises an error:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
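    # Illustrative sketch (not part of the original suite): the supported call goes
    # through the .cat accessor, as noted above.  Since none of the existing
    # categories appear in the new list, every value becomes NaN; only the
    # resulting categories are asserted here.
    def _example_set_categories_via_accessor(self):
        s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        res = s.cat.set_categories([4, 3, 2, 1])  # returns a new Series
        self.assert_numpy_array_equal(res.cat.categories, np.array([4, 3, 2, 1]))
        self.assertTrue(res.isnull().all())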
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
        # test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
        cat = Series(Categorical([np.nan, "b", "c", np.nan],
                                 categories=['d', 'c', 'b', 'a'],
                                 ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
        # placeholder names (the originals were redacted); rows 0 and 2 must share
        # the same name for the drop_duplicates checks below to hold
        x = pd.DataFrame([[1, 'John Doe'], [2, 'Jane Doe'],
                          [1, 'John Doe']],
                         columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
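    # Illustrative sketch (not from the original suite): grouping by a categorical
    # keeps unobserved categories as groups, which surface as NaN rows in the
    # aggregated result; this is the behaviour exercised at length above.
    def _example_groupby_keeps_unobserved_categories(self):
        c = Categorical(["x", "x", "y"], categories=["x", "y", "z"])
        df = DataFrame({"key": c, "val": [1, 2, 3]})
        res = df.groupby("key").sum()
        self.assertEqual(len(res), 3)  # 'z' shows up as a group
        self.assertTrue(np.isnan(res["val"].iloc[2]))  # ...with a NaN aggregate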
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
| tm.assert_frame_equal(res_df, exp_df) | pandas.util.testing.assert_frame_equal |
"""
This module handles the conversion of scraped data to different formats.
"""
# inbuilt or third party libs
import pandas as pd
import json
from datetime import datetime
import logging
from typing import Dict, Optional, List, Union
from pathlib import Path
import os
from ast import literal_eval as make_tuple
# project modules
from modules import misc, style
from locations import dirs, paths
def convert_dict_into_df(docketlist: List[Dict], county: str) -> pd.DataFrame:
# SET PANDAS OPTIONS FOR PRINT DISPLAY
pd.set_option("display.max_columns", 20)
| pd.set_option("display.width", 2000) | pandas.set_option |
#!/usr/bin/env python
# coding: utf-8
import os
import ee
import datetime
import tqdm
import json
import pandas as pd
import geopandas as gp
import numpy as np
import rsfuncs as rs
import multiprocessing as mp
import scipy.interpolate as interp
import matplotlib.pyplot as plt
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map # or thread_map
ee.Initialize()
# Helper functions
def dict2arr(data_dict, var_name):
'''converts ee dictionary output from .getInfo() to a numpy array. Wraps array_from_df'''
data = data_dict[var_name]
lats = data_dict['latitude']
lons = data_dict['longitude']
df = pd.DataFrame([data,lats,lons]).T
df.columns = [var_name, "latitude", 'longitude']
arr = rs.array_from_df(df, var_name)
return arr
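# Illustrative sketch (not part of the original script): the dict returned by
# .getInfo() after pixelLonLat().addBands(...) holds parallel lists keyed by the
# band name plus 'latitude'/'longitude'.  A minimal fake payload shows the shape
# dict2arr expects; it still relies on rs.array_from_df to pivot the values onto
# a regular lat/lon grid, so run it where the rsfuncs module is importable.
def _demo_dict2arr():
    fake = {
        'aet': [1.0, 2.0, 3.0, 4.0],
        'latitude': [38.00, 38.00, 38.01, 38.01],
        'longitude': [-121.00, -120.99, -121.00, -120.99],
    }
    return dict2arr(fake, 'aet')  # expected: a 2x2 numpy array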
def map_cdl2fmp(dictionary,array):
'''maps values on cdl image to the fmp'''
mapping = dictionary.copy()
vec1 = []
vec2 = []
for k,v in mapping.items():
for i in v:
if i == "":
continue
else:
vec1.append(int(i))
vec2.append(int(k))
out_im = np.zeros_like(array)
for k,v in dict(zip(vec1,vec2)).items():
out_im[array==k] =v
return out_im
def map_fmp2kc(dictionary,array):
'''maps values on fmp image to kc'''
mapping = dictionary.copy()
vec1 = []
vec2 = []
for k,v in mapping.items():
vec1.append(k)
vec2.append(v)
out_im = np.zeros_like(array)
for k,v in dict(zip(vec1,vec2)).items():
out_im[array==k] =v
return out_im
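# Illustrative sketch (not part of the original script): both mappers above do a
# plain integer relabel; pixel values that match a key become the mapped value and
# everything else stays 0.  Note the output dtype follows the input array
# (np.zeros_like), so pass a float array when the mapped values are fractional kc.
def _demo_map_fmp2kc():
    arr = np.array([[1.0, 2.0], [2.0, 3.0]])
    out = map_fmp2kc({1: 0.9, 2: 1.1}, arr)  # 3 has no entry, so it stays 0
    return out  # expected: [[0.9, 1.1], [1.1, 0.0]]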
def get_monthly_et(dataset, start, end, aoi):
'''
Get gridded monthly ET sums from MODIS
'''
ImageCollection = dataset[0]
var = dataset[1]
scaling_factor = dataset[2]
resolution = dataset[3]
dt_idx = pd.date_range(start,end, freq='MS')
ims = []
seq = ee.List.sequence(0, len(dt_idx)-1)
num_steps = seq.getInfo()
for i in num_steps[:]:
t1 = ee.Date(start).advance(i, 'month')
t2 = t1.advance(1, 'month');
im = ee.Image(ImageCollection.select(var).filterDate(t1, t2).sum().set('system:time_start', t1.millis()))
modis_dat = im.pixelLonLat().addBands(im).multiply(scaling_factor).reduceRegion(reducer=ee.Reducer.toList(),
geometry=aoi,
scale=1000, crs ='EPSG:4326')
modis_dict = modis_dat.getInfo()
modis_im = dict2arr(modis_dict, var)
ims.append(modis_im)
return ims
def calc_monthly_sum(dataset, startdate, enddate, area):
'''
    Calculates monthly sums (as a pd.DataFrame) for an EE dataset, given a start date, end date, and area.
    Each dataset is a tuple stored in the `data` dict returned by rs.load_data().
    Note the "scaling_factor" parameter: EE provides it for each dataset, and it is further scaled by the
    product's temporal resolution so that the sums come out monthly; the values used are written out in the `data` dict.
    EE will throw a cryptic error if the date range is not valid for the product of interest, or if the AOI is
    degenerate (e.g. in the middle of the ocean).
'''
ImageCollection = dataset[0]
var = dataset[1]
scaling_factor = dataset[2]
resolution = dataset[3]
dt_idx = pd.date_range(startdate,enddate, freq='MS')
sums = []
seq = ee.List.sequence(0, len(dt_idx)-1)
num_steps = seq.getInfo()
for i in num_steps:
start = ee.Date(startdate).advance(i, 'month')
end = start.advance(1, 'month');
im = ee.Image(ImageCollection.select(var).filterDate(start, end).sum().set('system:time_start', start.millis()))
scale = im.projection().nominalScale()
scaled_im = im.multiply(scaling_factor).multiply(ee.Image.pixelArea()).multiply(1e-12) # mm --> km^3
sumdict = scaled_im.reduceRegion(
reducer = ee.Reducer.sum(),
geometry = area,
scale = resolution,
bestEffort= True)
total = sumdict.getInfo()[var]
sums.append(total)
sumdf = pd.DataFrame(np.array(sums), dt_idx)
sumdf.columns = [var]
df = sumdf.astype(float)
return df
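# Illustrative usage sketch (not part of the original script): each entry of
# rs.load_data() is assumed to be a tuple of (ee.ImageCollection, band_name,
# scaling_factor, native_resolution_m), which is how calc_monthly_sum unpacks it.
# Running this needs an authenticated Earth Engine session; the 'modis_aet' key
# and the AOI argument mirror how process_poly() calls it further down.
def _demo_calc_monthly_sum(aoi):
    data = rs.load_data()
    aet_km3 = calc_monthly_sum(data['modis_aet'], '2015-01-01', '2015-12-31', aoi)
    return aet_km3  # one row per month, values in km^3 over the AOI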
def resample_1km_30m(im_1km,im_30m):
'''
    Interpolates 1 km MODIS data onto the 30 m Landsat grid
'''
W, H = im_1km.shape[:2]
new_W, new_H = im_30m.shape[:2]
xrange = lambda x: np.linspace(0, 1, x)
f = interp.interp2d(xrange(H), xrange(W), im_1km, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
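# Illustrative sketch (not part of the original script): the resampler only looks
# at the two array shapes, so any coarse/fine pair works.  Upsampling a 2x2 block
# onto a 6x6 template bilinearly preserves the corner values (0.0 and 3.0).
# Assumes a SciPy version that still ships interp2d, as the function above does.
def _demo_resample_1km_30m():
    coarse = np.array([[0.0, 1.0], [2.0, 3.0]])
    fine_template = np.zeros((6, 6))
    return resample_1km_30m(coarse, fine_template)  # smooth 6x6 ramp from 0.0 to 3.0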
def interp_modis_nans(modis_image):
'''
    Interpolates NaNs in MODIS imagery. Doesn't work if a whole row/col at the edge of the image is all NaNs
'''
W, H = modis_image.shape[:2]
# Mask nans
array = np.ma.masked_invalid(modis_image)
# Make the outgrid
xi = np.linspace(0, H, H)
yi = np.linspace(0, W, W)
xx, yy = np.meshgrid(xi, yi)
# xx, yy = np.meshgrid(new_W, new_H)
x1 = xx[~array.mask]
y1 = yy[~array.mask]
newarr = array[~array.mask]
new_arr = interp.griddata((x1, y1), newarr.ravel(), (xx, yy),method='linear')
return new_arr
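# Illustrative sketch (not part of the original script): a single interior NaN is
# filled from its neighbours (here, a value near 2.0); as the docstring warns, an
# all-NaN row or column at the image edge would stay NaN, since griddata does not
# extrapolate.
def _demo_interp_modis_nans():
    im = np.array([[1.0, 1.0, 1.0],
                   [1.0, np.nan, 3.0],
                   [3.0, 3.0, 3.0]])
    return interp_modis_nans(im)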
def find_nearest_nlcd(yearint, yearlist = [2001, 2004, 2006, 2008, 2011, 2013, 2016]):
absolute_diff = lambda list_value : abs(list_value - yearint)
closest_value = min(yearlist, key=absolute_diff)
return closest_value
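# Illustrative sketch (not part of the original script): years are snapped to the
# closest NLCD release in the default list; ties resolve to the earlier year,
# because min() keeps the first closest element.
def _demo_find_nearest_nlcd():
    assert find_nearest_nlcd(2002) == 2001
    assert find_nearest_nlcd(2014) == 2013
    assert find_nearest_nlcd(2020) == 2016
    return True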
def process_poly(polylist):
'''
main routine
'''
polygon, polyidx, outdir = polylist[0], polylist[1], polylist[2]
tqdm.write("Processing Polygon {}".format(polyidx))
# Setup write dir
# outdir = os.path.join(os.getcwd(), "../data/ETkc")
# if not os.path.exists(outdir):
# os.mkdir(outdir)
# Check if file already exists
outfn = os.path.join(outdir, str(polyidx) +".csv")
if os.path.exists(outfn):
print("already processed {} ... skipping".format(polyidx))
return
# Load data
kc = pd.read_csv('../data/fmp_kc_faunt.csv')
data = rs.load_data()
aoi = ee.Geometry.Polygon(polygon)
polarea = float(aoi.area().getInfo())
# check if polygon is very small, if area < 1 pixel, skip
if polarea < 500**2:
print("{} is smaller than 1 MODIS Pixel, skipping =====================".format(polyidx))
print("area = {} m^2".format(str(polarea)))
print(polygon)
return
else:
try:
# Define timerange
years = range(2001, 2021)
yearlydat = []
for y in years[:]:
yearstart = "{}-01-01".format(str(y))
yearend = "{}-12-31".format(str(y))
                # Select the NLCD dataset closest in time (2001, 2004, 2006, 2008, 2011, 2013, 2016)
nearest_year_start = "{}-01-01".format(str(find_nearest_nlcd(y)))
nlcd_col = ee.ImageCollection('USGS/NLCD')
nlcd = nlcd_col.filterDate(ee.Date(nearest_year_start), ee.Date(nearest_year_start).advance(1, 'years')).first()
# Compile NLCD
nlcd_dat = ee.Image.pixelLonLat().addBands(nlcd).reduceRegion(reducer=ee.Reducer.toList(),geometry=aoi,scale=30)
nlcd_dict = nlcd_dat.getInfo()
# get PET classes (11-water, 81-crops, 82-pasture), and make everything else AET
nlcd_im = dict2arr(nlcd_dict, 'landcover')
petmask = np.isin(nlcd_im, [11,81], invert = False).reshape(nlcd_im.shape).astype(int)
aetmask = np.isin(nlcd_im, [11,81], invert = True).reshape(nlcd_im.shape).astype(int)
# Select the correct or most recent CDL
if y < 2008:
cdl = ee.Image("USDA/NASS/CDL/2008")
else:
cdl = ee.Image("USDA/NASS/CDL/{}".format(str(y)))
# Compile CDL
cdl_dat = ee.Image.pixelLonLat().addBands(cdl).reduceRegion(reducer=ee.Reducer.toList(),geometry=aoi,scale=30)
cdl_dict = cdl_dat.getInfo()
# Make the ims
cdl_im = dict2arr(cdl_dict, 'cropland')
# Map values from the CDL to the FMP
mapping = rs.cdl_2_faunt()
fmp_im = map_cdl2fmp(mapping, cdl_im)
# Map values from the FMP to kc (Schmid, 2004)
monthly_ims = []
for i in kc.columns[2:]:
kcvals = kc[i]
kckeys =kc[kc.columns[0]]
kcdict = dict(zip(kckeys, kcvals))
kc_im = map_fmp2kc(kcdict, fmp_im)
monthly_ims.append(kc_im)
aet = calc_monthly_sum(data['modis_aet'], yearstart, yearend, aoi)
pet = calc_monthly_sum(data['modis_pet'], yearstart, yearend, aoi)
aetims = get_monthly_et(data['modis_aet'], yearstart, yearend, aoi = aoi)
petims = get_monthly_et(data['modis_pet'], yearstart, yearend, aoi = aoi)
# Record mean kc per image
kc_means = np.array([np.mean(x) for x in monthly_ims])
# Apply the kc method, convert mm to km = 1e-6; m^2 to km^2 = 1e-6; 900 m^2 / cell
sums = []
for aetim, petim ,kcim in zip(aetims, petims, monthly_ims):
tpet = np.nansum(resample_1km_30m(interp_modis_nans(petim), kcim)* kcim *petmask)* 1e-12 * 900
taet = np.nansum(resample_1km_30m(interp_modis_nans(aetim), kcim)* aetmask)*1e-12 * 900
sums.append(np.sum([tpet, taet]))
petsum = [np.nansum(x)*1e-9 * 900 for x in petims]
aetsum = [np.nansum(x)*1e-9 * 900 for x in aetims]
ETdf = | pd.DataFrame([sums]) | pandas.DataFrame |
# coding: utf-8
import re
import numpy as np
import pandas as pd
def vnpy_opt_DAY_IN(opt):
"""
    Cleans vnpy optimization results; used for DAY_IN and DAY_OUT
:param opt:
:return:
"""
data = re.compile(r'DAY_IN\':\s(\d+),\s\'DAY_OUT\':\s(\d+)\}"\]:\s([\d\.]+)').findall(opt)
data = np.array(data).T
dic = {
"DAY_IN": pd.Series(data[0], dtype=np.int),
"DAY_OUT": pd.Series(data[1], dtype=np.int),
"capital": pd.Series(data[2], dtype=np.float)
}
return pd.DataFrame(dic)
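# Illustrative sketch (not part of the original module): a made-up one-line sample
# of the optimizer output this regex expects; the exact vnpy log format is an
# assumption here.  Runs against the NumPy/pandas versions this module targets
# (np.int is used above).
def _demo_vnpy_opt_DAY_IN():
    sample = "[\"{'DAY_IN': 10, 'DAY_OUT': 20}\"]: 1050000.0"
    return vnpy_opt_DAY_IN(sample)  # expected: one row, DAY_IN=10, DAY_OUT=20, capital=1050000.0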
def vnpy_opt_DAY_IN_2(opt):
"""
    Cleans vnpy optimization results; specifically for DAY_IN_2
:param opt:
:return:
"""
data = re.compile(r'DAY_IN_2\':\s(\d+)\}"\]:\s([\d\.]+)').findall(opt)
data = np.array(data).T
dic = {
"DAY_IN_2": pd.Series(data[0], dtype=np.int),
"capital": pd.Series(data[1], dtype=np.float)
}
return pd.DataFrame(dic)
def testest():
| pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import os
import logging
import yaml
import datetime
import json
import time
import sys
import holoviews as hv
from holoviews import opts
from holoviews.element import Div
from bokeh.models import HoverTool
hv.extension('bokeh')
allowed_ontologies = ["KO", "EC", "SSO", "RO", "META", "MSRXN",
"MSCPD", "MSCPX", "BIGG", "BIGGCPD", "GO", "TC", "RHEA"]
def df_to_ontology(params, pass_df=None, method="Import Annotations"):
'''
    Takes the text file from staging, or the pandas df passed from the merge
    app, and converts it to an ontology dictionary suitable for the annotation
    ontology API add_annotation_ontology_events() method.
    The merge app also calls this and can use the same params that the import
    app supplies; all shared fields are present in both (except for
    annotation_file, which html_add_ontology_summary needs to handle).
    The new bulk app also calls this, using pass_df and a "fake" params dict.
'''
if isinstance(pass_df, pd.DataFrame):
annotations = pass_df
else:
if 'debug' in params and params['debug'] is True:
annotations_file_path = os.path.join(
'/kb/module/test/test_data', params['annotation_file'])
else:
annotations_file_path = os.path.join("/staging/", params['annotation_file'])
annotations = pd.read_csv(annotations_file_path,
sep='\t',
header=None,
names=['gene', 'term']
)
# remove duplicate rows, if any
annotations = annotations.drop_duplicates()
ontology = {
'event_id': params['description'],
'description': params['description'],
'ontology_id': params['ontology'],
'method': method, # from above
'method_version': get_app_version(),
"timestamp": datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"),
'ontology_terms': {},
'gene_count': int(annotations['gene'].nunique()), # not used in the api
'term_count': int(annotations['term'].nunique()) # not used in the api
}
# add imported terms
for index, row in annotations.iterrows():
if pd.notnull(row['term']):
if row['gene'] in ontology['ontology_terms']:
ontology['ontology_terms'][row['gene']].append(
{'term': row['term']}
)
else:
ontology['ontology_terms'][row['gene']] = [
{'term': row['term']}
]
return [ontology]
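# Illustrative sketch of the inputs df_to_ontology() expects (assumed values, not
# from the source): a two-column gene/term frame via pass_df plus the shared params
# keys named in the docstring. The real call is left commented out because
# get_app_version() reads /kb/module/kbase.yml inside the KBase container.
def _example_df_to_ontology_inputs():
    example_pass_df = pd.DataFrame({'gene': ['geneA', 'geneA', 'geneB'],
                                    'term': ['K00001', 'K00002', 'K00003']})
    example_params = {'description': 'my_import_event', 'ontology': 'KO'}
    # df_to_ontology(example_params, pass_df=example_pass_df)
    return example_params, example_pass_df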
def bulk_df_to_ontology(params):
ontologies = []
if 'debug' in params and params['debug'] is True:
annotations_file_path = os.path.join(
'/kb/module/test/test_data', params['annotation_file'])
else:
annotations_file_path = os.path.join("/staging/", params['annotation_file'])
annotations = pd.read_csv(annotations_file_path,
sep='\t',
header=None,
names=['gene', 'term', 'ontology', 'description']
)
for description, description_df in annotations.groupby(annotations['description']):
for ontology, ontology_df in description_df.groupby(description_df['ontology']):
if ontology.upper() not in allowed_ontologies:
sys.exit(f"ERROR: {ontology} is not a valid Ontology string")
time.sleep(2) # This just "guarantees" the timestamps will all be different
ontology_df = ontology_df[ontology_df['term'].notna()]
ontology_df = ontology_df.drop_duplicates()
ontology = {
'event_id': description,
'description': description,
'ontology_id': ontology.upper(),
'method': "Import Bulk Annotations",
'method_version': get_app_version(),
"timestamp": datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"),
'ontology_terms': {},
'gene_count': int(ontology_df['gene'].nunique()), # not used in the api
'term_count': int(ontology_df['term'].nunique()) # not used in the api
}
# add imported terms
for index, row in ontology_df.iterrows():
if pd.notnull(row['term']):
if row['gene'] in ontology['ontology_terms']:
ontology['ontology_terms'][row['gene']].append(
{'term': row['term']}
)
else:
ontology['ontology_terms'][row['gene']] = [
{'term': row['term']}
]
ontologies.append(ontology)
logging.info(len(ontology['ontology_terms']))
for gene in ontology['ontology_terms']:
logging.info(description + "\t" + gene)
return ontologies
def get_app_version():
with open("/kb/module/kbase.yml", 'r') as stream:
data_loaded = yaml.load(stream)
return str(data_loaded['module-version'])
def html_header():
report = []
report.append("<style>* {font-family: sans-serif; font-size: 14px}</style>")
return report
def html_add_ontology_summary(params, ontology, api_results, output_directory):
logging.info(api_results)
output_file = os.path.join(output_directory, "add_ontology_summary.html")
# Make report directory and copy over files
report = html_header()
report.append(f'<h3>Import Annotations Summary</h3>')
report.append(f'<b>Import App version:</b> {get_app_version()}<br>')
if "annotation_file" in params:
report.append(f'<b>Annotations file:</b> {params["annotation_file"]}<br>')
report.append(f'<b>Input Ref:</b> {params["genome"]}<br>')
report.append(f'<b>Output Ref:</b> {api_results["output_ref"]}<br>')
report.append(f'<b>Output Name:</b> {api_results["output_name"]}<br><br>')
report.append(f'<b>Features (found):</b> {api_results["ftrs_found"]}<br>')
report.append(f'<b>Features (not found):</b> {len(api_results["ftrs_not_found"])}<br><br>')
# make table
report.append(
'<table cellspacing="0" cellpadding="3" border="1"><tr><th>Description</th><th>Timestamp</th><th>Ontology</th><th>Genes in file</th><th>Terms in file</th></tr>')
for import_event in ontology:
gene_count = len(import_event["ontology_terms"])
# term_count =
report.append(
f'<tr style="background-color:#EEEEEE"><td>{import_event["description"].split(":")[0]}</td><td>{import_event["timestamp"]}</td><td>{import_event["ontology_id"]}</td><td>{import_event["gene_count"]}</td><td>{import_event["term_count"]}</td></tr>')
report.append('</table>')
# add missing terms
if len(api_results["ftrs_not_found"]) > 0:
report.append(
f'<br><b>These genes were not found in the genome:</b> <br>{("<br>").join(api_results["ftrs_not_found"])}<br>')
# Write to file
with open(output_file, 'w') as f:
for line in report:
f.write(line + "\n")
return {'path': output_directory,
'name': os.path.basename(output_file),
'description': 'HTML report for import_annotations app'}
def get_event_lists(ontology):
# print(type(ontology['feature_types']))
gene_features = [k for k, v in ontology['feature_types'].items() if v == "gene"]
events = {}
for event in ontology["events"]:
event_id = event['event_id']
events[event_id] = {'genes': [],
'terms': [],
'msrxns': [],
'gene_msrxns': [],
'description': event['description'],
'timestamp': event['timestamp'],
'method': event['method'],
'method_version': event['method_version'],
'ontology_id': event['ontology_id']
}
for gene in event["ontology_terms"]:
if gene in gene_features:
events[event_id]['genes'].append(gene)
for entry in event["ontology_terms"][gene]:
if "term" in entry.keys():
events[event_id]['terms'].append(entry['term'])
if "modelseed_ids" in entry.keys():
events[event_id]['msrxns'] += entry['modelseed_ids']
for msrxn in entry['modelseed_ids']:
events[event_id]['gene_msrxns'].append(gene + '_' + msrxn)
events[event_id]['genes'] = list(set(events[event_id]['genes']))
events[event_id]['terms'] = list(set(events[event_id]['terms']))
events[event_id]['msrxns'] = list(set(events[event_id]['msrxns']))
events[event_id]['gene_msrxns'] = list(set(events[event_id]['gene_msrxns']))
return events
def html_get_ontology_summary(event_summary, output_directory, to_highlight=[]):
'''
Only counts gene features, ignores cds
The highlight part is a list of descriptions to have their rows highlighted.
'''
output_file = os.path.join(output_directory, "get_ontology_summary.html")
report = html_header()
report.append(f'<h3>Compare Annotations Summary</h3>')
report.append(
'<table cellspacing="0" cellpadding="3" border="1"><tr><th>Description</th><th>Timestamp</th><th>KBase App</th><th>Ontology</th><th>Genes</th><th>Unique Terms</th><th>Unique ModelSEED rxns</th><th>Unique Gene/ModelSEED rxn pairs</th></tr>')
# get counts and add to new line of table
for event in event_summary:
if event_summary[event]["description"] in to_highlight:
report.append(f'<tr style="background-color:#6CD075"><td>{event_summary[event]["description"].split(":")[0]}</td><td>{event_summary[event]["timestamp"]}</td><td>{event_summary[event]["method"]} v{event_summary[event]["method_version"]}</td><td>{event_summary[event]["ontology_id"]}</td><td>{len(event_summary[event]["genes"])}</td><td>{len(event_summary[event]["terms"])}</td><td>{len(event_summary[event]["msrxns"])}</td><td>{len(event_summary[event]["gene_msrxns"])}</td></tr>')
else:
report.append(f'<tr><td>{event_summary[event]["description"].split(":")[0]}</td><td>{event_summary[event]["timestamp"]}</td><td>{event_summary[event]["method"]} v{event_summary[event]["method_version"]}</td><td>{event_summary[event]["ontology_id"]}</td><td>{len(event_summary[event]["genes"])}</td><td>{len(event_summary[event]["terms"])}</td><td>{len(event_summary[event]["msrxns"])}</td><td>{len(event_summary[event]["gene_msrxns"])}</td></tr>')
report.append('</table>')
if len(to_highlight) > 0:
report.append(
f'<span style="background-color:#6CD075;font-size:12px"><i>* these highlighted rows were used for the merge</i></span>')
print(output_file)
# Write to file
with open(output_file, 'w') as f:
for line in report:
f.write(line + "\n")
return {'path': output_directory,
'name': os.path.basename(output_file),
'description': 'Summary Report'}
def merge_details_report(df, output_directory):
output_file = os.path.join(output_directory, "merge_details.txt")
df.to_csv(output_file, sep="\t", index=False)
return {'path': output_directory,
'name': os.path.basename(output_file),
'description': 'Merge Details'}
def filter_selected_ontologies(ontology, params, workflow="compare"):
'''
unique will not use params and just give all unique event_ids.
compare and merge workflows filter out the ontologies to those selected in
the UI, but the merge workflow also adds the annotation_weights to the
events. both return all unique if no events are selected, and defaulting to
a weight of 1 for the merge
The unique functionality is added because of a current bug
'''
ontology_selected = {"events": [],
"feature_types": ontology["feature_types"]}
added_ontologies = []
# the list of selections have different names depending on the app
if workflow == "compare":
selected_events = params['annotations_to_compare']
elif workflow == "merge":
selected_events = params['annotations_to_merge']
elif workflow == "unique":
selected_events = []
for event in ontology["events"]:
if event["description"] not in added_ontologies: # keeps duplicates from being added twice
if workflow == "unique": # add all, don't filter
ontology_selected['events'].append(event)
added_ontologies.append(event["description"])
else:
if len(selected_events) == 0: # if nothing is selected, then grab all events
if workflow == "merge":
event['annotation_weight'] = 1
ontology_selected['events'].append(event)
added_ontologies.append(event["description"])
else: # then grab only events in selected events, which is different for compare vs merge
if workflow == "compare":
if event["description"] in selected_events:
ontology_selected['events'].append(event)
added_ontologies.append(event["description"])
elif workflow == "merge":
for selected_event in selected_events:
# add if event in selected events, or if the first annotation_source is empty
if event["description"] in selected_event["annotation_source"] or len(selected_events[0]['annotation_source']) == 0:
event['annotation_weight'] = selected_event["annotation_weight"]
ontology_selected['events'].append(event)
added_ontologies.append(event["description"])
return ontology_selected
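# Illustrative sketch (hypothetical two-event ontology, not from the source):
# filter_selected_ontologies() is pure, so the merge workflow can be shown directly;
# only the selected event is kept and it picks up the weight chosen in the UI.
def _demo_filter_selected_ontologies():
    ontology = {
        'feature_types': {'geneA': 'gene'},
        'events': [
            {'description': 'RAST annotation', 'event_id': 'e1', 'ontology_terms': {}},
            {'description': 'DRAM annotation', 'event_id': 'e2', 'ontology_terms': {}},
        ],
    }
    params = {'annotations_to_merge': [
        {'annotation_source': ['RAST annotation'], 'annotation_weight': 0.5}]}
    selected = filter_selected_ontologies(ontology, params, workflow="merge")
    # selected['events'] -> one event ('RAST annotation') with annotation_weight 0.5
    return selected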
def merge_ontology_events(ontology):
'''
The annotation ontology api can put annotations in the cds features as well
as the gene features. This code only considers gene features, and ignores
annotations in cds features. I think they only get added to cds features if
they were aliases
Now, adds to a dictionary by gene/rxn/event... this will keep an event from
double dipping and scoring twice for a gene_msrxn pair
'''
# get counts and add to new line of table
gene_features = [k for k, v in ontology['feature_types'].items() if v == "gene"]
ontology_merged = {}
for event in ontology["events"]:
event_id = event['event_id']
for gene in event["ontology_terms"]:
if gene in gene_features:
for entry in event["ontology_terms"][gene]:
if "modelseed_ids" in entry.keys():
for MSRXN in entry['modelseed_ids']:
if gene in ontology_merged:
if MSRXN in ontology_merged[gene]:
ontology_merged[gene][MSRXN][event_id] = event['annotation_weight']
else:
ontology_merged[gene][MSRXN] = {
event_id: event['annotation_weight']}
else:
ontology_merged[gene] = {
MSRXN: {event_id: event['annotation_weight']}}
return ontology_merged
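# Illustrative sketch (hypothetical input, not from the source): gene features are
# kept, cds features are ignored, and the result is keyed gene -> MSRXN -> event so
# a single event cannot score the same gene/reaction pair twice.
def _demo_merge_ontology_events():
    ontology = {
        'feature_types': {'geneA': 'gene', 'geneA.CDS': 'CDS'},
        'events': [{
            'event_id': 'e1',
            'annotation_weight': 1,
            'ontology_terms': {
                'geneA': [{'modelseed_ids': ['MSRXN12345']}],
                'geneA.CDS': [{'modelseed_ids': ['MSRXN12345']}],  # ignored: not a gene
            },
        }],
    }
    return merge_ontology_events(ontology)  # {'geneA': {'MSRXN12345': {'e1': 1}}}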
def score_mergers(ontology_merged, params):
'''
returns a pandas dataframe suitable for the import annotations workflow
'''
df = pd.DataFrame(columns=['gene', 'term', 'score', 'gene_treshold'])
for gene_id in ontology_merged:
if params["keep_best_annotation_only"] == 1:
best_score = 0
for MSRXN in ontology_merged[gene_id]:
MSRXN_sum = sum(ontology_merged[gene_id][MSRXN].values())
if MSRXN_sum > best_score:
best_score = MSRXN_sum
# if best only is true and best_score is above threshold, use best_score as new threshold
if best_score > params["annotation_threshold"]:
gene_threshold = best_score
else:
gene_threshold = params["annotation_threshold"]
else:
gene_threshold = params["annotation_threshold"]
for MSRXN in ontology_merged[gene_id]:
MSRXN_sum = sum(ontology_merged[gene_id][MSRXN].values())
event_series = pd.Series(ontology_merged[gene_id][MSRXN])
# logging.info(
# gene_id + "\t" + MSRXN + "\t" + str(ontology_merged[gene_id][MSRXN]) + "\t" + str(MSRXN_sum) + "\t" + str(gene_threshold))
score_series = pd.Series(data={
'gene': gene_id,
'term': MSRXN,
'score': MSRXN_sum,
'gene_treshold': gene_threshold})
score_series = score_series.append(event_series)
if MSRXN_sum >= gene_threshold:
score_series = score_series.append(pd.Series({'pass': 1}))
else:
score_series = score_series.append( | pd.Series({'pass': 0}) | pandas.Series |
import pandas as pd
import numpy as np
import joblib
import time
df2016 = pd.read_csv('/home/shaury/Desktop/pvsc/alibaba/final.csv')
df2017 = pd.read_csv('/home/shaury/final2017.csv')
df2018 = pd.read_csv('/home/shaury/Desktop/pvsc/alibaba/final2016.csv')
df = pd.concat([df2016,df2017,df2018]).drop("Unnamed: 0",axis=1).reset_index(drop=True)
df['time'] = | pd.to_datetime(df['time'],format= "%Y-%m-%d %H:%M:%S") | pandas.to_datetime |
from emgpb2.models import *
from simulation.path_drawer import draw_path as drawer
import pandas as pd
def create_path_constant_volocity_one_model(output_measurements='data/measurement1.csv',
output_groundtruth='data/groundtruth1.csv',
q=0.5,
r=1.0,
state_dim=4,
obs_dim=2,
t=200):
# create constant velocity model
constant_velocity = ConstantVelocity(dt=1.0, q=q, r=r, state_dim=state_dim, obs_dim=obs_dim)
Q = constant_velocity.Q
R = constant_velocity.R
F = constant_velocity.A
H = constant_velocity.H
# Start point
x_tminus1 = np.asarray([[0.0], [0.0], [0.0], [0.0]])
path = []
meas = []
for i in range(t):
x_t_ = F @ x_tminus1
x_t = np.random.multivariate_normal(np.squeeze(x_t_), Q)
x_t = x_t.reshape((4, 1))
y_t_ = H @ x_t
y_t = np.random.multivariate_normal(np.squeeze(y_t_), R)
y_t = y_t.reshape((2, 1))
path.append(x_t)
meas.append(y_t)
x_tminus1 = x_t
path = np.squeeze(np.asarray(path))
meas = np.squeeze(np.asarray(meas))
# drawer(meas, path)
print('F:')
print(F)
print('H:')
print(H)
print('Q: ')
print(Q)
print('R: ')
print(R)
print('=====================================================')
if output_measurements is not None:
meas_df = pd.DataFrame(meas)
meas_df.to_csv(output_measurements, index=False, header=False)
if output_groundtruth is not None:
truth_df = pd.DataFrame(path)
truth_df.to_csv(output_groundtruth, index=False, header=False)
return meas
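# Illustrative sketch (assumed parameters; output paths set to None so the demo
# does not overwrite the default data/ CSVs).
def _demo_constant_velocity_path():
    meas = create_path_constant_volocity_one_model(
        output_measurements=None, output_groundtruth=None,
        q=0.5, r=1.0, t=100)
    return meas  # ndarray of shape (100, 2): noisy x/y observations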
def create_path_constant_volocity_multi_model(output_measurements='data/measurement2.csv',
output_groundtruth='data/groundtruth2.csv',
q: list = [1.0, 6.0],
r: list = [0.75, 0.5],
state_dim=4,
obs_dim=2,
t=200,
change_pnt=[100]):
# create constant velocity model
num_of_models = len(q)
constant_velocity_list = []
for i in range(num_of_models):
constant_velocity_list.append(ConstantVelocity(dt=1.0, q=q[i], r=r[i], state_dim=state_dim, obs_dim=obs_dim))
Q = []
R = []
F = []
H = []
for i in range(num_of_models):
Q.append(constant_velocity_list[i].Q)
R.append(constant_velocity_list[i].R)
F.append(constant_velocity_list[i].A)
H.append(constant_velocity_list[i].H)
# Start point
x_tminus1 = np.asarray([[0.0], [0.0], [0.0], [0.0]])
# model switching controller
kf_ind = 0
kf_change_pnt = change_pnt
path = []
meas = []
for i in range(t):
x_t_ = F[kf_ind] @ x_tminus1
x_t = np.random.multivariate_normal(np.squeeze(x_t_), Q[kf_ind])
x_t = x_t.reshape((state_dim, 1))
y_t_ = H[kf_ind] @ x_t
y_t = np.random.multivariate_normal(np.squeeze(y_t_), R[kf_ind])
y_t = y_t.reshape((obs_dim, 1))
if i in kf_change_pnt:
kf_ind += 1
path.append(x_t)
meas.append(y_t)
x_tminus1 = x_t
path = np.squeeze(np.asarray(path))
meas = np.squeeze(np.asarray(meas))
# drawer(meas, path)
for i in range(len(kf_change_pnt) + 1):
print('F_' + str(i) + ': ')
print(F[i])
print('H_' + str(i) + ': ')
print(H[i])
print('Q_' + str(i) + ': ')
print(Q[i])
print('R_' + str(i) + ': ')
print(R[i])
print('-----------------------------------')
print('=====================================================')
if output_measurements is not None:
meas_df = pd.DataFrame(meas)
meas_df.to_csv(output_measurements, index=False, header=False)
if output_groundtruth is not None:
truth_df = pd.DataFrame(path)
truth_df.to_csv(output_groundtruth, index=False, header=False)
return meas
def create_path_random_walk_multi_model(output_measurements='data/measurement3.csv',
output_groundtruth='data/groundtruth3.csv',
q: list = [2.0, 10.0],
r: list = [1.0, 0.8],
state_dim=2,
t=500,
change_pnt=[300]):
# create constant velocity model
num_of_models = len(q)
random_walk_list = []
for i in range(num_of_models):
random_walk_list.append(RandomWalk(q=q[i], r=r[i], state_dim=state_dim))
Q = []
R = []
F = []
H = []
for i in range(num_of_models):
Q.append(random_walk_list[i].Q)
R.append(random_walk_list[i].R)
F.append(random_walk_list[i].A)
H.append(random_walk_list[i].H)
# Start point
x_tminus1 = np.asarray([[0.0], [0.0]])
# model switching controller
kf_ind = 0
kf_change_pnt = change_pnt
path = []
meas = []
for i in range(t):
x_t_ = F[kf_ind] @ x_tminus1
x_t = np.random.multivariate_normal(np.squeeze(x_t_), Q[kf_ind])
x_t = x_t.reshape((2, 1))
y_t_ = H[kf_ind] @ x_t
y_t = np.random.multivariate_normal(np.squeeze(y_t_), R[kf_ind])
y_t = y_t.reshape((2, 1))
if i in kf_change_pnt:
kf_ind += 1
path.append(x_t)
meas.append(y_t)
x_tminus1 = x_t
path = np.squeeze(np.asarray(path))
meas = np.squeeze(np.asarray(meas))
# drawer(meas, path)
for i in range(len(kf_change_pnt) + 1):
print('F_' + str(i) + ': ')
print(F[i])
print('H_' + str(i) + ': ')
print(H[i])
print('Q_' + str(i) + ': ')
print(Q[i])
print('R_' + str(i) + ': ')
print(R[i])
print('-----------------------------------')
print('=====================================================')
if output_measurements is not None:
meas_df = | pd.DataFrame(meas) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
import itertools
def dados_teses():
diretorio = "/media/hdvm02/bd/007/002/007/002"
teses_anos = sorted(os.listdir(diretorio))
lista_dfs = []
for tese_ano in teses_anos:
csv = os.path.join(diretorio,tese_ano)
teses = pd.read_csv(csv, sep=";", encoding='latin-1', on_bad_lines='skip', low_memory=False)
lista_dfs.append(teses)
print(tese_ano)
#print(teses.columns.tolist())
#print(teses.shape)
print(teses.dtypes)
#print(teses.tail())
#print(teses.describe())
print("########")
print("########")
#dataset_teses = pd.concat(lista_dfs, ignore_index=True)
#dataset_teses.to_csv(f'{diretorio}/01_dados_1987_2012.csv', index=False)
#print(dataset_teses)
#return dataset_teses
def analisa_teses():
diretorio = "/media/hdvm02/bd/007/002/007/002"
#df_teses = dados_teses()
teses = pd.read_csv(f'{diretorio}/01_dados_1987_2012.csv')
busca = teses[teses["TituloTese"].str.contains("mercosul|mercosur", case=False, na=False)]
#print(busca)
agrupar_por_ano = busca["TituloTese"].groupby(busca["AnoBase"])
busca_por_ano = agrupar_por_ano.count()
#print(busca_por_ano)
quant_termos = busca_por_ano.values.tolist()
anos = busca_por_ano.index.tolist()
fig_pandas = busca_por_ano.plot(kind="line", x=anos, y=quant_termos)
print(fig_pandas)
def gera_dataframes():
diretorio = "/media/hdvm02/bd/007/002/007/002"
anos = sorted(os.listdir(diretorio))
anos_1987_2012 = anos[1:27]
anos_2013_2016 = anos[27:31]
anos_2017_2021 = anos[31:]
df_1987_2012 = []
df_2013_2016 = []
df_2017_2021 = []
df_geral = []
for ano_1987, ano_2013, ano_2017 in itertools.zip_longest(anos_1987_2012, anos_2013_2016, anos_2017_2021):
csv_1987 = os.path.join(diretorio, ano_1987)
teses = pd.read_csv(csv_1987, sep=";", encoding='latin-1', on_bad_lines='skip', low_memory=False)
df_1987_2012.append(teses)
if ano_2013 != None:
csv_2013 = os.path.join(diretorio, ano_2013)
teses = pd.read_csv(csv_2013, sep=";", encoding='latin-1', on_bad_lines='skip', low_memory=False)
df_2013_2016.append(teses)
if ano_2017 != None:
csv_2017 = os.path.join(diretorio, ano_2017)
teses = pd.read_csv(csv_2017, sep=";", encoding='latin-1', on_bad_lines='skip', low_memory=False)
df_2017_2021.append(teses)
df_1987_2012_final = pd.concat(df_1987_2012, ignore_index=True)
df_2013_2016_final = pd.concat(df_2013_2016, ignore_index=True)
df_2017_2021_final = | pd.concat(df_2017_2021, ignore_index=True) | pandas.concat |
# Libraries
import inspect
import pandas as pd
# Specific
from optparse import OptionParser
# Libraries for pint
from pint import UnitRegistry
# -----------------------------------------
# Main
# -----------------------------------------
# Create
ureg = UnitRegistry() #auto_reduce_dimensions = False
# Define units
ureg.define('beat = count')
ureg.define('breath = count')
ureg.define('copy = count')
ureg.define('percent = count = %')
# Show all attributes
#print(vars(ureg).keys())
"""
methods = inspect.getmembers(ureg, predicate=inspect.ismethod)
functions = inspect.getmembers(ureg, predicate=inspect.isfunction)
#print(ureg.has_option('beat'))
print(ureg.get_name('beat'))
print(ureg.get_name('feo'))
#print(methods)
for e in methods:
print(e[0])
# Show all units
supported_units = pd.DataFrame(vars(ureg)['_units'].keys()).sort_values(0)
#print(supported_units)
#supported_units.to_csv('units.csv')
#supported
distance = 42 * ureg.kilometers
"""
dictionary = {
'hct': 1 * ureg('U/L'), # L/L
'plt': 1 * ureg('kilocount/uL'), # K/uL
'alb': 1 * ureg('mg/dL'), # g/dL, g/L, U/L, umol/L
'wbc': 1 * ureg('kilocount/uL'), # K/uL
'neutrophils': 1 * ureg('gigacount/L'), # x10^9/L or % of WBC
'lymphocytes': 1 * ureg('gigacount/L'), # x10^9/L or % of WBC
'monocytes': 1 * ureg('gigacount/L'), # x10^9/L or % of WBC
'haemoglobin': 1, # g/dL
'ast': 1 * ureg('count/L'), # IU/L
'alt': 1 * ureg('count/L'), # U/L
'alp': 1 * ureg('count/L'), # U/L
'bil': 1 * ureg('mg/dL'), # umol/L or mg/dL
'creatinine': 1, # mg/dL or umol/L
'creatine_kinase': 1 * ureg('ng/mL'), # u/L or ng/mL
'sodium': 1 * ureg('mmol/L'), # mmol/L
'potasium': 1 * ureg('mmol/L'), # mmol/L
'urea': 1 * ureg('mg/dL'), # mmol/L or mg/dL
'lactate': 1 * ureg('mmol/dL'), # mg/dL or mmol/dL
'tk': 1,
'tck': 1,
'fibrinogen': 1 * ureg('g/L'), # g/L
'inr': 1,
'body_temperature': 1 * ureg.celsius, # celsius
'age': 2 * ureg.year, # year
'height': 1 * ureg.cm, # cm
'weight': 1 * ureg.kg, # kg
'pcr_dengue_load': 1 * ureg('copy/mL'), # copies/mL
'igm': 1 * ureg('count/mL'), # u/mL
'igg': 1 * ureg('count/mL'), # u/ml
'dbp': 1 * ureg.mmHg, # mmHg or millimeter_Hg
'sbp': 1 * ureg.mmHg, # mmHg or millimeter_Hg
'pulse': 1 * ureg('beats/minute'), # beats_per_minute (not in pint)
'respiratory_rate': 1 * ureg('breaths/minute'), # breaths_per_minute (not in pint)
'hct_percent': 1 * ureg('percent')
}
# Create DataFrame
df = | pd.Series(dictionary) | pandas.Series |
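# Illustrative sketch for the pint registry above (hypothetical values): the custom
# 'beat'/'breath' definitions make rate quantities convertible like any other pint
# unit, which is how the pulse/respiratory_rate entries are meant to be used.
def _demo_custom_units():
    hr = 72 * ureg('beats/minute')
    return hr.to('beats/second')  # 1.2 beat / second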
import copy
import pickle as pickle
import os
import sys
import time
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from numpy import array, float32
import seaborn as sns
FRAME = 10
TWINDOW = 300
TDELTA = 600 # 300
MIN_FRAME = 30
nan = np.nan
LOG_DIR = 'experiment_logs/'
prefix = os.path.expanduser('~')
SAVE_DIR = prefix+'/Dropbox/'
X_VARS = ['time', 'n_opt_calls', 'n_runs', 'n_learning_iters']
Y_VARS = ['n_success', 'opt_cost', 'tree_life']
def get_colors(n_colors):
return cm.rainbow(np.linspace(0, 1, n_colors))
def get_test_data(keywords, include, exclude, pre=False, rerun=False,
tdelta=TDELTA, wind=TWINDOW, lab='', lenthresh=0.99,
split_runs=False, label_vars=[]):
exp_probs = os.listdir(LOG_DIR)
all_data = {}
for k in keywords:
used = []
all_data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
if dir_name.find(k) < 0 and dir_prefix.find(k) < 0:
continue
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
if len(include):
skip = True
for inc in include:
if dir_name.find(inc) >= 0 or dir_prefix.find(inc) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for exc in exclude:
if dir_name.find(exc) >= 0 or dir_prefix.find(exc) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
full_dir = dir_prefix + dir_name
full_exp = full_dir[:full_dir.rfind('_')]
if full_exp in used: continue
all_data[k][full_exp] = {}
used.append(full_exp)
i = 0
data = []
while i < 20:
cur_dir = '{0}_{1}'.format(full_exp, i)
if not os.path.isdir(cur_dir):
i += 1
continue
fnames = os.listdir(cur_dir)
if pre:
info = [f for f in fnames if f.find('hl_test_pre') >= 0 and f.endswith('pre_log.npy')]
elif rerun:
info = [f for f in fnames if f.find('hl_test') >= 0 and f.endswith('test_log.npy')]
else:
info = [f for f in fnames if f.find('hl_test') >= 0 and f.endswith('test_log.npy')]
if len(info):
for fname in info:
# print(('Loading data from', fname, full_dir))
try:
data.append(np.load(cur_dir+'/'+fname))
except Exception as e:
print('Skipping', fname, full_dir)
continue
label = gen_label(cur_dir, label_vars, split_runs, i)
all_data[k][full_exp][cur_dir] = {}
all_data[k][full_exp][cur_dir][cur_dir] = []
for buf in data:
for pts in buf:
pt = pts[0]
no, nt = int(pt[4]), int(pt[5])
all_data[k][full_exp][cur_dir][cur_dir].append({'time': pt[3], 'success at end': pt[0], 'path length': pt[1], 'distance from goal': pt[2], 'n_data': pt[6], 'key': (no, nt), 'label': label, 'ind': i, 'success anywhere': pt[7], 'optimal_rollout_success': pt[9], 'number of plans': pt[10], 'subgoals anywhere': pt[11], 'subgoals closest distance': pt[12], 'collision': pt[8], 'exp id': i})
if len(pt) > 13:
all_data[k][full_exp][cur_dir][cur_dir][-1]['any target'] = pt[13]
if len(pt) > 14:
all_data[k][full_exp][cur_dir][cur_dir][-1]['smallest tolerance'] = pt[14]
if len(pt) > 16:
all_data[k][full_exp][cur_dir][cur_dir][-1]['success with postcond'] = pt[16]
if len(pt) > 17:
all_data[k][full_exp][cur_dir][cur_dir][-1]['success with adj_eta'] = pt[17]
if len(pt) > 18:
all_data[k][full_exp][cur_dir][cur_dir][-1]['episode return'] = pt[18]
# all_data[k][full_exp][cur_dir][cur_dir].append({'time': (pt[3]//tdelta+1)*tdelta, 'success at end': pt[0], 'path length': pt[1], 'distance from goal': pt[2], 'n_data': pt[6], 'key': (no, nt), 'description': label, 'ind': i, 'success anywhere': pt[7], 'optimal_rollout_success': pt[9], 'number of plans': pt[10]})
i += 1
return all_data
def get_policy_data(policy, keywords=[], exclude=[], include=[]):
exp_probs = os.listdir(LOG_DIR)
data = {}
for k in keywords:
data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
full_dir = dir_prefix + dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
skip = False
for ekey in exclude:
if full_dir.find(ekey) >= 0:
skip = True
if len(include):
skip = True
for inc in include:
if dir_name.find(inc) >= 0 or dir_prefix.find(inc) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for exc in exclude:
if dir_name.find(exc) >= 0 or dir_prefix.find(exc) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
if not os.path.isdir(full_dir) or full_dir.find(k) < 0: continue
full_exp = full_dir[:-1]
if full_exp not in data[k]:
data[k][full_exp] = {}
file_names = os.listdir(full_dir)
r = 'policy_{0}_log.txt'.format(policy)
rollout_data = {}
if not os.path.isfile(full_dir+'/'+r):
r = 'policy_{0}_log.pkl'.format(policy)
if not os.path.isfile(full_dir+'/'+r): continue
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
next_data = str.split(next_data, '\n\n')
try:
r_data = [eval(d) for d in next_data if len(d)]
except:
continue
print('Loading {0} pts for {1}'.format(len(r_data), full_dir+'/'+r))
for pt in r_data:
pt['exp id'] = 0
if type(pt['train_loss']) is dict:
pt['train_loss'] = pt['train_loss']['loss']
if type(pt['val_loss']) is dict:
pt['val_loss'] = pt['val_loss']['loss']
if 'var' in pt and type(pt['var']) is dict:
pt['var'] = pt['var'][policy]
rollout_data[r] = r_data
data[k][full_exp][full_dir] = rollout_data
return data
def get_motion_data(keywords=[], exclude=[], include=[]):
exp_probs = os.listdir(LOG_DIR)
data = {}
for k in keywords:
data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
full_dir = dir_prefix + dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
skip = False
for ekey in exclude:
if full_dir.find(ekey) >= 0:
skip = True
if len(include):
skip = True
for inc in include:
if dir_name.find(inc) >= 0 or dir_prefix.find(inc) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for exc in exclude:
if dir_name.find(exc) >= 0 or dir_prefix.find(exc) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
if not os.path.isdir(full_dir) or full_dir.find(k) < 0: continue
full_exp = full_dir[:-1]
if full_exp not in data[k]:
data[k][full_exp] = {}
file_names = os.listdir(full_dir)
file_names = [fname for fname in file_names if fname.find('MotionInfo') >= 0]
rollout_data = {'motion': []}
for r in file_names:
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
next_data = str.split(next_data, '\n\n')
try:
r_data = [eval(d) for d in next_data if len(d)]
except:
continue
for pt in r_data:
pt['exp id'] = 0
if full_exp.find('full_feed') >= 0:
pt['opt duration per ts'] = 1.4 / pt['opt duration per ts']
pt['description'] = 'RoboSuite'
elif full_exp.find('panda') >= 0:
pt['opt duration per ts'] = 1.4 / pt['opt duration per ts']
pt['description'] = 'RoboDesk'
print('MOTION: Loading {0} pts for {1}'.format(len(r_data), full_dir+'/'+r))
rollout_data['motion'].extend(r_data)
data[k][full_exp][full_dir] = rollout_data
return data
def get_rollout_info_data(keywords=[], exclude=[], include=[]):
exp_probs = os.listdir(LOG_DIR)
data = {}
for k in keywords:
data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
full_dir = dir_prefix + dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
skip = False
for ekey in exclude:
if full_dir.find(ekey) >= 0:
skip = True
if len(include):
skip = True
for inc in include:
if dir_name.find(inc) >= 0 or dir_prefix.find(inc) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for exc in exclude:
if dir_name.find(exc) >= 0 or dir_prefix.find(exc) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
if not os.path.isdir(full_dir) or full_dir.find(k) < 0: continue
full_exp = full_dir[:-1]
if full_exp not in data[k]:
data[k][full_exp] = {}
file_names = os.listdir(full_dir)
file_names = [fname for fname in file_names if fname.find('RolloutInfo') >= 0]
rollout_data = {'rollout': []}
for r in file_names:
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
next_data = str.split(next_data, '\n\n')
try:
r_data = [eval(d) for d in next_data if len(d)]
except:
continue
for pt in r_data:
pt['exp id'] = 0
goal_vals = pt.get('per_goal_success', {'basegoal': 0.})
for goal in goal_vals:
new_pt = copy.copy(pt)
new_pt['goal'] = goal
new_pt['success rate'] = goal_vals[goal]
rollout_data['rollout'].append(new_pt)
print('ROLLOUT: Loading {0} pts for {1}'.format(len(r_data), full_dir+'/'+r))
#rollout_data['rollout'].extend(r_data)
data[k][full_exp][full_dir] = rollout_data
return data
def get_rollout_data(keywords=[], nfiles=20, exclude=[]):
exp_probs = os.listdir(LOG_DIR)
data = {}
for k in keywords:
data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
full_dir = dir_prefix + dir_name
if not os.path.isdir(full_dir) or full_dir.find(k) < 0: continue
full_exp = full_dir[:-1]
if full_exp not in data[k]:
data[k][full_exp] = {}
file_names = os.listdir(full_dir)
rollout_logs = ['rollout_log_{0}_True.txt'.format(i) for i in range(nfiles)]# [f for f in file_names if f.startswith('rollout')]
rollout_logs += ['rollout_log_{0}_False.txt'.format(i) for i in range(nfiles)]
rollout_data = {}
for r in rollout_logs:
if not os.path.isfile(full_dir+'/'+r): continue
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
try:
r_data = eval(next_data)
except:
continue
for pt in next_data:
pt['exp id'] = 0
rollout_data[r] = r_data
else:
print(('no data for', r))
data[k][full_exp][full_dir] = rollout_data
return data
def gen_first_success_plots(x_var='time'):
exp_probs = os.listdir(LOG_DIR)
master_plot = []
all_data = []
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
full_dir = dir_prefix + dir_name
with open(full_dir+'/exp_info.txt', 'r') as f:
exp_info = f.read()
file_names = os.listdir(full_dir)
rollout_logs = [f for f in file_names if f.startswith('rollout') and f.endswith('False.txt')]
rollout_data = {}
ts_to_data = {}
for i, r in enumerate(rollout_logs):
print((exp_name, dir_name, r))
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
costs = eval(next_data)
if 'avg_first_success' not in costs[0]: continue
for j, c in enumerate(costs):
c['ind'] = np.mean(c['avg_first_success'])
if j not in ts_to_data:
ts_to_data[j] = []
ts_to_data[j].append(c)
rollout_data[r] = costs
xs = [np.mean([c[x_var] for c in ts_to_data[n]]) for n in ts_to_data]
ys = [np.mean([c['ind'] for c in ts_to_data[n]]) for n in ts_to_data]
all_data.append((exp_name+dir_name, xs, ys))
plt.title('Steps to first success')
plt.xlabel(x_var)
plt.ylabel('Avg. step of first success')
colors = get_colors(len(all_data))
for i, (label, xs, ys) in enumerate(all_data):
plt.plot(xs, ys, label=label, color=colors[i])
plt.savefig(SAVE_DIR+'/goal_vs_{0}.png'.format(x_var), pad_inches=0.01)
plt.clf()
def get_td_loss(keywords=[], exclude=[], pre=False):
tdelta = 5
exp_probs = os.listdir(LOG_DIR)
exp_data = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
# exp_data = []
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
if len(keywords):
skip = True
for k in keywords:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for k in exclude:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = True
if skip: continue
full_dir = dir_prefix + dir_name
full_exp = full_dir[:-1]
i = 0
data = []
while i < 20:
if not os.path.isdir(full_exp+str(i)):
i += 1
continue
fnames = os.listdir(full_exp+str(i))
info = [f for f in fnames if f.find('td_error') >= 0 and f.endswith('npy') and f.find('test') < 0]
if len(info):
cur_data = []
for step in info:
cur_data.append(np.load(full_exp+str(i)+'/'+step))
dlen = max([len(dt) for dt in cur_data])
data.append([])
for n in range(dlen):
data[-1].append(np.mean([cur_data[ind][n] for ind in range(len(cur_data)) if n < len(cur_data[ind])], axis=0))
data[-1] = np.array(data[-1])
i += 1
if not len(data):
print(('skipping', full_exp))
continue
dlen = min([len(d) for d in data])
dmax = max([len(d) for d in data])
print(('Gathering data for', full_exp, 'length:', dlen, 'all len:', [len(d) for d in data]))
end = False
cur_t = 0
while not end:
end = True
for d in data:
next_frame = d[cur_t:cur_t+tdelta] # [pt[0] for pt in d if pt[0,3] >= cur_t and pt[0,3] <= cur_t + TWINDOW]
if len(next_frame):
end = False
if len(next_frame) >= MIN_FRAME:
next_pt = np.mean(next_frame, axis=0)
no, nt = 0, 0 # int(next_pt[4]), int(next_pt[2])
if (no, nt) not in exp_data:
exp_data[no, nt] = []
exp_data[no, nt].append((full_exp, cur_t, next_pt[0]))
cur_t += tdelta
'''
for i in range(dmax - FRAME):
cur_t = np.mean([d[i:i+FRAME,:,3] for d in data if i+FRAME < len(d)])
for d in data:
if len(d) < i+FRAME: continue
cur_fr = np.mean(d[i:i+FRAME], axis=0)
for pt in cur_fr:
val = pt[0]
# cur_t = pt[3]
nt = int(pt[2])
no = int(pt[4])
if (no, nt) not in exp_data:
exp_data[no, nt] = []
exp_data[no, nt].append((full_exp, cur_t, val))
'''
for no, nt in exp_data:
print('Plotting', no, nt, exp_name)
pd_frame = pd.DataFrame(exp_data[no, nt], columns=['exp_name', 'time', 'value'])
sns.set()
sns_plot = sns.relplot(x='time', y='value', hue='exp_name', kind='line', data=pd_frame)
keyid = ''
for key in keywords:
keyid += '_{0}'.format(key)
pre_lab = '_pre' if pre else ''
sns_plot.savefig(SAVE_DIR+'/{0}obj_{1}targ_td_error{2}{3}.png'.format(no, nt, keyid, pre_lab))
def get_fail_info(keywords=[], exclude=[], pre=False, rerun=False, xvar='time', avg_time=True, tdelta=TDELTA, wind=TWINDOW, lab='', lenthresh=0.99, label_vars=[], include=[], max_t=14400):
exp_probs = os.listdir(LOG_DIR)
exp_data = {}
exp_len_data = {}
exp_dist_data = {}
exp_true_data = {}
targets = {}
used = []
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
if len(keywords):
skip = False
for k in keywords:
if dir_name.find(k) < 0 and dir_prefix.find(k) < 0:
skip = True
if skip: continue
if len(include):
skip = True
for k in include:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = False
if skip: continue
print(dir_name)
if len(exclude):
skip = False
for k in exclude:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
full_dir = dir_prefix + dir_name
full_exp = full_dir[:full_dir.rfind('_')]
if full_exp in used: continue
used.append(full_exp)
i = 0
data = []
while i < 20:
cur_dir = '{0}_{1}'.format(full_exp, i)
if not os.path.isdir(cur_dir):
i += 1
continue
fnames = os.listdir(cur_dir)
info = [f for f in fnames if f.find('failure') >= 0 and f.endswith('data.txt')]
if len(info):
for fname in info:
print(('Loading data from', fname))
with open(cur_dir+'/'+fname, 'r') as f:
data.append(f.read().splitlines())
label = gen_label(cur_dir, label_vars)
for buf in data:
for pts in buf:
pts = eval(pts)
no, nt = int(pts['no']), int(pts['nt'])
if (no,nt) not in exp_data: exp_data[no,nt] = []
if (no,nt) not in targets: targets[no,nt] = []
pts['exp_name'] = label
exp_data[no,nt].append(pts)
targs = pts['goal']
targinfo = []
for targind in range(len(targs)):
if targs[targind] == 1:
targinfo.append(targind)
targets[no,nt].append([tuple(targinfo)])
i += 1
for no, nt in targets:
print(('Plotting', no, nt, exp_name))
pd_frame = | pd.DataFrame(targets[no, nt], columns=['targets']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import pandas as pd
pd.options.display.max_rows=10
import numpy as np
from indicators import indicators_for_dirverse,indicators_for_crosses
from dateutil.parser import parse
import talib
import sys
from scipy import interpolate
from datetime import datetime
reload(sys)
sys.setdefaultencoding('utf-8')
CLOSE_SAMPLE=[ 24.76, 24.28, 25.21, 26.25, 28.88, 28.99, 28.35, 31.19,34.31, 36.49, 35.67, 32.1 , 32.18, 31.7 , 30.8 , 30.77,29.77, 27.7 , 28.76]
LOW_SAMPLE=[ 24.2 , 24.01, 23.41, 24. , 26.37, 27.25, 27.4 , 31.19,33.4 , 33.4 , 35.08, 32.1 , 30.7 , 31.01, 30.27, 30.5 ,29.45, 27.6 , 27.7 ]
GBKM_SAMPLE=[ 75.27683505, 74.16337925, 74.90652869, 77.40264178,81.75542302, 86.66794839, 88.29240889, 86.10675256,84.7067632 , 87.00756837, 90.50308921, 89.76234594,82.57561793, 71.43528003, 59.91510841, 50.53179488,43.08981872, 36.17388661, 29.83802615]
CLOSE_SAMPLE2=[ 20.33, 21.05, 21.49, 20.29, 22.32, 24.55, 27.01, 29.71,32.68, 31.77, 34.95, 38.45, 42.3 , 46.53, 51.18, 50. ,47.5 , 48. , 47. , 42.41, 43.68, 48.05]
LOW_SAMPLE2=[ 19.99, 20.25, 20.68, 20.29, 19.78, 22.81, 25. , 28.36,30.45, 30.7 , 31.18, 35.52, 41.1 , 46.53, 47.65, 46.63,45.5 , 46. , 45.5 , 42.3 , 41.5 , 43.18]
GBKM_SAMPLE2=[ 93.71592611, 91.21636003, 87.46623061, 83.41955066,80.66983087, 81.01571395, 84.73545107, 90.40899863,95.05322187, 96.89845728, 96.5647677 , 95.76976925,96.00042368, 97.37205819, 98.6860291 , 99.1305236 ,98.05462598, 94.43946125, 88.22010362, 79.89313723,70.47144951, 62.78129296]
def indicators_for_dirverse(df):
df=df.dropna()
df=df.sort_index()
df['MACD'],df['MACD_signal'],df['MACD_hist']=talib.MACD(np.array(df['close']))
df['var1']=(2*df['close']+df['high']+df['low']+df['open'])/5.0
df['var2']=talib.MIN(np.array(df['low']),timeperiod=34)
df['var3']=talib.MAX(np.array(df['low']),timeperiod=34)
df['buffer']=(df['var1']-df['var2'])/(df['var3']-df['var2'])*100
df['SK']=talib.EMA(np.array(df['buffer']),timeperiod=13)
df['SD']=talib.EMA(np.array(df['SK']),timeperiod=3)
df['MACD_MIN']=talib.MIN(np.array(df['MACD']),timeperiod=9)
df['PRICE_MIN']=talib.MIN(np.array(df.close),9)
df['PRICE_MAX']=talib.MAX(np.array(df.close),9)
df['RSI'] = talib.RSI(np.array(df.close))
df=df.sort_index(ascending=False)
df=df.dropna()
return df
def indicators_for_crosses(df):
df=df.dropna()
df=df.sort_index()
df['MA5']=talib.MA(np.array(df['close']),5)
df['MA10']=talib.MA(np.array(df['close']),10)
df['MA20']=talib.MA(np.array(df['close']),20)
df['LONG_ARR']=(df['MA5']>df['MA10'])&(df['MA10']>df['MA20'])
df['SHORT_ARR']=(df['MA5']<df['MA10'])&(df['MA10']<df['MA20'])
df['PRICE_MAX']=talib.MAX(np.array(df.close),3)
df=df.sort_index(ascending=False)
df=df.dropna()
return df
def diverse_strategy_buy(df): # strategy
'''
PRICE_MIN_9,MACD,MIN_MACD
'''
df['price_gorge']=df['PRICE_MIN']!=df['PRICE_MIN'].shift(-1)
df['MACD_gorge']=df['MACD']>df['MACD_MIN'].shift(-1)
df['SDSKlt20']=(df['SK']<20)&(df['SD']<20)
df['buy_signal']=df['SDSKlt20']&df['MACD_gorge']&df['price_gorge']
df=df.dropna()
return df
def diverse_strategy_sell(df):
    '''Divergence'''
df['price_peak']=df['PRICE_MAX']!=df['PRICE_MAX'].shift(-1)
df['MACD_peak']=df['MACD']>df['MACD_MIN'].shift(-1)
df['SDSKgt80']=(df['SK']>80)&(df['SD']>80)
#df['quick_sell']=(df['ma5']<df['ma20'])&(df['ma5'].shift(-1)>df['ma20'].shift(-1))
#df['LossLimit']=df['close']<df['PRICE_MAX']*0.92
df['sell_signal']=(df['SDSKgt80']&df['MACD_peak']&df['price_peak'])#|df['LossLimit']#|df['quick_sell']
df=df.dropna()
return df
def golden_cross(df):
df=indicators_for_crosses(df)
df['buy_signal']=df['LONG_ARR']&(df['SHORT_ARR'].shift(-4))
df=df.dropna()
return df
def dead_cross(df):
df=indicators_for_crosses(df)
df['sell_signal']=df['SHORT_ARR']&(df['LONG_ARR'].shift(-4))
df=df.dropna()
return df
def return_similarity(va,vb,ignore_start=True):
'''regardless of where you start'''
va=np.array(va)
vb=np.array(vb)
lena=len(va)
lenb=len(vb)
if lena!=lenb:
if lena>lenb:
sarr=vb
larr=va
if lena<lenb:
sarr=va
larr=vb
xs=np.array(np.linspace(1,len(sarr),len(sarr)))
xl=np.array(np.linspace(1,len(sarr),len(larr)))
f = interpolate.interp1d(xs, sarr)
va = f(xl)
vb = larr
if ignore_start:
va=va-va[0]
vb=vb-vb[0]
num=float(va.T.dot(vb))
denom=np.linalg.norm(va)*np.linalg.norm(vb)
an_cos=num/denom
an_sin=0.5+0.5*an_cos
    # the closer to 1, the higher the similarity
return an_sin
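# Worked example (values chosen only for illustration): two series with the same
# shape but different starting levels score 1.0 when the start is ignored, and
# slightly lower when it is not.
def _demo_return_similarity():
    a = [1.0, 2.0, 3.0, 4.0]
    b = [11.0, 12.0, 13.0, 14.0]
    print(return_similarity(a, b))                      # 1.0
    print(return_similarity(a, b, ignore_start=False))  # ~0.97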
def rs(arr,*args,**kwargs):
arr=list(arr)
results=[]
for sample in args:
results.append(return_similarity(arr,sample,ignore_start=True))
result=np.mean(results)
return result
def rs2(arr,*args,**kwargs):
arr=list(arr)
results=[]
for sample in args:
results.append(return_similarity(arr,sample,ignore_start=False))
result=np.mean(results)
return result
def stra_simi(df,idx,ignore_start=True,*args,**kwargs):
'''
idx:column name youwant to compare with
args should be passed into samples of list or array type
kwargs' keys got 'name'...
'''
if not args:
return
bucket=[]
for sample in args:
bucket.append(df[idx].rolling(center=False,window=len(sample)).apply(func=rs,args=args))
srs=pd.concat(bucket,axis=1)
if kwargs:
df[kwargs['name']]=srs.apply(np.mean,axis=1)
else:
df['Similarity']=srs.apply(np.mean,axis=1)
df=df.sort_index()
df=df.dropna()
return df
def stra_simi2(df,idx,ignore_start=True,*args,**kwargs):
'''
idx:column name youwant to compare with
args should be passed into samples of list or array type
kwargs' keys got 'name'...
'''
if not args:
return
bucket=[]
for sample in args:
bucket.append(df[idx].rolling(center=False,window=len(sample)).apply(func=rs2,args=args))
srs= | pd.concat(bucket,axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import numba
import seaborn as sns
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from hfhd import hd
@numba.njit
def garch_11(n, sigma_sq_0, mu, alpha, beta, omega):
r"""
Generate GARCH(1, 1) log-returns of size n.
This function is accelerated via JIT with Numba.
Parameters
----------
n : int
The length of the wished time series.
sigma_sq_0 : float > 0
The variance starting value.
mu : float:
The drift of log-returns.
alpha : float >= 0
The volatility shock parameter. A higher value will lead to
larger spikes in volatility. A.k.a short-term persistence.
beta : float >= 0
The volatility persistence parameter. A larger value will
result in stronger persistence. A.k.a long-term persistence.
omega : float > 0
The variance constant. A higher value results in a higher
mean variance.
Returns
-------
r : numpy.ndarray
The GARCH log-returns time series.
sigma_sq : numpy.ndarray
The resulting variance time series with which each log-return
was generated.
Notes
-----
In general, the conditional variance of a GARCH(p,q) model is given by
.. math:: \sigma_{t}^{2}=\omega+\sum_{i=1}^{q} \alpha_{i}
\varepsilon_{t-i}^{2}+\sum_{j=1}^{p} \beta_{j} \sigma_{t-j}^{2}.
The unconditional variance is given by
.. math:: \sigma^{2}=\frac{\omega}{1-\sum_{i=1}^{q}
\alpha_{i}-\sum_{j=1}^{p} \beta_{j}}.
Here, :math:`p=q=1`,
and :math:`\epsilon_{t} \sim \mathcal{N}\left(0, 1\right)`
"""
nu = np.random.normal(0, 1, n)
r = np.zeros(n)
epsilon = np.zeros(n)
sigma_sq = np.zeros(n)
sigma_sq[0] = sigma_sq_0
if min(alpha, beta) < 0:
raise ValueError('alpha, beta need to be non-negative')
if omega <= 0:
raise ValueError('omega needs to be positive')
if alpha+beta >= 1:
print('''alpha+beta>=1, variance not defined
--> time series will not be weakly stationary''')
for i in range(n):
if i > 0:
sigma_sq[i] = omega + alpha * epsilon[i-1]**2 + beta * sigma_sq[i-1]
epsilon[i] = (sigma_sq[i]**0.5) * nu[i]
r[i] = mu + epsilon[i]
return r, sigma_sq
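# Illustrative sketch (assumed parameters, not from the source): the values only
# need to satisfy alpha + beta < 1 and omega > 0, giving an unconditional variance
# of omega / (1 - alpha - beta) = 0.02.
def _demo_garch_11():
    r, sigma_sq = garch_11(n=1000, sigma_sq_0=0.02, mu=0.0,
                           alpha=0.08, beta=0.90, omega=0.0004)
    return r, sigma_sq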
class Universe:
r"""
The universe is a specification from which simulated realizations
can be sampled. Stocks follow a factor model, they belong
to industries and have an idiosyncratic component. Stocks are predictable
by a single feature.
Attributes
----------
feature_beta : float
The true coefficient.
factor_garch_spec : list
The garch specification for factor returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
industry_garch_spec : list
The garch specification for industry returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
resid_garch_spec : list
The garch specification for residual returns.
``[sigma_sq_0, mu, alpha, beta, omega]``
factor_loadings : numpy.ndarray
An array with factor loadings for each stock and factor.
dim = n_stocks x n_factors
industry_loadings : numpy.ndarray
An array with industry loadings for each stock and industry.
dim = n_stocks x n_industry
This is usually a sparse matrix. One stock loads typically on
one or two industries. A good number of industries is 10 to 20.
liquidity : float
A value between 0 and 1 that describes liquidity.
A value of 1 means that the probability of observation
is 100% each minute. 0.5 means that there is a 50%
probability of observing a price each minute.
gamma : float >=0
The microstructure noise will be zero-mean Gaussian with variance
$\gamma^2 var(r)$, where $var(r)$ is the variance of the
underlying true return process. This noise is be added to the price.
freq : str, ``'s'`` or ``'m'``.
The granularity of the discretized continous price process.
"""
def __init__(self, feature_beta, factor_garch_spec, industry_garch_spec,
resid_garch_spec, factor_loadings, industry_loadings,
liquidity=0.5, gamma=2, freq='m'):
self.feature_beta = feature_beta
self.factor_garch_spec = factor_garch_spec
self.industry_garch_spec = industry_garch_spec
self.resid_garch_spec = resid_garch_spec
self.factor_loadings = factor_loadings
self.industry_loadings = industry_loadings
self.liquidity = liquidity
self.gamma = gamma
self.freq = freq
self.n_stocks = self.factor_loadings.shape[0]
self.n_ind = self.industry_loadings.shape[1]
self.n_factors = self.factor_loadings.shape[1]
@staticmethod
def uncond_var(spec):
'''
Compute the uncoditional variance from a
GARCH(1,1) specification.
Parameters
----------
spec : list
The garch specification.
``[sigma_sq_0, mu, alpha, beta, omega]``
Returns
-------
float
The unconditional variance.
'''
return spec[4]/(1-spec[2]-spec[3])
def uncond_cov(self):
'''
Compute the uncoditional covariance of stock returns
in the universe from a universe specification.
Returns
-------
numpy.ndarray
The unconditional covariance matrix.
'''
sf = np.diag([self.uncond_var(self.factor_garch_spec)]*self.n_factors)
sr = np.diag([self.uncond_var(self.resid_garch_spec)]*self.n_stocks)
si = np.diag([self.uncond_var(self.industry_garch_spec)]*self.n_ind)
return (self.factor_loadings @ sf @ self.factor_loadings.T
+ sr
+ self.industry_loadings @ si @ self.industry_loadings.T)
def cond_cov(self):
'''
Compute the daily coditional integrated covariance matrix of stock
returns within regular market hours in the universe from a realized
universe simulation.
Returns
-------
list
A list containing the conditional integrated covariance matrices
of each day.
'''
sr = pd.DataFrame(self.sigma_sq_resid)
sr.index = pd.to_datetime(sr.index, unit=self.freq)
sr = sr.between_time('9:30', '16:00',
include_start=True,
include_end=True)
sr = sr.resample('1d').sum()
si = | pd.DataFrame(self.sigma_sq_industry) | pandas.DataFrame |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
    rets = pd.Series([np.NaN, np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
    wrets_exp = pd.DataFrame([np.NaN, np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
                                     (TS('2015-01-05'), 'CLF5')])
#!/usr/bin/env python
# coding: utf-8
# Prerequisite:
# 1. the database contains the whole week's data of last week until last Sunday.
# e.g. if today is 9/26/18 Wed, it must contain the data until 9/23/18 Sunday
#
# The program uses ISO Calendar:
# 1. first day and last day of the week are respectively Monday(1) and Sunday(7)
# 2. the last few days of a year can be counted as part of the first week of the next year,
#    e.g. 2014-12-31 falls in week 01 of 2015;
#    and vice versa, e.g. 2016-01-01 falls in week 53 of 2015
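# Illustrative check of the ISO calendar behaviour described above (not executed by this DAG):
# >>> from datetime import date
# >>> date(2014, 12, 31).isocalendar()[:2]
# (2015, 1)
# >>> date(2016, 1, 1).isocalendar()[:2]
# (2015, 53)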
#
# If Gmail is used to receive Outlier Alert emails,
# the gmail account's 2 step verification has to be disabled,
# and access from less secure apps should be allowed.
#
# Environment Variables:
# the following variables need to be set up in airflow.cfg
# smtp_host = 'smtp.gmail.com'
# smtp_starttls = True
# smtp_ssl = True
# smtp_user = 'email address owner's email account'
# smtp_password = '<PASSWORD>'
# smtp_port = '587'
# smtp_mail_from = 'email address to send from'
# the following variables need to be set up in airflow's webserver UI: Admin -> Variables
# email_to = 'address1, address2'
# email_cc = 'address3, address4'
"""
This DAG performs Outlier Detection for each individual Council District of LA City
"""
import logging
import os
from datetime import datetime, timedelta
import airflow
import altair as alt
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.email import send_email
from dateutil.relativedelta import relativedelta
# a csv file with this filename will be saved from a copy of postgres table
filename = "/tmp/myla311.csv"
prefix = "/tmp/"
# get the current Council District data and init cd_dict
cd_list = pd.read_csv(
"https://opendata.arcgis.com/datasets/76104f230e384f38871eb3c4782f903d_13.csv",
index_col=False,
)
cd_list.OBJECTID = cd_list.OBJECTID.astype(float)
cd_dict = {}
for index, row in cd_list.iterrows():
cd_dict[row.OBJECTID] = row.NAME
no_cd = len(cd_dict)
# email parameters. Notice: email addresses need to be updated in production
message = "These are last week's Outliers for Council District {} of LA City:"
test = "<EMAIL>,<EMAIL>"
test1 = "<EMAIL>"
email_to = {key: test for key in cd_dict.keys()}
email_cc = {key: test1 for key in cd_dict.keys()}
subject = "[Alert] MyLA311 Data Outliers"
# outlier type identifiers
INDIV_HIGH_OUTLIER = "HIGH INDIVIDUAL REQUEST TYPE OUTLIER"
INDIV_LOW_OUTLIER = "LOW INDIVIDUAL REQUEST TYPE OUTLIER"
TOTAL_HIGH_OUTLIER = "HIGH TOTAL OUTLIER"
TOTAL_LOW_OUTLIER = "LOW TOTAL OUTLIER"
DIFF_HIGH_OUTLIER = "HIGH TOTAL DIFF OUTLIER"
DIFF_LOW_OUTLIER = "LOW TOTAL DIFF OUTLIER"
HIGH_PROCESS_TIME_OUTLIER = "HIGH PROCESS TIME OUTLIER"
LOW_PROCESS_TIME_OUTLIER = "LOW PROCESS TIME OUTLIER"
def make_save_graph(df, cd, col, title):
line = (
alt.Chart(df.reset_index(), title=" - ".join([title, col]))
.mark_line()
.encode(x="year_week:O", y=col + ":Q")
)
rule = (
alt.Chart(df)
.mark_rule(color="red")
.encode(alt.Y("average({})".format(col)), size=alt.value(1))
)
graph = line + rule
filename = "chart-cd{}-{}.png".format(
int(cd), col.replace("/", "-").replace(" ", "-")
)
graph.save(prefix + filename)
return filename
def make_save_boxplot(df, cd, point, title):
# using seaborn
sns.set_style("whitegrid")
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_title(title, fontsize=15)
sns.boxplot(ax=ax, x=df, linewidth=1, color="lightblue")
plt.scatter(point, 0, marker="o", s=100, c="red", linewidths=5, label="Outlier")
ax.legend()
filename = "chart-cd{}-Proc-Time-{}.png".format(
int(cd), title.replace("/", "-").replace(" ", "-")
)
plt.savefig(prefix + filename)
plt.close()
return filename
def detect_outliers(filename, **kwargs):
"""
Outlier Detector that detects the following types of outliers:
1. number of Individual Request Type per week, high and low outliers
2. number of Total requests per week, high and low outliers
3. the difference between last week and the week before
4. request process time high and low outliers
"""
# Retrieve data
logging.info("Data is being read from {}".format(filename))
df = pd.read_csv(filename, index_col=False)
logging.info(
"Data Reading is done from {}. Performing outlier detection".format(filename)
)
df.drop(
columns=["location_address", "location_city", "location_state", "location_zip"],
inplace=True,
)
# change data type from object to datatime
df["createddate"] = | pd.to_datetime(df["createddate"], errors="coerce") | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 8 10:46:31 2018
@author: ksharpey
"""
# =============================================================================
# This script
# 1 - Produces extra Engineered Features (EF) on the unified view
# 2 - Filters known outliers, cleans dates
# 3 - exports clean & engineered to output folder
#
# This script does not perform correlation or regression
# =============================================================================
import os as os
import pandas as pd
import seaborn as sns
import datetime
from datetime import datetime
from dateutil import relativedelta
from dateutil.relativedelta import relativedelta
import calendar
import numpy as np
from sklearn import datasets, linear_model
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
os.getcwd()
#constants
INPUT_FILE = config['EFC']['INPUT_FILE']
OUTPUT_DIR = config['DEFAULT']['OUTPUT_DIR']
UNIQUE_ID_COL_NAME = '_id'
STATIC_INPUT_COLS = ['IN_form.bp2.accompany_vip',
'IN_form.has_bank_account_vip', 'IN_form.bp2.vehicle_vip',
'IN_form.bp2.has_danger_signs_vip', 'IN_form.bp2.bleeding_vip',
'IN_form.bp2.swelling_vip', 'IN_form.bp2.blurred_vision_vip',
'IN_form.bp2.convulsions_vip', 'IN_form.bp2.rupture_vip'] #9
DYNAMIC_COL_NAMES = ['form.bp2.care_of_home_vip'] #1
OUTCOME_FACTOR_COLS = [
'OC_form.where_born_vip', 'OC_form.delivery_nature_vip']
ENGINEERED_COL =['EF_tot_TP']
#ABS_FIRST_TP = datetime.strptime(str('01/01/2012'), '%m/%d/%Y')
# =============================================================================
# Prep df and output df
# =============================================================================
unified_view = pd.read_csv(OUTPUT_DIR+INPUT_FILE)
new_df = unified_view
# =============================================================================
# CLEAN DATES
# =============================================================================
new_df['OC_MIN_timeperiodStart'] = pd.to_datetime(new_df['OC_MIN_timeperiodStart'].dropna())
new_df['OC_MIN_timeperiodStart'].value_counts().sort_index()
#dobs = pd.to_datetime(new_df[new_df['OC_date_birth_vip']!='---']['OC_date_birth_vip'].dropna())
# =============================================================================
# HOT ENCODING
# https://lukesingham.com/whos-going-to-leave-next/
# =============================================================================
for i in range(0, len(OUTCOME_FACTOR_COLS)):
new_df[OUTCOME_FACTOR_COLS[i]] = 'EN_'+OUTCOME_FACTOR_COLS[i].upper()[3:]+'_' + new_df[OUTCOME_FACTOR_COLS[i]].astype(str)
# generate dummies column encoding and join
    y = pd.get_dummies(new_df[OUTCOME_FACTOR_COLS[i]])
from pathlib import Path
import copy
import pickle as pkl
from mmap import mmap
from scipy import stats as st
from scipy.stats._continuous_distns import FitDataError
import torch
from sklearn import svm
from sklearn import linear_model
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import os
import matplotlib.colors as mcolors
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from mpl_toolkits.mplot3d.art3d import juggle_axes
from matplotlib.ticker import MaxNLocator
from joblib import Memory
import math
import lyap
import model_loader_utils as loader
import initialize_and_train as train
import utils
memory = Memory(location='./memoization_cache', verbose=2)
# memory.clear()
## Functions for computing means and error bars for the plots. 68% confidence
# intervals and means are currently
# implemented in this code. The commented out code is for using a gamma
# distribution to compute these, but uses a
# custom version of seaborn plotting library to plot.
def orth_proj(v):
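    """Return the projector onto the subspace orthogonal to v: I - v v^T / (v^T v)."""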
n = len(v)
vv = v.reshape(-1, 1)
return torch.eye(n) - ([email protected])/(v@v)
USE_ERRORBARS = True
# USE_ERRORBARS = False
LEGEND = False
# LEGEND = True
folder_root = '../results/figs/'
def ci_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return bounds[1], bounds[0]
# ci_acc = 68
# ci_acc = 95
def est_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return median
# est_acc = "mean"
def ci_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return bounds[1], bounds[0]
# ci_dim = 68
# ci_dim = 95
def est_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return median
# est_dim = "mean"
def point_replace(a_string):
a_string = str(a_string)
return a_string.replace(".", "p")
def get_color(x, cmap=plt.cm.plasma):
"""Get normalized color assignments based on input data x and colormap
cmap."""
mag = torch.max(x) - torch.min(x)
x_norm = (x.float() - torch.min(x))/mag
return cmap(x_norm)
def median_and_bound(samples, perc_bound, dist_type='gamma', loc=0., shift=0,
reflect=False):
"""Get median and probability mass intervals for a gamma distribution fit
of samples."""
samples = np.array(samples)
def do_reflect(x, center):
return -1*(x - center) + center
if dist_type == 'gamma':
if np.sum(samples[0] == samples) == len(samples):
median = samples[0]
interval = [samples[0], samples[0]]
return median, interval
if reflect:
samples_reflected = do_reflect(samples, loc)
shape_ps, loc_fit, scale = st.gamma.fit(samples_reflected,
floc=loc + shift)
median_reflected = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval_reflected = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
median = do_reflect(median_reflected, loc)
interval = do_reflect(interval_reflected, loc)
else:
shape_ps, loc, scale = st.gamma.fit(samples, floc=loc + shift)
median = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
else:
raise ValueError("Distribution option (dist_type) not recognized.")
return median, interval
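# Example of how the helpers above (ci_acc / est_acc) call median_and_bound:
# median, (lo, hi) = median_and_bound(vals, perc_bound=0.75, loc=1., shift=-.0001, reflect=True)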
## Set parameters for figure aesthetics
plt.rcParams['font.size'] = 6
plt.rcParams['font.size'] = 6
plt.rcParams['lines.markersize'] = 1
plt.rcParams['lines.linewidth'] = 1
plt.rcParams['axes.labelsize'] = 7
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.titlesize'] = 8
# Colormaps
class_style = 'color'
cols11 = np.array([90, 100, 170])/255
cols12 = np.array([37, 50, 120])/255
cols21 = np.array([250, 171, 62])/255
cols22 = np.array([156, 110, 35])/255
cmap_activation_pnts = mcolors.ListedColormap([cols11, cols21])
cmap_activation_pnts_edge = mcolors.ListedColormap([cols12, cols22])
rasterized = False
dpi = 800
ext = 'pdf'
# Default figure size
figsize = (1.5, 1.2)
ax_pos = (0, 0, 1, 1)
def make_fig(figsize=figsize, ax_pos=ax_pos):
"""Create figure."""
fig = plt.figure(figsize=figsize)
ax = fig.add_axes(ax_pos)
return fig, ax
def out_fig(fig, figname, subfolder='', show=False, save=True, axis_type=0,
name_order=0, data=None):
""" Save figure."""
folder = Path(folder_root)
figname = point_replace(figname)
# os.makedirs('../results/figs/', exist_ok=True)
os.makedirs(folder, exist_ok=True)
ax = fig.axes[0]
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_rasterized(rasterized)
if axis_type == 1:
ax.tick_params(axis='both', which='both',
# both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
left=False, top=False,
# ticks along the top edge are off
labelbottom=False,
labelleft=False) # labels along the bottom edge are off
elif axis_type == 2:
ax.axis('off')
if name_order == 0:
fig_path = folder/subfolder/figname
else:
fig_path = folder/subfolder/figname
if save:
os.makedirs(folder/subfolder, exist_ok=True)
fig_file = fig_path.with_suffix('.' + ext)
print(f"Saving figure to {fig_file}")
fig.savefig(fig_file, dpi=dpi, transparent=True, bbox_inches='tight')
if show:
fig.tight_layout()
fig.show()
if data is not None:
os.makedirs(folder/subfolder/'data/', exist_ok=True)
with open(folder/subfolder/'data/{}_data'.format(figname),
'wb') as fid:
pkl.dump(data, fid, protocol=4)
plt.close('all')
def autocorrelation(train_params, figname='autocorrelation'):
train_params_loc = train_params.copy()
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
# val_loss = params['history']['losses']['val']
# val_losses[i0, i1] = val_loss
# val_acc = params['history']['accuracies']['val']
# val_accs[i0, i1] = val_acc
train_samples_per_epoch = len(class_datasets['train'])
class_datasets['train'].max_samples = 10
torch.manual_seed(params['model_seed'])
X = class_datasets['train'][:][0]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif train_params_loc['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# X = utils.extend_input(X, 10)
loader.load_model_from_epoch_and_dir(model, run_dir, -1)
hid = []
hid += model.get_post_activations(X)[:-1]
# auto_corr_mean = []
# auto_corr_var = []
auto_corr_table = pd.DataFrame(columns=['t_next', 'autocorr'])
h = hid[0]
for i0 in range(len(hid)):
h_next = hid[i0]
overlap = torch.sum(h*h_next, dim=1)
norms_h = torch.sqrt(torch.sum(h**2, dim=1))
norms_h_next = torch.sqrt(torch.sum(h_next**2, dim=1))
corrs = overlap/(norms_h*norms_h_next)
avg_corr = torch.mean(corrs)
d = {'t_next': i0, 'autocorr': corrs}
auto_corr_table = auto_corr_table.append(pd.DataFrame(d),
ignore_index=True)
fig, ax = make_fig(figsize)
sns.lineplot(ax=ax, x='t_next', y='autocorr', data=auto_corr_table)
out_fig(fig, figname)
def snapshots_through_time(train_params, figname="snap", subdir="snaps"):
"""
Plot PCA snapshots of the representation through time.
Parameters
----------
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training.
"""
subdir = Path(subdir)
X_dim = train_params['X_dim']
FEEDFORWARD = train_params['network'] == 'feedforward'
num_pnts_dim_red = 800
num_plot = 600
train_params_loc = copy.deepcopy(train_params)
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(train_params_loc['model_seed'])
X, Y = class_datasets['train'][:]
if FEEDFORWARD:
T = 10
y = Y
X0 = X
else:
T = 30
# T = 100
X = utils.extend_input(X, T + 2)
X0 = X[:, 0]
y = Y[:, -1]
loader.load_model_from_epoch_and_dir(model, run_dir, 0, 0)
hid_0 = [X0]
hid_0 += model.get_post_activations(X)[:-1]
loader.load_model_from_epoch_and_dir(model, run_dir,
train_params_loc['num_epochs'], 0)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if FEEDFORWARD:
r = model.layer_weights[-1].detach().clone().T
else:
r = model.Wout.detach().clone()
# r0_n = r[0] / torch.norm(r[0])
# r1_n = r[1] / torch.norm(r[1])
#
# r0_n_v = r0_n.reshape(r0_n.shape[0], 1)
# r1_n_v = r1_n.reshape(r1_n.shape[0], 1)
# r0_orth = torch.eye(len(r0_n)) - r0_n_v @ r0_n_v.T
# r1_orth = torch.eye(len(r1_n)) - r1_n_v @ r1_n_v.T
# h = hid[10]
# # h_proj = h @ r_orth
# u, s, v = torch.svd(h)
# v0 = v[:, 0]
# def orth_projector(v):
# n = len(v)
# return (torch.eye(n) - v.reshape(n, 1)@v.reshape(1, n))/(v@v)
# v0_orth = (torch.eye(n) - v0.reshape(n,1)@v0.reshape(1,n))/(v0@v0)
# h_v0_orth = h @ v0_orth
# r0_e_p = orth_projector(r0_e)
# r1_e_p = orth_projector(r1_e)
# h_r0_e_p0 = h[y] @ r0_e_p
# h_r0_e_p1 = h[y] @ r1_e_p
coloring = get_color(y, cmap_activation_pnts)[:num_plot]
edge_coloring = get_color(y, cmap_activation_pnts_edge)[:num_plot]
## Now get principal components (pcs) and align them from time point to
# time point
pcs = []
p_track = 0
norm = np.linalg.norm
projs = []
for i1 in range(1, len(hid)):
# pc = utils.get_pcs_covariance(hid[i1], [0, 1])
out = utils.get_pcs_covariance(hid[i1], [0, 1], return_extra=True)
pc = out['pca_projection']
mu = out['mean']
proj = out['pca_projectors']
mu_proj = mu@proj[:, :2]
if i1 > 0:
# Check for the best alignment
pc_flip_x = pc.clone()
pc_flip_x[:, 0] = -pc_flip_x[:, 0]
pc_flip_y = pc.clone()
pc_flip_y[:, 1] = -pc_flip_y[:, 1]
pc_flip_both = pc.clone()
pc_flip_both[:, 0] = -pc_flip_both[:, 0]
pc_flip_both[:, 1] = -pc_flip_both[:, 1]
difference0 = norm(p_track - pc)
difference1 = norm(p_track - pc_flip_x)
difference2 = norm(p_track - pc_flip_y)
difference3 = norm(p_track - pc_flip_both)
amin = np.argmin(
[difference0, difference1, difference2, difference3])
if amin == 1:
pc[:, 0] = -pc[:, 0]
proj[:, 0] = -proj[:, 0]
elif amin == 2:
pc[:, 1] = -pc[:, 1]
proj[:, 1] = -proj[:, 1]
elif amin == 3:
pc[:, 0] = -pc[:, 0]
pc[:, 1] = -pc[:, 1]
proj[:, 0] = -proj[:, 0]
proj[:, 1] = -proj[:, 1]
pc = pc + mu_proj
p_track = pc.clone()
pcs.append(pc[:num_plot])
projs.append(proj)
def take_snap(i0, scats, fig, dim=2, border=False):
# ax = fig.axes[0]
hid_pcs_plot = pcs[i0][:, :dim].numpy()
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
xc = (xm + xM)/2
yc = (ym + yM)/2
hid_pcs_plot[:, 0] = hid_pcs_plot[:, 0] - xc
hid_pcs_plot[:, 1] = hid_pcs_plot[:, 1] - yc
v = projs[i0]
# u, s, v = torch.svd(h)
if r.shape[0] == 2:
r0_p = r[0]@v
r1_p = r[1]@v
else:
r0_p = r.flatten()@v
r1_p = -r.flatten()@v
if class_style == 'shape':
scats[0][0].set_offsets(hid_pcs_plot)
else:
if dim == 3:
                scat._offsets3d = juggle_axes(*hid_pcs_plot[:, :dim].T, 'z')
else:
scats[0].set_offsets(hid_pcs_plot)
scats[1].set_offsets(r0_p[:2].reshape(1, 2))
scats[2].set_offsets(r1_p[:2].reshape(1, 2))
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
max_extent = max(xM - xm, yM - ym)
max_extent_arg = xM - xm > yM - ym
if dim == 2:
x_factor = .4
if max_extent_arg:
ax.set_xlim(
[xm - x_factor*max_extent, xM + x_factor*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim(
[ym - x_factor*max_extent, yM + x_factor*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
else:
if max_extent_arg:
ax.set_xlim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_zlim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_zlim([ym - .1*max_extent, yM + .1*max_extent])
# ax.plot([r0_p[0]], [r0_p[1]], 'x', markersize=3, color='black')
# ax.plot([r1_p[0]], [r1_p[1]], 'x', markersize=3, color='black')
ax.set_ylim([-4, 4])
if dim == 3:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
else:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
return scats,
dim = 2
hid_pcs_plot = pcs[0]
if dim == 3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim([-10, 10])
ax.set_ylim([-10, 10])
ax.set_zlim([-10, 10])
else:
fig, ax = make_fig()
ax.grid(False)
scat1 = ax.scatter(*hid_pcs_plot[:num_plot, :dim].T, c=coloring,
edgecolors=edge_coloring, s=10, linewidths=.65)
ax.plot([0], [0], 'x', markersize=7)
scat2 = ax.scatter([0], [0], marker='x', s=3, c='black')
scat3 = ax.scatter([0], [0], marker='x', s=3, color='black')
scats = [scat1, scat2, scat3]
# ax.plot([0], [0], 'o', markersize=10)
if FEEDFORWARD:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
else:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 21, 26,
31]) # snap_idx = list(range(T + 1))
for i0 in snap_idx:
take_snap(i0, scats, fig, dim=dim, border=False)
def _cluster_holdout_test_acc_stat_fun(h, y, clust_identity,
classifier_type='logistic_regression',
num_repeats=5, train_ratio=0.8, seed=11):
np.random.seed(seed)
num_clusts = np.max(clust_identity) + 1
num_clusts_train = int(round(num_clusts*train_ratio))
num_samples = h.shape[0]
test_accs = np.zeros(num_repeats)
train_accs = np.zeros(num_repeats)
for i0 in range(num_repeats):
permutation = np.random.permutation(np.arange(len(clust_identity)))
perm_inv = np.argsort(permutation)
clust_identity_shuffled = clust_identity[permutation]
train_idx = clust_identity_shuffled <= num_clusts_train
test_idx = clust_identity_shuffled > num_clusts_train
hid_train = h[train_idx[perm_inv]]
y_train = y[train_idx[perm_inv]]
y_test = y[test_idx[perm_inv]]
hid_test = h[test_idx[perm_inv]]
if classifier_type == 'svm':
classifier = svm.LinearSVC(random_state=3*i0 + 1)
else:
classifier = linear_model.LogisticRegression(random_state=3*i0 + 1,
solver='lbfgs')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classifier.fit(hid_train, y_train)
train_accs[i0] = classifier.score(hid_train, y_train)
test_accs[i0] = classifier.score(hid_test, y_test)
return train_accs, test_accs
def clust_holdout_over_layers(seeds, gs, train_params,
figname="clust_holdout_over_layers"):
"""
Logistic regression training and testing error on the representation
through the layers. Compares networks trained
with different choices of g_radius (specified by input parameter gs).
Parameters
----------
seeds : List[int]
List of random number seeds to use for generating instantiations of
the model and dataset. Variation over
these seeds is used to plot error bars.
gs : List[float]
Values of g_radius to iterate over.
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training. Value of g_radius
is overwritten by values in gs.
figname : str
Name of the figure to save.
"""
if not hasattr(gs, '__len__'):
gs = [gs]
layer_label = 'layer'
@memory.cache
def generate_data_table_clust(seeds, gs, train_params):
layer_label = 'layer'
clust_acc_table = pd.DataFrame(
columns=['seed', 'g_radius', 'training', layer_label, 'LR training',
'LR testing'])
train_params_loc = copy.deepcopy(train_params)
for i0, seed in enumerate(seeds):
for i1, g in enumerate(gs):
train_params_loc['g_radius'] = g
train_params_loc['model_seed'] = seed
num_pnts_dim_red = 500
model, params, run_dir = train.initialize_and_train(
**train_params_loc)
class_datasets = params['datasets']
num_train_samples = len(class_datasets['train'])
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(params['model_seed'])
X, Y = class_datasets['train'][:]
if train_params_loc['network'] == 'feedforward':
X0 = X
else:
X0 = X[:, 0]
for epoch, epoch_label in zip([0, -1], ['before', 'after']):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if len(Y.shape) > 1:
Y = Y[:, -1]
cluster_identity = class_datasets['train'].cluster_identity
ds = []
for lay, h in enumerate(hid):
stat = _cluster_holdout_test_acc_stat_fun(h.numpy(),
Y.numpy(),
cluster_identity)
ds.extend([{
'seed': seed, 'g_radius': g,
'training': epoch_label, layer_label: lay,
'LR training': stat[0][k], 'LR testing': stat[1][k]
} for k in range(len(stat[0]))])
clust_acc_table = clust_acc_table.append(pd.DataFrame(ds),
ignore_index=True)
clust_acc_table['seed'] = clust_acc_table['seed'].astype('category')
clust_acc_table['g_radius'] = clust_acc_table['g_radius'].astype(
'category')
clust_acc_table['training'] = clust_acc_table['training'].astype(
'category')
return clust_acc_table
clust_acc_table = generate_data_table_clust(seeds, gs, train_params)
layers = set(clust_acc_table[layer_label])
for stage in ['LR training', 'LR testing']:
if stage == 'LR training':
clust_acc_table_stage = clust_acc_table.drop(columns=['LR testing'])
else:
clust_acc_table_stage = clust_acc_table.drop(
columns=['LR training'])
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator=est_acc,
ci=ci_acc, style='training',
style_order=['after', 'before'], hue='g_radius')
else:
g1 = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator=None,
units='seed', style='training',
style_order=['after', 'before'], hue='g_radius',
alpha=0.6)
g2 = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator='mean',
ci=None, style='training',
style_order=['after', 'before'], hue='g_radius')
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
ax.set_ylim([-.01, 1.01])
ax.set_xticks(range(len(layers)))
out_fig(fig, figname + '_' + stage, subfolder=train_params[
'network'] +
'/clust_holdout_over_layers/',
show=False, save=True, axis_type=0, name_order=0,
data=clust_acc_table)
plt.close('all')
def get_stats(stat_fun, train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None, *args, **kwargs):
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
if style_bool and style_key is None:
raise ValueError("Please specify a style_key.")
hue_bool = len(train_params_list_hue) > 1
if hue_bool and hue_key is None:
raise ValueError("Please specify a hue_key.")
if seeds is None:
seeds = [train_params_list_hue[0]['model_seed']]
params_cat = [[], []]
params_cat[0] = train_params_list_hue
if style_bool:
params_cat[1] = train_params_list_style
else:
params_cat[1] = [None]
table = pd.DataFrame()
    if hue_bool:
        table = table.reindex(columns=table.columns.tolist() + [hue_key])
    if style_bool:
        table = table.reindex(columns=table.columns.tolist() + [style_key])
for i0 in range(len(params_cat)): # hue params
for i1 in range(len(params_cat[i0])):
params = params_cat[i0][i1]
table_piece = stat_fun(params, hue_key, style_key, seeds,
*args, **kwargs)
table = table.append(table_piece, ignore_index=True)
if hue_key is not None:
table[hue_key] = table[hue_key].astype('category')
if style_key is not None:
table[style_key] = table[style_key].astype('category')
return table
def dim_through_training(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None, figname='',
subdir=None, multiprocess_lock=None):
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/' + 'dim_over_training' + '/'
@memory.cache
def compute_dim_through_training(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
class_datasets = returned_params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
for i_epoch, epoch in enumerate(epochs):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
try:
dim = utils.get_effdim(hid[-1],
preserve_gradients=False).item()
except RuntimeError:
print("Dim computation didn't converge.")
dim = np.nan
num_updates = int(
params['num_train_samples_per_epoch']/params[
'batch_size'])*epoch
d = {
'effective_dimension': dim, 'seed': seed,
'epoch_index': i_epoch, 'epoch': epoch,
'num_updates': num_updates
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d, index=[0]),
ignore_index=True)
return table_piece
table = get_stats(compute_dim_through_training, train_params_list_hue,
train_params_list_style, seeds, hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator=est_dim, ci=ci_dim, hue=hue_key,
style=style_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator=None, units='seed', hue=hue_key,
style=style_key, alpha=.6)
g2 = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator='mean', ci=None, hue=hue_key,
style=style_key)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
ax.set_ylim([0, None])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def dim_over_layers(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None,
figname="dim_over_layers", subdir=None, T=0,
multiprocess_lock=None, use_error_bars=None, **plot_kwargs):
"""
Effective dimension measured over layers (or timepoints if looking at an
RNN) of the network, before and after
training.
Parameters
----------
    train_params_list_hue : List[dict]
        List of training parameter dictionaries, one per hue level. Each
        dictionary specifies the model and dataset to use for training.
    train_params_list_style : Optional[List[dict]]
        Optional list of training parameter dictionaries used for the style
        dimension of the plot.
    seeds : List[int]
        List of random number seeds to use for generating instantiations of
        the model and dataset. Variation over these seeds is used to plot
        error bars.
    hue_key : Optional[str]
        Training parameter key used to label the hue dimension.
    style_key : Optional[str]
        Training parameter key used to label the style dimension.
    figname : str
        Name of the figure to save.
    T : int
        Final timepoint to plot (if looking at an RNN). If 0, disregard this
        parameter.
"""
if subdir is None:
subdir = train_params_list_hue[0]['network'] + '/dim_over_layers/'
if use_error_bars is None:
use_error_bars = USE_ERRORBARS
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_dim_over_layers(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
        table_piece = pd.DataFrame()
# Copyright 2022 Accenture Global Solutions Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import dataclasses as dc
import typing as tp
import datetime as dt
import decimal
import platform
import pyarrow as pa
import pyarrow.compute as pc
import pandas as pd
import tracdap.rt.metadata as _meta
import tracdap.rt.exceptions as _ex
import tracdap.rt.impl.util as _util
@dc.dataclass(frozen=True)
class DataSpec:
data_item: str
data_def: _meta.DataDefinition
storage_def: _meta.StorageDefinition
schema_def: tp.Optional[_meta.SchemaDefinition]
@dc.dataclass(frozen=True)
class DataPartKey:
@classmethod
def for_root(cls) -> DataPartKey:
return DataPartKey(opaque_key='part_root')
opaque_key: str
@dc.dataclass(frozen=True)
class DataItem:
schema: pa.Schema
table: tp.Optional[pa.Table] = None
batches: tp.Optional[tp.List[pa.RecordBatch]] = None
pandas: tp.Optional[pd.DataFrame] = None
pyspark: tp.Any = None
@dc.dataclass(frozen=True)
class DataView:
trac_schema: _meta.SchemaDefinition
arrow_schema: pa.Schema
parts: tp.Dict[DataPartKey, tp.List[DataItem]]
@staticmethod
def for_trac_schema(trac_schema: _meta.SchemaDefinition):
arrow_schema = DataMapping.trac_to_arrow_schema(trac_schema)
return DataView(trac_schema, arrow_schema, dict())
class _DataInternal:
@staticmethod
def float_dtype_check():
if "Float64Dtype" not in pd.__dict__:
raise _ex.EStartup("TRAC D.A.P. requires Pandas >= 1.2")
class DataMapping:
"""
Map primary data between different supported data frameworks, preserving equivalent data types.
DataMapping is for primary data, to map metadata types and values use
:py:class:`TypeMapping <tracdap.rt.impl.type_system.TypeMapping>` and
    :py:class:`MetadataCodec <tracdap.rt.impl.type_system.MetadataCodec>`.
"""
__log = _util.logger_for_namespace(_DataInternal.__module__ + ".DataMapping")
# Matches TRAC_ARROW_TYPE_MAPPING in ArrowSchema, tracdap-lib-data
__TRAC_DECIMAL_PRECISION = 38
__TRAC_DECIMAL_SCALE = 12
__TRAC_TIMESTAMP_UNIT = "ms"
__TRAC_TIMESTAMP_ZONE = None
__TRAC_TO_ARROW_BASIC_TYPE_MAPPING = {
_meta.BasicType.BOOLEAN: pa.bool_(),
_meta.BasicType.INTEGER: pa.int64(),
_meta.BasicType.FLOAT: pa.float64(),
_meta.BasicType.DECIMAL: pa.decimal128(__TRAC_DECIMAL_PRECISION, __TRAC_DECIMAL_SCALE),
_meta.BasicType.STRING: pa.utf8(),
_meta.BasicType.DATE: pa.date32(),
_meta.BasicType.DATETIME: pa.timestamp(__TRAC_TIMESTAMP_UNIT, __TRAC_TIMESTAMP_ZONE)
}
# Check the Pandas dtypes for handling floats are available before setting up the type mapping
__PANDAS_FLOAT_DTYPE_CHECK = _DataInternal.float_dtype_check()
__PANDAS_DATETIME_TYPE = pd.to_datetime([]).dtype
# Only partial mapping is possible, decimal and temporal dtypes cannot be mapped this way
__ARROW_TO_PANDAS_TYPE_MAPPING = {
pa.bool_(): pd.BooleanDtype(),
pa.int8(): pd.Int8Dtype(),
pa.int16(): pd.Int16Dtype(),
pa.int32(): pd.Int32Dtype(),
        pa.int64(): pd.Int64Dtype(),
import sys
import os
import pathlib
import argparse
import subprocess
import json
from datetime import datetime
import pandas
def create_parser():
parser = argparse.ArgumentParser(
description="Run a comparison between IPC and our method.")
parser.add_argument(
"-i", "--input", metavar="path/to/input", type=pathlib.Path,
dest="input", help="path to input json(s)", nargs="+")
parser.add_argument(
"-o", "--output", metavar="path/to/combined-profiles.csv",
type=pathlib.Path, dest="output",
default=pathlib.Path("combined-profiles.csv"),
help="path to output CSV")
parser.add_argument(
"--absolute-time", action="store_true", default=False,
help="save absolute times (seconds) instead of percentages")
return parser
def parse_arguments():
parser = create_parser()
args = parser.parse_args()
input = []
for input_file in args.input:
if input_file.is_file() and input_file.suffix == ".json":
input.append(input_file.resolve())
elif input_file.is_dir():
for script_file in input_file.glob('**/*.json'):
input.append(script_file.resolve())
args.input = input
return args
def append_stem(p, stem_suffix):
# return p.with_stem(p.stem + stem_suffix)
return p.parent / (p.stem + stem_suffix + p.suffix)
def combine_profiles(fixtures, absolute_time=False, base_output=None):
fixtures_dir = pathlib.Path(__file__).resolve().parents[1] / "fixtures"
    combined_profile = pandas.DataFrame()
import pandas as pd
import numpy as np
from typing import Union, Optional
class TreeEnsemble():
def __init__(self, x: pd.DataFrame, y: np.array,
                 x_valid: pd.DataFrame = pd.DataFrame()
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from pandas.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
def PCA(self):
for i in range(self.lenlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
def sampleSize(self):
r = 0.3
alpha = 0.05
# power=0.9
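        # Fisher z-transformation of r; the loop below evaluates
        # N = ((Za - Zb) / C)**2 + 3 across a range of power values.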
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.append(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.append(N)
return [powerArray, sizeArray]
def normaliza(self, X):
correction = np.sqrt((len(X) - 1) / len(X)) # std factor corretion
mean_ = np.mean(X, 0)
scale_ = np.std(X, 0)
X = X - mean_
X = X / (scale_ * correction)
return X
def gof(self):
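        # GoF = sqrt(block-size-weighted mean AVE * mean R2 of the endogenous LVs)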
r2mean = np.mean(self.r2.T[self.endoexo()[0]].values)
AVEmean = self.AVE().copy()
totalblock = 0
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = len(block.columns.values)
totalblock += block
AVEmean[self.latent[i]] = AVEmean[self.latent[i]] * block
AVEmean = np.sum(AVEmean) / totalblock
return np.sqrt(AVEmean * r2mean)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
return endoVar, exoVar
def residuals(self):
exoVar = []
endoVar = []
outer_residuals = self.data.copy()
# comun_ = self.data.copy()
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(len(outer_), 1)
loadings = loadings.reshape(len(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = pd.DataFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = pd.concat([outer_residuals, inner_residuals], axis=1)
mean_ = np.mean(self.data, 0)
# comun_ = comun_.apply(lambda row: row + mean_, axis=1)
sumOuterResid = pd.DataFrame.sum(
pd.DataFrame.sum(outer_residuals**2))
sumInnerResid = pd.DataFrame.sum(
pd.DataFrame.sum(inner_residuals**2))
divFun = sumOuterResid + sumInnerResid
return residuals, outer_residuals, inner_residuals, divFun
def srmr(self):
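        # SRMR: root mean square of the differences between the empirical and
        # the model-implied correlation matrices.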
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).mean())
return srmr
def implied(self):
corLVs = pd.DataFrame.cov(self.fscores)
implied_ = pd.DataFrame.dot(self.outer_loadings, corLVs)
implied = pd.DataFrame.dot(implied_, self.outer_loadings.T)
implied.values[[np.arange(len(self.manifests))] * 2] = 1
return implied
def empirical(self):
empirical = self.data_
return pd.DataFrame.corr(empirical)
def frequency(self, data=None, manifests=None):
if data is None:
data = self.data
if manifests is None:
manifests = self.manifests
frequencia = pd.DataFrame(0, index=range(1, 6), columns=manifests)
for i in range(len(manifests)):
frequencia[manifests[i]] = data[
manifests[i]].value_counts()
frequencia = frequencia / len(data) * 100
frequencia = frequencia.reindex_axis(
sorted(frequencia.columns), axis=1)
frequencia = frequencia.fillna(0).T
frequencia = frequencia[(frequencia.T != 0).any()]
maximo = pd.DataFrame.max(pd.DataFrame.max(data, axis=0))
if int(maximo) & 1:
neg = np.sum(frequencia.ix[:, 1: ((maximo - 1) / 2)], axis=1)
ind = frequencia.ix[:, ((maximo + 1) / 2)]
pos = np.sum(
frequencia.ix[:, (((maximo + 1) / 2) + 1):maximo], axis=1)
else:
neg = np.sum(frequencia.ix[:, 1:((maximo) / 2)], axis=1)
ind = 0
pos = np.sum(frequencia.ix[:, (((maximo) / 2) + 1):maximo], axis=1)
frequencia['Neg.'] = pd.Series(
neg, index=frequencia.index)
frequencia['Ind.'] = pd.Series(
ind, index=frequencia.index)
frequencia['Pos.'] = pd.Series(
pos, index=frequencia.index)
return frequencia
def frequencyPlot(self, data_, SEM=None):
segmento = 'SEM'
SEMmax = pd.DataFrame.max(SEM)
ok = None
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = pd.concat([block, SEM], axis=1)
for j in range(SEMmax + 1):
dataSEM = (block.loc[data_[segmento] == j]
).drop(segmento, axis=1)
block_val = dataSEM.columns.values
dataSEM = self.frequency(dataSEM, block_val)['Pos.']
dataSEM = dataSEM.rename(j + 1)
ok = dataSEM if ok is None else pd.concat(
[ok, dataSEM], axis=1)
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
plotando = ok.ix[block_val].dropna(axis=1)
plotando.plot.bar()
plt.legend(loc='upper center',
bbox_to_anchor=(0.5, -.08), ncol=6)
plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
# plt.show()
# block.plot.bar()
# plt.show()
'''for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
block = self.frequency(block, block_val)
block.plot.bar()
plt.show()'''
def dataInfo(self):
sd_ = np.std(self.data, 0)
mean_ = np.mean(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(len(self.data.columns))]
return [mean_, sd_, skew, kurtosis, w]
def predict(self, method='redundancy'):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
if (method == 'exogenous'):
Beta = self.path_matrix.ix[endoVar][endoVar]
Gamma = self.path_matrix.ix[endoVar][exoVar]
beta = [1 if (self.latent[i] in exoVar)
else 0 for i in range(self.lenlatent)]
beta = np.diag(beta)
beta_ = [1 for i in range(len(Beta))]
beta_ = np.diag(beta_)
beta = pd.DataFrame(beta, index=self.latent, columns=self.latent)
mid = pd.DataFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
mid = (mid.T.values).flatten('F')
k = 0
for j in range(len(exoVar)):
for i in range(len(endoVar)):
beta.ix[endoVar[i], exoVar[j]] = mid[k]
k += 1
elif (method == 'redundancy'):
beta = self.path_matrix.copy()
beta_ = pd.DataFrame(1, index=np.arange(
len(exoVar)), columns=np.arange(len(exoVar)))
beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
elif (method == 'communality'):
beta = np.diag(np.ones(len(self.path_matrix)))
beta = pd.DataFrame(beta)
partial_ = pd.DataFrame.dot(self.outer_weights, beta.T.values)
prediction = pd.DataFrame.dot(partial_, self.outer_loadings.T.values)
predicted = pd.DataFrame.dot(self.data, prediction)
predicted.columns = self.manifests
mean_ = np.mean(self.data, 0)
intercept = mean_ - np.dot(mean_, prediction)
predictedData = predicted.apply(lambda row: row + intercept, axis=1)
return predictedData
def cr(self):
# Composite Reliability
composite = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
cor_mat = np.cov(block.T)
evals, evecs = np.linalg.eig(cor_mat)
U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
loadings = V[0, :] * np.sqrt(evals[0])
numerador = np.sum(abs(loadings))**2
denominador = numerador + (p - np.sum(loadings ** 2))
cr = numerador / denominador
composite[self.latent[i]] = cr
else:
composite[self.latent[i]] = 1
composite = composite.T
return(composite)
def r2adjusted(self):
n = len(self.data_)
r2 = self.r2.values
r2adjusted = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
p = sum(self.LVariables['target'] == self.latent[i])
r2adjusted[self.latent[i]] = r2[i] - \
(p * (1 - r2[i])) / (n - p - 1)
return r2adjusted.T
def htmt(self):
htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
index=self.manifests, columns=self.manifests)
mean = []
allBlocks = []
for i in range(self.lenlatent):
block_ = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
allBlocks.append(list(block_.values))
block = htmt_.ix[block_, block_]
mean_ = (block - np.diag(np.diag(block))).values
mean_[mean_ == 0] = np.nan
mean.append(np.nanmean(mean_))
comb = [[k, j] for k in range(self.lenlatent)
for j in range(self.lenlatent)]
comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
for i in range(self.lenlatent ** 2)]
comb__ = []
for i in range(self.lenlatent ** 2):
block = (htmt_.ix[allBlocks[comb[i][1]],
allBlocks[comb[i][0]]]).values
# block[block == 1] = np.nan
comb__.append(np.nanmean(block))
htmt__ = np.divide(comb__, comb_)
where_are_NaNs = np.isnan(htmt__)
htmt__[where_are_NaNs] = 0
htmt = pd.DataFrame(np.tril(htmt__.reshape(
(self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)
return htmt
def comunalidades(self):
# Comunalidades
return self.outer_loadings**2
def AVE(self):
# AVE
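        # Average Variance Extracted: mean of the squared outer loadings
        # (communalities) of each block.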
return self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
def fornell(self):
cor_ = pd.DataFrame.corr(self.fscores)**2
AVE = self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
for i in range(len(cor_)):
cor_.ix[i, i] = AVE[i]
return(cor_)
def rhoA(self):
# rhoA
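        # rho_A reliability estimate (Dijkstra-Henseler), computed per latent
        # variable from the outer weights and the indicator covariance matrix.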
rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = pd.DataFrame.dot(S.T, S) / S.shape[0]
numerador = (
np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
denominador = (
(np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
rhoA_ = ((result)**2) * (numerador / denominador)
if(np.isnan(rhoA_.values)):
rhoA[self.latent[i]] = 1
else:
rhoA[self.latent[i]] = rhoA_.values
return rhoA.T
def xloads(self):
# Xloadings
A = self.data_.transpose().values
B = self.fscores.transpose().values
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
xloads_ = (np.dot(A_mA, B_mB.T) /
np.sqrt(np.dot(ssA[:, None], ssB[None])))
xloads = pd.DataFrame(
xloads_, index=self.manifests, columns=self.latent)
return xloads
def corLVs(self):
# Correlations LVs
corLVs_ = np.tril(pd.DataFrame.corr(self.fscores))
return pd.DataFrame(corLVs_, index=self.latent, columns=self.latent)
def alpha(self):
# Cronbach Alpha
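        # Cronbach's alpha on the standardized indicators:
        # alpha = (p / (p - 1)) * (sum of off-diagonal correlations / variance
        # of the summed scale).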
alpha = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
p_ = len(block)
correction = np.sqrt((p_ - 1) / p_)
soma = np.var(np.sum(block, axis=1))
cor_ = pd.DataFrame.corr(block)
denominador = soma * correction**2
numerador = 2 * np.sum(np.tril(cor_) - np.diag(np.diag(cor_)))
alpha_ = (numerador / denominador) * (p / (p - 1))
alpha[self.latent[i]] = alpha_
else:
alpha[self.latent[i]] = 1
return alpha.T
def vif(self):
vif = []
totalmanifests = range(len(self.data_.columns))
for i in range(len(totalmanifests)):
independent = [x for j, x in enumerate(totalmanifests) if j != i]
coef, resid = np.linalg.lstsq(
self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
r2 = 1 - resid / \
(self.data_.ix[:, i].size * self.data_.ix[:, i].var())
vif.append(1 / (1 - r2))
vif = pd.DataFrame(vif, index=self.manifests)
return vif
def PLSc(self):
##################################################
# PLSc
rA = self.rhoA()
corFalse = self.corLVs()
for i in range(self.lenlatent):
for j in range(self.lenlatent):
if i == j:
corFalse.ix[i][j] = 1
else:
corFalse.ix[i][j] = corFalse.ix[i][
j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
corTrue = np.zeros([self.lenlatent, self.lenlatent])
for i in range(self.lenlatent):
for j in range(self.lenlatent):
corTrue[j][i] = corFalse.ix[i][j]
corTrue[i][j] = corFalse.ix[i][j]
corTrue = pd.DataFrame(corTrue, corFalse.columns, corFalse.index)
# Loadings
attenuedOuter_loadings = pd.DataFrame(
0, index=self.manifests, columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
newLoad = (
weights.values * np.sqrt(rA.ix[self.latent[i]].values)) / (result.values)
myindex = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
myindex_ = self.latent[i]
attenuedOuter_loadings.ix[myindex.values, myindex_] = newLoad
# Path
dependent = np.unique(self.LVariables.ix[:, 'target'])
for i in range(len(dependent)):
independent = self.LVariables[self.LVariables.ix[
:, "target"] == dependent[i]]["source"]
dependent_ = corTrue.ix[dependent[i], independent]
independent_ = corTrue.ix[independent, independent]
# path = np.dot(np.linalg.inv(independent_),dependent_)
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
self.path_matrix.ix[dependent[i], independent] = coef
return attenuedOuter_loadings
# End PLSc
##################################################
def __init__(self, dados, LVcsv, Mcsv, scheme='path', regression='ols', h=0, maximo=300,
stopCrit=7, HOC='false', disattenuate='false', method='lohmoller'):
self.data = dados
self.LVcsv = LVcsv
self.Mcsv = Mcsv
self.maximo = maximo
self.stopCriterion = stopCrit
self.h = h
self.scheme = scheme
self.regression = regression
self.disattenuate = disattenuate
contador = 0
self.convergiu = 0
data = dados if type(
dados) is pd.core.frame.DataFrame else pd.read_csv(dados)
LVariables = pd.read_csv(LVcsv)
Variables = Mcsv if type(
Mcsv) is pd.core.frame.DataFrame else pd.read_csv(Mcsv)
latent_ = LVariables.values.flatten('F')
latent__ = np.unique(latent_, return_index=True)[1]
# latent = np.unique(latent_)
latent = [latent_[i] for i in sorted(latent__)]
self.lenlatent = len(latent)
# Repeating indicators
if (HOC == 'true'):
data_temp = pd.DataFrame()
for i in range(self.lenlatent):
block = self.data[Variables['measurement']
[Variables['latent'] == latent[i]]]
block = block.columns.values
data_temp = pd.concat(
[data_temp, data[block]], axis=1)
cols = list(data_temp.columns)
counts = Counter(cols)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
cols[cols.index(s)] = s + '.' + str(suffix)
data_temp.columns = cols
doublemanifests = list(Variables['measurement'].values)
counts = Counter(doublemanifests)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
doublemanifests[doublemanifests.index(
s)] = s + '.' + str(suffix)
Variables['measurement'] = doublemanifests
data = data_temp
# End data manipulation
manifests_ = Variables['measurement'].values.flatten('F')
manifests__ = np.unique(manifests_, return_index=True)[1]
manifests = [manifests_[i] for i in sorted(manifests__)]
self.manifests = manifests
self.latent = latent
self.Variables = Variables
self.LVariables = LVariables
data = data[manifests]
data_ = self.normaliza(data)
self.data = data
self.data_ = data_
outer_weights = pd.DataFrame(0, index=manifests, columns=latent)
for i in range(len(Variables)):
outer_weights[Variables['latent'][i]][
Variables['measurement'][i]] = 1
inner_paths = pd.DataFrame(0, index=latent, columns=latent)
for i in range(len(LVariables)):
inner_paths[LVariables['source'][i]][LVariables['target'][i]] = 1
path_matrix = inner_paths.copy()
if method == 'wold':
fscores = pd.DataFrame.dot(data_, outer_weights)
intera = self.lenlatent
intera_ = 1
# LOOP
for iterations in range(0, self.maximo):
contador = contador + 1
if method == 'lohmoller':
fscores = pd.DataFrame.dot(data_, outer_weights)
intera = 1
intera_ = self.lenlatent
# fscores = self.normaliza(fscores) # Old Mode A
for q in range(intera):
# Schemes
if (scheme == 'path'):
for h in range(intera_):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (sum(follow) > 0):
# i ~ follow
inner_paths.ix[inner_paths[follow].index, i] = np.linalg.lstsq(
fscores.ix[:, follow], fscores.ix[:, i])[0]
predec = (path_matrix.ix[:, i] == 1)
if (sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(len(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'fuzzy'):
for h in range(len(path_matrix)):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (sum(follow) > 0):
ac, awL, awR = otimiza(fscores.ix[:, i], fscores.ix[
:, follow], len(fscores.ix[:, follow].columns), 0)
inner_paths.ix[inner_paths[follow].index, i] = ac
predec = (path_matrix.ix[:, i] == 1)
if (sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(len(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'centroid'):
inner_paths = np.sign(pd.DataFrame.multiply(
pd.DataFrame.corr(fscores), (path_matrix + path_matrix.T)))
elif (scheme == 'factor'):
inner_paths = pd.DataFrame.multiply(
pd.DataFrame.corr(fscores), (path_matrix + path_matrix.T))
elif (scheme == 'horst'):
inner_paths = inner_paths
print(inner_paths)
if method == 'wold':
fscores[self.latent[q]] = pd.DataFrame.dot(
fscores, inner_paths)
elif method == 'lohmoller':
fscores = pd.DataFrame.dot(fscores, inner_paths)
last_outer_weights = outer_weights.copy()
# Outer Weights
for i in range(self.lenlatent):
# Reflexivo / Modo A
if(Variables['mode'][Variables['latent'] == latent[i]]).any() == "A":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
# 1/N (Z dot X)
res_ = (1 / len(data_)) * np.dot(b, a)
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / np.std(res_) # New Mode A
# Formativo / Modo B
elif(Variables['mode'][Variables['latent'] == latent[i]]).any() == "B":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
# (X'X)^-1 X'Y
a_ = np.dot(a.T, a)
inv_ = np.linalg.inv(a_)
res_ = np.dot(np.dot(inv_, a.T),
fscores.ix[:, latent[i]])
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / (np.std(np.dot(data_.ix[:, myindex], res_)))
if method == 'wold':
fscores = pd.DataFrame.dot(fscores, inner_paths)
diff_ = np.max(
np.max((abs(last_outer_weights) - abs(outer_weights))**2))
if (diff_ < (10**(-(self.stopCriterion)))):
self.convergiu = 1
break
# END LOOP
# print(contador)
# Bootstraping trick
if(np.isnan(outer_weights).any().any()):
self.convergiu = 0
return None
# Standardize Outer Weights (w / || scores ||)
divide_ = np.diag(1 / (np.std(np.dot(data_, outer_weights), 0)
* np.sqrt((len(data_) - 1) / len(data_))))
outer_weights = np.dot(outer_weights, divide_)
outer_weights = pd.DataFrame(
outer_weights, index=manifests, columns=latent)
fscores = pd.DataFrame.dot(data_, outer_weights)
# Outer Loadings
outer_loadings = pd.DataFrame(0, index=manifests, columns=latent)
for i in range(self.lenlatent):
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
cor_ = [sp.stats.pearsonr(a.ix[:, j], b)[0]
for j in range(len(a.columns))]
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_loadings.ix[myindex.values, myindex_] = cor_
# Paths
if (regression == 'fuzzy'):
path_matrix_low = path_matrix.copy()
path_matrix_high = path_matrix.copy()
path_matrix_range = path_matrix.copy()
r2 = pd.DataFrame(0, index=np.arange(1), columns=latent)
dependent = np.unique(LVariables.ix[:, 'target'])
for i in range(len(dependent)):
independent = LVariables[LVariables.ix[
:, "target"] == dependent[i]]["source"]
dependent_ = fscores.ix[:, dependent[i]]
independent_ = fscores.ix[:, independent]
if (self.regression == 'ols'):
# Path Normal
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
# model = sm.OLS(dependent_, independent_)
# results = model.fit()
# print(results.summary())
# r2[dependent[i]] = results.rsquared
r2[dependent[i]] = 1 - resid / \
(dependent_.size * dependent_.var())
path_matrix.ix[dependent[i], independent] = coef
# pvalues.ix[dependent[i], independent] = results.pvalues
elif (self.regression == 'fuzzy'):
size = len(independent_.columns)
ac, awL, awR = otimiza(dependent_, independent_, size, self.h)
# plotaIC(dependent_, independent_, size)
ac, awL, awR = (ac[0], awL[0], awR[0]) if (
size == 1) else (ac, awL, awR)
path_matrix.ix[dependent[i], independent] = ac
path_matrix_low.ix[dependent[i], independent] = awL
path_matrix_high.ix[dependent[i], independent] = awR
# Matrix Fuzzy
for i in range(len(path_matrix.columns)):
for j in range(len(path_matrix.columns)):
path_matrix_range.ix[i, j] = str(round(
path_matrix_low.ix[i, j], 3)) + ' ; ' + str(round(path_matrix_high.ix[i, j], 3))
r2 = r2.T
self.path_matrix = path_matrix
self.outer_weights = outer_weights
self.fscores = fscores
#################################
# PLSc
if disattenuate == 'true':
outer_loadings = self.PLSc()
##################################
# Path Effects
indirect_effects = pd.DataFrame(0, index=latent, columns=latent)
path_effects = [None] * self.lenlatent
path_effects[0] = self.path_matrix
for i in range(1, self.lenlatent):
path_effects[i] = pd.DataFrame.dot(
path_effects[i - 1], self.path_matrix)
for i in range(1, len(path_effects)):
indirect_effects = indirect_effects + path_effects[i]
total_effects = indirect_effects + self.path_matrix
if (regression == 'fuzzy'):
self.path_matrix_high = path_matrix_high
self.path_matrix_low = path_matrix_low
self.path_matrix_range = path_matrix_range
self.total_effects = total_effects.T
self.indirect_effects = indirect_effects
self.outer_loadings = outer_loadings
self.contador = contador
self.r2 = r2
def impa(self):
# Unstandardized Scores
scale_ = np.std(self.data, 0)
outer_weights_ = pd.DataFrame.divide(
self.outer_weights, scale_, axis=0)
relativo = pd.DataFrame.sum(outer_weights_, axis=0)
for i in range(len(outer_weights_)):
for j in range(len(outer_weights_.columns)):
outer_weights_.ix[i, j] = (
outer_weights_.ix[i, j]) / relativo[j]
unstandardizedScores = pd.DataFrame.dot(self.data, outer_weights_)
# Rescaled Scores
rescaledScores = pd.DataFrame(0, index=range(
len(self.data)), columns=self.latent)
for i in range(self.lenlatent):
block = self.data[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
maximo = pd.DataFrame.max(block, axis=0)
minimo = pd.DataFrame.min(block, axis=0)
minimo_ = pd.DataFrame.min(minimo)
maximo_ = | pd.DataFrame.max(maximo) | pandas.DataFrame.max |
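# --- Illustrative aside (not from the original library; all numbers below are made
# up): the truncated block above rescales each latent variable's unstandardized
# scores onto a 0-100 range using the min and max of its measurement block. A
# minimal standalone sketch of that min-max rescaling:
def _demo_rescale_0_100():
    import pandas as pd
    block = pd.DataFrame({'x1': [1.0, 3.0, 5.0], 'x2': [2.0, 4.0, 6.0]})  # fake indicator block
    scores = pd.Series([1.5, 3.5, 5.5])  # fake unstandardized latent scores
    lo = block.min().min()  # smallest observed indicator value in the block
    hi = block.max().max()  # largest observed indicator value in the block
    return 100 * (scores - lo) / (hi - lo)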
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 5 15:51:36 2018
@author: huangjin
"""
import pandas as pd
from tqdm import tqdm
import os
def gen_data(df, time_start, time_end):
df = df.sort_values(by=['code','pt'])
df = df[(df['pt']<=time_end)&(df['pt']>=time_start)]
col = [c for c in df.columns if c not in ['code','pt']]
df_tem = df.groupby(['code']).shift(1).fillna(0)
all_data = df[['code','pt']]
for j in tqdm(range(len(col))):
tem = df[col[j]]-df_tem[col[j]]
all_data = pd.concat([all_data, tem], axis=1)
return all_data
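# Illustrative usage sketch (not from the original script; the 'revenue' column and
# the dates are invented). gen_data expects a frame with 'code' and 'pt' columns plus
# numeric report columns, and returns each numeric column differenced against the
# previous period within the same code (the first period is differenced against a
# filled 0):
def _demo_gen_data():
    demo = pd.DataFrame({
        'code': ['A', 'A', 'A', 'B', 'B'],
        'pt': ['2018-03', '2018-06', '2018-09', '2018-03', '2018-06'],
        'revenue': [10.0, 15.0, 21.0, 5.0, 9.0],
    })
    return gen_data(demo, '2018-03', '2018-09')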
def process_data():
industry_name = ['非银金融','纺织服装','有色金属','计算机','交通运输','医药生物','钢铁','家用电器',
'采掘','国防军工','房地产','建筑材料','休闲服务','综合','建筑装饰','银行',
'轻工制造','化工','电子','机械设备','商业贸易','通信','电气设备','公用事业','传媒',
'农林牧渔','食品饮料','汽车']
industry_name_english = ['Non bank finance', 'textile and clothing', 'non-ferrous metals',
'computer', 'transportation', 'medical biology', 'steel',
'household appliances','Excavation','Defense Force',
'Real Estate', 'Building Materials', 'Leisure Services',
'Comprehensive', 'Architectural Decoration', 'Bank',
'Light manufacturing', 'Chemical', 'Electronic', 'Mechanical equipment',
'Commercial trade', 'Communication', 'Electrical equipment', 'Utilities',
'Media','Agriculture and fishing', 'food and beverage', 'car']
for industry_name_i in range(len(industry_name)):
        # Market values
market_value = pd.read_csv('market_values_end.csv')
stocks_info = pd.read_csv('stocks_info.csv')
stock_info_tem = stocks_info[['code','level_0']]
stock_info_tem = stock_info_tem[stock_info_tem['level_0']==industry_name[industry_name_i]]
market_values = pd.merge(stock_info_tem, market_value, how='left', on = 'code')
market_values.drop('level_0', axis=1, inplace=True)
        # Balance sheet
data_all = pd.read_csv('financial_report_balance_sheet.csv')
stocks_info = pd.read_csv('stocks_info.csv')
stock_info_tem = stocks_info[['code','level_0']]
stock_info_tem = stock_info_tem[stock_info_tem['level_0']==industry_name[industry_name_i]]
balance_data = pd.merge(stock_info_tem, data_all, how='left', on = 'code')
balance_data.drop('level_0', axis=1, inplace=True)
print(balance_data.shape)
        # Income statement (profit and loss)
data_all = | pd.read_csv('quant_financial_report_profitloss.csv') | pandas.read_csv |
from datetime import datetime, time
from itertools import product
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
class TestDataFrameTimeSeriesMethods:
def test_pct_change(self, datetime_frame):
rs = datetime_frame.pct_change(fill_method=None)
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
rs = datetime_frame.pct_change(2)
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
filled = datetime_frame.fillna(method="bfill", limit=1)
tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = datetime_frame.pct_change(freq="5D")
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
)
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
df = DataFrame({"a": s, "b": s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
edf = DataFrame({"a": expected, "b": expected})
tm.assert_frame_equal(chg, edf)
@pytest.mark.parametrize(
"freq, periods, fill_method, limit",
[
("5B", 5, None, None),
("3B", 3, None, None),
("3B", 3, "bfill", None),
("7B", 7, "pad", 1),
("7B", 7, "bfill", 3),
("14B", 14, None, None),
],
)
def test_pct_change_periods_freq(
self, datetime_frame, freq, periods, fill_method, limit
):
# GH 7292
rs_freq = datetime_frame.pct_change(
freq=freq, fill_method=fill_method, limit=limit
)
rs_periods = datetime_frame.pct_change(
periods, fill_method=fill_method, limit=limit
)
tm.assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
tm.assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
def test_frame_append_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert np.issubdtype(df["A"].dtype, np.dtype("M8[ns]"))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ["h", "m", "s", "ms", "D", "M", "Y"]
ns_dtype = np.dtype("M8[ns]")
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp["dates"] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert (tmp["dates"].values == ex_vals).all()
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
filled = rule_monthly.asfreq("B", method="pad") # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq("B", method="pad") # noqa
# test does not blow up on length-0 DataFrame
zero_length = datetime_frame.reindex([])
result = zero_length.asfreq("BM")
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
)
df = df.asfreq("B")
assert isinstance(df.index, DatetimeIndex)
ts = df["A"].asfreq("B")
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range("1/1/2016", periods=10, freq="2S")
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({"one": ts})
# insert pre-existing missing value
df.loc["2016-01-01 00:00:08", "one"] = None
actual_df = df.asfreq(freq="1S", fill_value=9.0)
expected_df = df.asfreq(freq="1S").fillna(9.0)
expected_df.loc["2016-01-01 00:00:08", "one"] = None
tm.assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq="1S").fillna(9.0)
actual_series = ts.asfreq(freq="1S", fill_value=9.0)
tm.assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize(
"data,idx,expected_first,expected_last",
[
({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
],
)
def test_first_last_valid(
self, float_frame, data, idx, expected_first, expected_last
):
N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
mat[-5:] = np.nan
frame = DataFrame({"foo": mat}, index=float_frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = np.nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
# GH20499: its preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_first_valid_index_all_nan(self, klass):
# GH#9752 Series/DataFrame should both return None, not raise
obj = klass([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.first("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq="D")
result = ts.first("10d")
assert len(result) == 10
result = ts.first("3M")
expected = ts[:"3/31/2000"]
tm.assert_frame_equal(result, expected)
result = ts.first("21D")
expected = ts[:21]
tm.assert_frame_equal(result, expected)
result = ts[:0].first("3M")
tm.assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first("1D")
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.last("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq="D")
result = ts.last("10d")
assert len(result) == 10
result = ts.last("21D")
expected = ts["2000-01-10":]
tm.assert_frame_equal(result, expected)
result = ts.last("21D")
expected = ts[-21:]
tm.assert_frame_equal(result, expected)
result = ts[:0].last("3M")
tm.assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last("1D")
def test_at_time(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
tm.assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
)
def test_at_time_errors(self, hour):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H")
df = pd.DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H", tz="US/Pacific")
df = pd.DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
tm.assert_frame_equal(result, expected)
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_frame_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ("08:00:00", "09:00:00")
msg = "Index must be DatetimeIndex"
if axis in ["columns", 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ["index", 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]})
res = df.min()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
| tm.assert_series_equal(res, exp) | pandas.util.testing.assert_series_equal |
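# --- Illustrative standalone example (not part of the test suite above; the frame
# values are made up, and it reuses this module's pandas/numpy imports). It mirrors
# the pct_change(freq=...) equivalence asserted in test_pct_change above:
def _pct_change_freq_demo():
    idx = pd.date_range("2000-01-03", periods=10, freq="B")
    df = DataFrame({"A": np.arange(10, dtype=float)}, index=idx)
    left = df.pct_change(freq="5D")
    filled = df.fillna(method="pad")
    right = (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
    return left.equals(right)  # expected True, matching the assertion in the tests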
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 09:59:35 2020
this file should visualize differences between the replicates
@author: Thomsn
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
def standardsort(bigdir, xaxis, sortlist=None, pnw_topo=None):
os.chdir(bigdir)
rep_list = [diro for diro in os.listdir() if 'rep_' in diro]
rep_list.sort()
if sortlist:
rep_list = [co for _, co in sorted(zip(sortlist[:-1], rep_list))]
rep_mat = pd.DataFrame(columns = xaxis)# np.append(xaxis, 'topo'))
rep_arr = np.array([])
topolist = []
tclist = []
shortopolist = []
    if 'pnw_topo' in locals():  # note: always true here, since pnw_topo is a parameter of this function
pntopolist = []
pntclist = []
pnshortopolist = []
for repsdir in rep_list:
rep_ind = | pd.read_csv(f'{repsdir}/replicate_data.csv') | pandas.read_csv |
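# --- Illustrative sketch (not from the original script, whose loop is truncated in
# this dump; the file name below comes from the loop above, the level names are
# invented). One common way to gather the per-replicate CSVs into a single frame is
# pd.concat keyed by the replicate directory:
def _collect_replicates(rep_list):
    frames = {rep: pd.read_csv(f'{rep}/replicate_data.csv') for rep in rep_list}
    return pd.concat(frames, names=['replicate', 'row'])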
# -*- coding: utf-8 -*-
"""OREGON Arrest Analysis (anna).ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kchDTfhL69aAuNMBJmlfIrSrP8j7bBrJ
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab
from math import pi
from google.colab import files
uploaded = files.upload()
data_path = "oregonfinalversion1.csv"
arrests = pd.read_csv(data_path, index_col=0)
#arrests.loc[' STATEWIDE TOTAL']
arrests.head()
race = ['White', 'Black', 'Other']
totalArrests = [83117, 3307, 10470]
fig, ax=plt.subplots(figsize = (5,5)) ##creating bar chart
plt.bar(race, totalArrests)
plt.xlabel('Race')
plt.ylabel('Total Arrests')
plt.title('Total Arrests in Oregon in 2019 by Race')
for i, data in enumerate(totalArrests): ##adding labels to bars
plt.text(x=i, y=data+1, s=f"{data}", ha='center')
plt.show()
r = ['White', 'Black', 'Other']
rawData = {'greenBars':[83117, 3307,10470], 'blueBars':[3163765,92680,956292]}
df = pd.DataFrame(rawData)
totals = [i+j for i,j in zip(df['greenBars'], df['blueBars'])]
greenBars = [i / j * 100 for i,j in zip(df['greenBars'], totals)]
blueBars = [i / j * 100 for i,j in zip(df['blueBars'], totals)]
fig, ax=plt.subplots(figsize = (5,10))
barWidth = 0.85
names = race
plt.bar(r, greenBars, color='#7F8DA8', edgecolor='white', width=barWidth)
plt.bar(r, blueBars, bottom=greenBars, color='#FADFC3', edgecolor='white', width=barWidth)
plt.xticks(r, names)
plt.xlabel("Race")
plt.ylabel("Percentage")
plt.ylim(0,30)
plt.title('Arrests as % of Population in Oregon')
plt.show()
r2 = ['White', 'Black','Other']
rawData = {'greenBars':[10979,1335,2522], 'blueBars':[83117,3307,10470]}
df = pd.DataFrame(rawData)
totals = [i+j for i,j in zip(df['greenBars'], df['blueBars'])]
greenBars = [i / j * 100 for i,j in zip(df['greenBars'], totals)]
blueBars = [i / j * 100 for i,j in zip(df['blueBars'], totals)]
fig, ax=plt.subplots(figsize = (5,10))
barWidth = 0.85
names = race
plt.bar(r2, greenBars, color='#7F8DA8', edgecolor='white', width=barWidth)
plt.bar(r2, blueBars, bottom=greenBars, color='#FADFC3', edgecolor='white', width=barWidth)
plt.xticks(r2)
plt.ylim(0,30)
plt.xlabel("Race")
plt.ylabel("Percentage")
plt.title('Prison population as a proportion of arrests in Oregon')
plt.show()
from google.colab import files
uploaded = files.upload()
data_path = "Oregonarrestoffencetypeanna.csv"
arrests = | pd.read_csv(data_path, index_col=0) | pandas.read_csv |
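# --- Illustrative refactoring sketch (not from the original notebook; the function
# and argument names are invented, and it reuses the matplotlib/pandas imports
# above). The two stacked-bar figures repeat the same "share of total" computation,
# which a small helper can capture:
def stacked_share_bar(labels, part, whole, title, ylim=30):
    df = pd.DataFrame({'part': part, 'whole': whole}, index=labels)
    share = 100 * df['part'] / (df['part'] + df['whole'])
    rest = 100 - share
    fig, ax = plt.subplots(figsize=(5, 10))
    ax.bar(labels, share, color='#7F8DA8', edgecolor='white', width=0.85)
    ax.bar(labels, rest, bottom=share, color='#FADFC3', edgecolor='white', width=0.85)
    ax.set_ylim(0, ylim)
    ax.set_xlabel('Race')
    ax.set_ylabel('Percentage')
    ax.set_title(title)
    return fig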
import pandas as pd
import pandas.testing as pdt
import pytest
import pytz
from werkzeug.exceptions import RequestEntityTooLarge
from sfa_api.conftest import (
VALID_FORECAST_JSON, VALID_CDF_FORECAST_JSON, demo_forecasts)
from sfa_api.utils import request_handling
from sfa_api.utils.errors import (
BadAPIRequest, StorageAuthError, NotFoundException)
@pytest.mark.parametrize('start,end', [
('invalid', 'invalid'),
('NaT', 'NaT')
])
def test_validate_start_end_fail(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('start,end', [
('20190101T120000Z', '20190101T130000Z'),
('20190101T120000', '20190101T130000'),
('20190101T120000', '20190101T130000Z'),
('20190101T120000Z', '20190101T130000+00:00'),
('20190101T120000Z', '20190101T140000+01:00'),
])
def test_validate_start_end_success(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('query,exc', [
('?start=20200101T0000Z', {'end'}),
('?end=20200101T0000Z', {'start'}),
('?start=20200101T0000Z&end=20210102T0000Z', {'end'}),
('', {'start', 'end'}),
pytest.param('?start=20200101T0000Z&end=20200102T0000Z', {},
marks=pytest.mark.xfail(strict=True))
])
def test_validate_start_end_not_provided(app, forecast_id, query, exc):
url = f'/forecasts/single/{forecast_id}/values{query}'
with app.test_request_context(url):
with pytest.raises(BadAPIRequest) as err:
request_handling.validate_start_end()
if exc:
assert set(err.value.errors.keys()) == exc
@pytest.mark.parametrize('content_type,payload', [
('text/csv', ''),
('application/json', '{}'),
('application/json', '{"values": "nope"}'),
('text/plain', 'nope'),
])
def test_validate_parsable_fail(app, content_type, payload, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(
url, content_type=content_type, data=payload, method='POST',
content_length=len(payload)):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type', [
('text/csv'),
('application/json'),
('application/json'),
])
def test_validate_parsable_fail_too_large(app, content_type, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(RequestEntityTooLarge):
with app.test_request_context(
url, content_type=content_type, method='POST',
content_length=17*1024*1024):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type,payload', [
('text/csv', 'timestamp,value\n2019-01-01T12:00:00Z,5'),
('application/json', ('{"values":[{"timestamp": "2019-01-01T12:00:00Z",'
'"value": 5}]}')),
])
def test_validate_parsable_success(app, content_type, payload, forecast_id):
with app.test_request_context(f'/forecasts/single/{forecast_id}/values/',
content_type=content_type, data=payload,
method='POST'):
request_handling.validate_parsable_values()
def test_validate_observation_values():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
request_handling.validate_observation_values(df)
def test_validate_observation_values_bad_value():
df = pd.DataFrame({'value': [0.1, 's.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_no_value():
df = pd.DataFrame({'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_bad_timestamp():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
def test_validate_observation_values_no_timestamp():
df = pd.DataFrame({
'value': [0.1, '.2'], 'quality_flag': [0.0, 1]})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
@pytest.mark.parametrize('quality', [
[1, .1],
[1, '0.9'],
[2, 0],
['ham', 0]
])
def test_validate_observation_values_bad_quality(quality):
df = pd.DataFrame({'value': [0.1, .2],
'quality_flag': quality,
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
def test_validate_observation_values_no_quality():
df = pd.DataFrame({'value': [0.1, '.2'],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
expected_parsed_df = pd.DataFrame({
'a': [1, 2, 3, 4],
'b': [4, 5, 6, 7],
})
csv_string = "a,b\n1,4\n2,5\n3,6\n4,7\n"
json_string = '{"values":{"a":[1,2,3,4],"b":[4,5,6,7]}}'
def test_parse_csv_success():
test_df = request_handling.parse_csv(csv_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('csv_input', [
'',
"a,b\n1,4\n2.56,2.45\n1,2,3\n"
])
def test_parse_csv_failure(csv_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_csv(csv_input)
def test_parse_json_success():
test_df = request_handling.parse_json(json_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('json_input', [
'',
"{'a':[1,2,3]}"
])
def test_parse_json_failure(json_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_json(json_input)
null_df = pd.DataFrame({
'timestamp': [
'2018-10-29T12:00:00Z',
'2018-10-29T13:00:00Z',
'2018-10-29T14:00:00Z',
'2018-10-29T15:00:00Z',
],
'value': [32.93, 25.17, None, None],
'quality_flag': [0, 0, 1, 0]
})
def test_parse_csv_nan():
test_df = request_handling.parse_csv("""
# comment line
timestamp,value,quality_flag
2018-10-29T12:00:00Z,32.93,0
2018-10-29T13:00:00Z,25.17,0
2018-10-29T14:00:00Z,,1 # this value is NaN
2018-10-29T15:00:00Z,NaN,0
""")
pdt.assert_frame_equal(test_df, null_df)
def test_parse_json_nan():
test_df = request_handling.parse_json("""
{"values":[
{"timestamp": "2018-10-29T12:00:00Z", "value": 32.93, "quality_flag": 0},
{"timestamp": "2018-10-29T13:00:00Z", "value": 25.17, "quality_flag": 0},
{"timestamp": "2018-10-29T14:00:00Z", "value": null, "quality_flag": 1},
{"timestamp": "2018-10-29T15:00:00Z", "value": null, "quality_flag": 0}
]}
""")
pdt.assert_frame_equal(test_df, null_df)
@pytest.mark.parametrize('data,mimetype', [
(csv_string, 'text/csv'),
(csv_string, 'application/vnd.ms-excel'),
(json_string, 'application/json')
])
def test_parse_values_success(app, data, mimetype):
with app.test_request_context():
test_df = request_handling.parse_values(data, mimetype)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('data,mimetype', [
(csv_string, 'application/fail'),
(json_string, 'image/bmp'),
])
def test_parse_values_failure(data, mimetype):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_values(data, mimetype)
@pytest.mark.parametrize('dt_string,expected', [
('20190101T1200Z', pd.Timestamp('20190101T1200Z')),
('20190101T1200', pd.Timestamp('20190101T1200Z')),
('20190101T1200+0700', pd.Timestamp('20190101T0500Z'))
])
def test_parse_to_timestamp(dt_string, expected):
parsed_dt = request_handling.parse_to_timestamp(dt_string)
assert parsed_dt == expected
@pytest.mark.parametrize('dt_string', [
'invalid datetime',
'21454543251345234',
'20190101T2500Z',
'NaT',
])
def test_parse_to_timestamp_error(dt_string):
with pytest.raises(ValueError):
request_handling.parse_to_timestamp(dt_string)
@pytest.mark.parametrize('index,interval_length,previous_time', [
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='10min'), 10, pd.Timestamp('2019-09-01T1150Z')),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0200Z',
'2019-09-01T0400Z']), 120, None),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0011Z',
'2019-09-01T0016Z']),
5,
pd.Timestamp('2019-09-01T0001Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0013Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-08-31T2352Z')),
# out of order
pytest.param(
pd.DatetimeIndex(['2019-09-01T0013Z', '2019-09-01T0006Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-08-31T2352Z'), marks=pytest.mark.xfail),
(pd.date_range(start='2019-03-10 00:00', end='2019-03-10 05:00',
tz='America/Denver', freq='1h'),
60, None), # DST transition
(pd.date_range(start='2019-11-03 00:00', end='2019-11-03 05:00',
tz='America/Denver', freq='1h'),
60, None), # DST transition
(pd.DatetimeIndex(['2019-01-01T000132Z']), 33, None),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2018-12-01T000132Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2019-01-02T000132Z'))
])
def test_validate_index_period(index, interval_length, previous_time):
request_handling.validate_index_period(index, interval_length,
previous_time)
def test_validate_index_empty():
with pytest.raises(request_handling.BadAPIRequest):
request_handling.validate_index_period(pd.DatetimeIndex([]), 10,
None)
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0200Z',
'2019-09-01T0300Z']), 60),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0300Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='20min'), 10),
])
def test_validate_index_period_missing(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'Missing' in errs[0]
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0100Z',
'2019-09-01T0200Z']), 120),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0045Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='5min'), 10),
])
def test_validate_index_period_extra(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'extra' in errs[0]
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0100Z',
'2019-09-01T0201Z']), 120),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0130Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1305Z',
freq='5min'), 10),
])
def test_validate_index_period_other(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) > 0
@pytest.mark.parametrize('index,interval_length,previous_time', [
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='10min'), 10, pd.Timestamp('2019-09-01T1155Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0011Z',
'2019-09-01T0016Z']),
5,
pd.Timestamp('2019-09-01T0000Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0013Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-09-01T0000Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2018-12-01T000232Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2020-12-01T000232Z'))
])
def test_validate_index_period_previous(index, interval_length, previous_time):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
previous_time)
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'previous time' in errs[0]
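# --- Illustrative standalone sketch (not from sfa_api; the helper below is invented
# and much simpler than the real validator, which also checks consistency with the
# previously posted timestamp). The gist of the index-period tests above is that the
# timestamps must land exactly on an interval_length-minute grid between the first
# and last points, with nothing missing and nothing extra:
def _index_conforms(index, interval_length_minutes):
    freq = pd.Timedelta(minutes=interval_length_minutes)
    expected = pd.date_range(start=index[0], end=index[-1], freq=freq)
    missing = expected.difference(index)
    extra = index.difference(expected)
    return len(missing) == 0 and len(extra) == 0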
@pytest.mark.parametrize('ep,res', [
('{"restrict_upload": true}', True),
('{"restrict_upload": true, "other_key": 1}', True),
('{"restrict_upload" : true}', True),
('{"restrict_upload" : True}', True),
('{"restrict_upload": 1}', False),
('{"restrict_upload": false}', False),
('{"restrict_uploa": true}', False),
('{"upload_restrict_upload": true}', False),
])
def test__restrict_in_extra(ep, res):
assert request_handling._restrict_in_extra(ep) is res
def test__current_utc_timestamp():
t = request_handling._current_utc_timestamp()
assert isinstance(t, pd.Timestamp)
assert t.tzinfo == pytz.utc
def test_restrict_upload_window_noop():
assert request_handling.restrict_forecast_upload_window(
'', None, None) is None
@pytest.mark.parametrize('now,first', [
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T12:00Z'), pd.Timestamp('2019-11-01T13:00Z')),
( | pd.Timestamp('2019-11-01T00:00Z') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Sun May 17 14:48:16 2020
@author: <NAME>
"""
import json
import requests
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fbprophet import Prophet
import math
import time
import calendar
from datetime import date, datetime
def india_world_pred():
plt.close('all')
plt.rcParams.update({'figure.max_open_warning': 0})
#**** INDIA ****
response = requests.get("https://corona.lmao.ninja/v3/covid-19/historical/india?lastdays=all")
india = json.loads(response.text)
cases=[]
deaths=[]
rec=[]
for i in india['timeline']:
for j in india['timeline'][i].items():
if i == 'cases':
cases.append(j)
elif i == 'deaths':
deaths.append(j)
else:
rec.append(j)
#creating dataframe in the structure acceptable by fb prophet
    cases = pd.DataFrame(cases, columns=['ds', 'y'])
    deaths = pd.DataFrame(deaths, columns=['ds', 'y'])
    rec = pd.DataFrame(rec, columns=['ds', 'y'])
#modifying the time
#year = (datetime.now()).strftime('%Y')
dates_list = []
for i in range(len(cases)):
a = cases['ds'][i].split("/")
b = a[1]+' '+calendar.month_abbr[int(a[0])]+' 20'+ a[2]
dates_list.append(b)
dates_list = pd.DataFrame(dates_list,columns = ['Date'])
#creating csv file for india
original = pd.concat([dates_list['Date'],cases['y'],deaths['y'],rec['y']],
axis=1,sort=False)
original.columns = ['Date','Cases','Deaths','Recoveries']
original.to_csv("data/india_original.csv")
#converting values to log
cases['y'] = np.log(cases['y'])
deaths['y'] = np.log(deaths['y'])
rec['y'] = np.log(rec['y'])
#replacing infinite values with 0
    for i in range(len(cases)):
        if math.isinf(cases['y'][i]):
            cases.loc[i, 'y'] = 0
        if math.isinf(deaths['y'][i]):
            deaths.loc[i, 'y'] = 0
        if math.isinf(rec['y'][i]):
            rec.loc[i, 'y'] = 0
###predicting cases using fb prophet
m = Prophet()
m.add_seasonality(name='daily', period=40.5, fourier_order=5)
m.fit(cases)
future = m.make_future_dataframe(periods=7)
# future.tail()
forecast = m.predict(future)
# forecast[['ds', 'yhat']].tail()
#plotting the model and saving it for cases
plot_locations = []
fig = m.plot(forecast)
location = "static/img/plot/india_plot_cases" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
fig.legend(("actual","fitted","80% confidence interval"),loc='center right',fontsize=10)
plt.savefig(location,figsize=(15,7),dpi=250)
fig = m.plot_components(forecast)
location = "static/img/plot/india_components_cases" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
plt.savefig(location,figsize=(15,7),dpi=250)
#final dataframe for cases
final_cases = pd.DataFrame()
final_cases['ds'] = forecast['ds']
final_cases['y'] = np.exp(forecast['yhat'])
final_cases = final_cases.iloc[(len(final_cases)-7):,:].reset_index()
final_cases.drop(columns = 'index',inplace=True)
###predicting deaths using fb prophet model
m = Prophet()
m.add_seasonality(name='daily', period=40.5, fourier_order=5)
m.fit(deaths)
future = m.make_future_dataframe(periods=7)
# future.tail()
forecast = m.predict(future)
# forecast[['ds', 'yhat']].tail()
#plotting the model and saving it for deaths
fig = m.plot(forecast)
location = "static/img/plot/india_plot_deaths" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
fig.legend(("actual","fitted","80% confidence interval"),loc='center right',fontsize=10)
plt.savefig(location,figsize=(15,7),dpi=250)
fig = m.plot_components(forecast)
location = "static/img/plot/india_component_deaths" + str(date.today()) + ".png"
plot_locations = plot_locations + [location]
plt.savefig(location,figsize=(15,7),dpi=250)
#final dataframe for deaths
final_deaths = | pd.DataFrame() | pandas.DataFrame |
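# --- Illustrative refactoring sketch (not from the original script; the helper name
# is invented and it reuses the Prophet/pandas/numpy imports above). The cases,
# deaths and recoveries branches repeat the same "fit Prophet on log counts,
# forecast 7 days, exponentiate yhat" steps, which a single helper can capture:
def forecast_log_series(df_log, periods=7):
    # df_log: DataFrame with columns 'ds' (dates) and 'y' (log-transformed counts)
    m = Prophet()
    m.add_seasonality(name='daily', period=40.5, fourier_order=5)
    m.fit(df_log)
    future = m.make_future_dataframe(periods=periods)
    forecast = m.predict(future)
    out = pd.DataFrame({'ds': forecast['ds'], 'y': np.exp(forecast['yhat'])})
    return out.iloc[-periods:].reset_index(drop=True)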
import numpy as np
import pandas as pd
import os, errno
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
from scipy.spatial.distance import squareform
from sklearn.decomposition import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
def save_df_to_npz(obj, filename):
np.savez_compressed(filename, data=obj.values, index=obj.index.values, columns=obj.columns.values)
def save_df_to_text(obj, filename):
obj.to_csv(filename, sep='\t')
def load_df_from_npz(filename):
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (p for i,p in enumerate(iterable) if (i-worker_index)%total_workers==0)
def fast_euclidean(mat):
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1,1))
D += squared_norms.reshape((1,-1))
D = np.sqrt(D)
D[D < 0] = 0
return squareform(D, checks=False)
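# Illustrative self-check (not part of the original cNMF code; never called here):
# fast_euclidean relies on the Gram-matrix identity ||a-b||^2 = ||a||^2 + ||b||^2 - 2*a.b
# and returns a condensed distance vector, so it should agree with scipy's pdist:
def _check_fast_euclidean(n=5, d=3):
    from scipy.spatial.distance import pdist
    X = np.random.rand(n, d)
    return np.allclose(fast_euclidean(X), pdist(X, metric='euclidean'), atol=1e-8)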
def fast_ols_all_cols(X, Y):
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return(beta)
def fast_ols_all_cols_df(X,Y):
beta = fast_ols_all_cols(X, Y)
beta = pd.DataFrame(beta, index=X.columns, columns=Y.columns)
return(beta)
def var_sparse_matrix(X):
mean = np.array(X.mean(axis=0)).reshape(-1)
Xcopy = X.copy()
Xcopy.data **= 2
var = np.array(Xcopy.mean(axis=0)).reshape(-1) - (mean**2)
return(var)
def get_highvar_genes_sparse(expression, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_mean = np.array(expression.mean(axis=0)).astype(float).reshape(-1)
    E2 = expression.copy()
    E2.data **= 2
    gene2_mean = np.array(E2.mean(axis=0)).reshape(-1)
gene_var = pd.Series(gene2_mean - (gene_mean**2))
del(E2)
gene_mean = pd.Series(gene_mean)
gene_fano = gene_var / gene_mean
# Find parameters for expected fano line
top_genes = gene_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_var)/gene_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_fano.quantile([0.10, 0.90])
winsor_box = ((gene_fano > w_fano_low) &
(gene_fano < w_fano_high) &
(gene_mean > w_mean_low) &
(gene_mean < w_mean_high))
fano_median = gene_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_mean + (B**2)
fano_ratio = (gene_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
            T = (1. + gene_fano[winsor_box].std())
else:
T = expected_fano_threshold
        high_var_genes_ind = (fano_ratio > T) & (gene_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_mean,
'var': gene_var,
'fano': gene_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
def get_highvar_genes(input_counts, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_counts_mean = pd.Series(input_counts.mean(axis=0).astype(float))
gene_counts_var = pd.Series(input_counts.var(ddof=0, axis=0).astype(float))
gene_counts_fano = pd.Series(gene_counts_var/gene_counts_mean)
# Find parameters for expected fano line
top_genes = gene_counts_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_counts_var)/gene_counts_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_counts_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_counts_fano.quantile([0.10, 0.90])
winsor_box = ((gene_counts_fano > w_fano_low) &
(gene_counts_fano < w_fano_high) &
(gene_counts_mean > w_mean_low) &
(gene_counts_mean < w_mean_high))
fano_median = gene_counts_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_counts_mean + (B**2)
fano_ratio = (gene_counts_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
T = (1. + gene_counts_fano[winsor_box].std())
else:
T = expected_fano_threshold
high_var_genes_ind = (fano_ratio > T) & (gene_counts_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_counts_mean,
'var': gene_counts_var,
'fano': gene_counts_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
def compute_tpm(input_counts):
"""
Default TPM normalization
"""
tpm = input_counts.copy()
sc.pp.normalize_per_cell(tpm, counts_per_cell_after=1e6)
return(tpm)
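# Illustrative usage sketch (not part of the original cNMF code; the toy matrix is
# made up): compute_tpm rescales each cell (row) of an AnnData counts matrix to
# counts-per-million via scanpy, so every row of the result sums to 1e6:
def _demo_compute_tpm():
    counts = sc.AnnData(np.array([[1.0, 3.0], [2.0, 2.0]]))
    tpm = compute_tpm(counts)
    return tpm.X.sum(axis=1)  # each entry equals 1e6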
class cNMF():
def __init__(self, output_dir=".", name=None):
"""
Parameters
----------
output_dir : path, optional (default=".")
Output directory for analysis files.
name : string, optional (default=None)
A name for this analysis. Will be prefixed to all output files.
If set to None, will be automatically generated from date (and random string).
"""
self.output_dir = output_dir
if name is None:
now = datetime.datetime.now()
rand_hash = uuid.uuid4().hex[:6]
name = '%s_%s' % (now.strftime("%Y_%m_%d"), rand_hash)
self.name = name
self.paths = None
def _initialize_dirs(self):
if self.paths is None:
# Check that output directory exists, create it if needed.
check_dir_exists(self.output_dir)
check_dir_exists(os.path.join(self.output_dir, self.name))
check_dir_exists(os.path.join(self.output_dir, self.name, 'cnmf_tmp'))
self.paths = {
'normalized_counts' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.norm_counts.h5ad'),
'nmf_replicate_parameters' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.nmf_params.df.npz'),
'nmf_run_parameters' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.nmf_idvrun_params.yaml'),
'nmf_genes_list' : os.path.join(self.output_dir, self.name, self.name+'.overdispersed_genes.txt'),
'tpm' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.tpm.h5ad'),
'tpm_stats' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.tpm_stats.df.npz'),
'iter_spectra' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.iter_%d.df.npz'),
'iter_usages' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.usages.k_%d.iter_%d.df.npz'),
'merged_spectra': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.merged.df.npz'),
'local_density_cache': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.local_density_cache.k_%d.merged.df.npz'),
'consensus_spectra': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.dt_%s.consensus.df.npz'),
'consensus_spectra__txt': os.path.join(self.output_dir, self.name, self.name+'.spectra.k_%d.dt_%s.consensus.txt'),
'consensus_usages': os.path.join(self.output_dir, self.name, 'cnmf_tmp',self.name+'.usages.k_%d.dt_%s.consensus.df.npz'),
'consensus_usages__txt': os.path.join(self.output_dir, self.name, self.name+'.usages.k_%d.dt_%s.consensus.txt'),
'consensus_stats': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.stats.k_%d.dt_%s.df.npz'),
'clustering_plot': os.path.join(self.output_dir, self.name, self.name+'.clustering.k_%d.dt_%s.png'),
'gene_spectra_score': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.gene_spectra_score.k_%d.dt_%s.df.npz'),
'gene_spectra_score__txt': os.path.join(self.output_dir, self.name, self.name+'.gene_spectra_score.k_%d.dt_%s.txt'),
'gene_spectra_tpm': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.gene_spectra_tpm.k_%d.dt_%s.df.npz'),
'gene_spectra_tpm__txt': os.path.join(self.output_dir, self.name, self.name+'.gene_spectra_tpm.k_%d.dt_%s.txt'),
'k_selection_plot' : os.path.join(self.output_dir, self.name, self.name+'.k_selection.png'),
'k_selection_stats' : os.path.join(self.output_dir, self.name, self.name+'.k_selection_stats.df.npz'),
}
def get_norm_counts(self, counts, tpm,
high_variance_genes_filter = None,
num_highvar_genes = None
):
"""
Parameters
----------
counts : anndata.AnnData
Scanpy AnnData object (cells x genes) containing raw counts. Filtered such that
no genes or cells with 0 counts
tpm : anndata.AnnData
Scanpy AnnData object (cells x genes) containing tpm normalized data matching
counts
high_variance_genes_filter : np.array, optional (default=None)
A pre-specified list of genes considered to be high-variance.
Only these genes will be used during factorization of the counts matrix.
Must match the .var index of counts and tpm.
If set to None, high-variance genes will be automatically computed, using the
parameters below.
num_highvar_genes : int, optional (default=None)
Instead of providing an array of high-variance genes, identify this many most overdispersed genes
for filtering
Returns
-------
normcounts : anndata.AnnData, shape (cells, num_highvar_genes)
A counts matrix containing only the high variance genes and with columns (genes)normalized to unit
variance
"""
if high_variance_genes_filter is None:
## Get list of high-var genes if one wasn't provided
if sp.issparse(tpm.X):
(gene_counts_stats, gene_fano_params) = get_highvar_genes_sparse(tpm.X, numgenes=num_highvar_genes)
else:
(gene_counts_stats, gene_fano_params) = get_highvar_genes(np.array(tpm.X), numgenes=num_highvar_genes)
high_variance_genes_filter = list(tpm.var.index[gene_counts_stats.high_var.values])
## Subset out high-variance genes
norm_counts = counts[:, high_variance_genes_filter]
## Scale genes to unit variance
if sp.issparse(tpm.X):
sc.pp.scale(norm_counts, zero_center=False)
if np.isnan(norm_counts.X.data).sum() > 0:
print('Warning NaNs in normalized counts matrix')
else:
norm_counts.X /= norm_counts.X.std(axis=0, ddof=1)
if np.isnan(norm_counts.X).sum().sum() > 0:
print('Warning NaNs in normalized counts matrix')
## Save a \n-delimited list of the high-variance genes used for factorization
open(self.paths['nmf_genes_list'], 'w').write('\n'.join(high_variance_genes_filter))
## Check for any cells that have 0 counts of the overdispersed genes
zerocells = norm_counts.X.sum(axis=1)==0
if zerocells.sum()>0:
examples = norm_counts.obs.index[zerocells]
print('Warning: %d cells have zero counts of overdispersed genes. E.g. %s' % (zerocells.sum(), examples[0]))
print('Consensus step may not run when this is the case')
return(norm_counts)
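    # Illustrative usage sketch (not part of the original class; argument values are
    # invented):
    #     cnmf_obj = cNMF(output_dir='./results', name='example_run')
    #     norm = cnmf_obj.get_norm_counts(counts, tpm, num_highvar_genes=2000)
    #     cnmf_obj.save_norm_counts(norm)
    # where `counts` and `tpm` are the matching AnnData objects described in the
    # docstring above.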
def save_norm_counts(self, norm_counts):
self._initialize_dirs()
sc.write(self.paths['normalized_counts'], norm_counts)
def get_nmf_iter_params(self, ks, n_iter = 100,
random_state_seed = None,
beta_loss = 'kullback-leibler'):
"""
Create a DataFrame with parameters for NMF iterations.
Parameters
----------
ks : integer, or list-like.
Number of topics (components) for factorization.
Several values can be specified at the same time, which will be run independently.
n_iter : integer, optional (defailt=100)
Number of iterations for factorization. If several ``k`` are specified, this many
iterations will be run for each value of ``k``.
random_state_seed : int or None, optional (default=None)
Seed for sklearn random state.
"""
if type(ks) is int:
ks = [ks]
# Remove any repeated k values, and order.
k_list = sorted(set(list(ks)))
n_runs = len(ks)* n_iter
np.random.seed(seed=random_state_seed)
nmf_seeds = np.random.randint(low=1, high=(2**32)-1, size=n_runs)
replicate_params = []
for i, (k, r) in enumerate(itertools.product(k_list, range(n_iter))):
replicate_params.append([k, r, nmf_seeds[i]])
replicate_params = pd.DataFrame(replicate_params, columns = ['n_components', 'iter', 'nmf_seed'])
_nmf_kwargs = dict(
alpha=0.0,
l1_ratio=0.0,
beta_loss=beta_loss,
solver='mu',
tol=1e-4,
max_iter=400,
regularization=None,
init='random'
)
## Coordinate descent is faster than multiplicative update but only works for frobenius
if beta_loss == 'frobenius':
_nmf_kwargs['solver'] = 'cd'
return(replicate_params, _nmf_kwargs)
def save_nmf_iter_params(self, replicate_params, run_params):
self._initialize_dirs()
save_df_to_npz(replicate_params, self.paths['nmf_replicate_parameters'])
with open(self.paths['nmf_run_parameters'], 'w') as F:
yaml.dump(run_params, F)
def _nmf(self, X, nmf_kwargs):
"""
Parameters
----------
X : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
nmf_kwargs : dict,
Arguments to be passed to ``non_negative_factorization``
"""
(usages, spectra, niter) = non_negative_factorization(X, **nmf_kwargs)
return(spectra, usages)
def run_nmf(self,
worker_i=1, total_workers=1,
):
"""
Iteratively run NMF with prespecified parameters.
Use the `worker_i` and `total_workers` parameters for parallelization.
Generic kwargs for NMF are loaded from self.paths['nmf_run_parameters'], defaults below::
``non_negative_factorization`` default arguments:
alpha=0.0
l1_ratio=0.0
beta_loss='kullback-leibler'
solver='mu'
tol=1e-4,
max_iter=200
regularization=None
init='random'
random_state, n_components are both set by the prespecified self.paths['nmf_replicate_parameters'].
Parameters
----------
norm_counts : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
(Output of ``normalize_counts``)
run_params : pandas.DataFrame,
Parameters for NMF iterations.
(Output of ``prepare_nmf_iter_params``)
"""
self._initialize_dirs()
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
norm_counts = sc.read(self.paths['normalized_counts'])
_nmf_kwargs = yaml.load(open(self.paths['nmf_run_parameters']), Loader=yaml.FullLoader)
jobs_for_this_worker = worker_filter(range(len(run_params)), worker_i, total_workers)
for idx in jobs_for_this_worker:
p = run_params.iloc[idx, :]
print('[Worker %d]. Starting task %d.' % (worker_i, idx))
_nmf_kwargs['random_state'] = p['nmf_seed']
_nmf_kwargs['n_components'] = p['n_components']
(spectra, usages) = self._nmf(norm_counts.X, _nmf_kwargs)
spectra = pd.DataFrame(spectra,
index=np.arange(1, _nmf_kwargs['n_components']+1),
columns=norm_counts.var.index)
save_df_to_npz(spectra, self.paths['iter_spectra'] % (p['n_components'], p['iter']))
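    # Illustrative workflow sketch (not part of the original class; the k values,
    # iteration counts and worker counts below are invented). The factorization
    # methods above are typically driven in this order:
    #     params, nmf_kwargs = cnmf_obj.get_nmf_iter_params(ks=[5, 6, 7], n_iter=100)
    #     cnmf_obj.save_nmf_iter_params(params, nmf_kwargs)
    #     cnmf_obj.run_nmf(worker_i=0, total_workers=4)   # repeat for each worker index
    #     cnmf_obj.combine_nmf(k=7)
    #     cnmf_obj.consensus(k=7, density_threshold_str='0.5')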
def combine_nmf(self, k, remove_individual_iterations=False):
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
print('Combining factorizations for k=%d.'%k)
self._initialize_dirs()
combined_spectra = None
n_iter = sum(run_params.n_components==k)
run_params_subset = run_params[run_params.n_components==k].sort_values('iter')
spectra_labels = []
for i,p in run_params_subset.iterrows():
spectra = load_df_from_npz(self.paths['iter_spectra'] % (p['n_components'], p['iter']))
if combined_spectra is None:
combined_spectra = np.zeros((n_iter, k, spectra.shape[1]))
combined_spectra[p['iter'], :, :] = spectra.values
for t in range(k):
spectra_labels.append('iter%d_topic%d'%(p['iter'], t+1))
combined_spectra = combined_spectra.reshape(-1, combined_spectra.shape[-1])
combined_spectra = pd.DataFrame(combined_spectra, columns=spectra.columns, index=spectra_labels)
save_df_to_npz(combined_spectra, self.paths['merged_spectra']%k)
return combined_spectra
def consensus(self, k, density_threshold_str='0.5', local_neighborhood_size = 0.30,show_clustering = False,
skip_density_and_return_after_stats = False, close_clustergram_fig=True):
merged_spectra = load_df_from_npz(self.paths['merged_spectra']%k)
norm_counts = sc.read(self.paths['normalized_counts'])
if skip_density_and_return_after_stats:
density_threshold_str = '2'
density_threshold_repl = density_threshold_str.replace('.', '_')
density_threshold = float(density_threshold_str)
n_neighbors = int(local_neighborhood_size * merged_spectra.shape[0]/k)
        # Rescale each spectrum (topic) to unit L2 length.
l2_spectra = (merged_spectra.T/np.sqrt((merged_spectra**2).sum(axis=1))).T
if not skip_density_and_return_after_stats:
# Compute the local density matrix (if not previously cached)
topics_dist = None
if os.path.isfile(self.paths['local_density_cache'] % k):
local_density = load_df_from_npz(self.paths['local_density_cache'] % k)
else:
# first find the full distance matrix
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# partition based on the first n neighbors
partitioning_order = np.argpartition(topics_dist, n_neighbors+1)[:, :n_neighbors+1]
# find the mean over those n_neighbors (excluding self, which has a distance of 0)
distance_to_nearest_neighbors = topics_dist[np.arange(topics_dist.shape[0])[:, None], partitioning_order]
local_density = pd.DataFrame(distance_to_nearest_neighbors.sum(1)/(n_neighbors),
columns=['local_density'],
index=l2_spectra.index)
save_df_to_npz(local_density, self.paths['local_density_cache'] % k)
del(partitioning_order)
del(distance_to_nearest_neighbors)
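                # Worked example of the computation above (hypothetical
                # numbers): for a distance row [0.0, 0.3, 0.1, 0.9] with
                # n_neighbors = 2, argpartition keeps the 3 smallest entries
                # (the self-distance 0.0 plus the 2 nearest neighbors), and
                # the local density is (0.0 + 0.1 + 0.3) / 2 = 0.2.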
density_filter = local_density.iloc[:, 0] < density_threshold
l2_spectra = l2_spectra.loc[density_filter, :]
kmeans_model = KMeans(n_clusters=k, n_init=10, random_state=1)
kmeans_model.fit(l2_spectra)
kmeans_cluster_labels = pd.Series(kmeans_model.labels_+1, index=l2_spectra.index)
        # Find the median value for each gene across the spectra in each cluster
median_spectra = l2_spectra.groupby(kmeans_cluster_labels).median()
# Normalize median spectra to probability distributions.
median_spectra = (median_spectra.T/median_spectra.sum(1)).T
# Compute the silhouette score
stability = silhouette_score(l2_spectra.values, kmeans_cluster_labels, metric='euclidean')
# Obtain the reconstructed count matrix by re-fitting the usage matrix and computing the dot product: usage.dot(spectra)
refit_nmf_kwargs = yaml.load(open(self.paths['nmf_run_parameters']), Loader=yaml.FullLoader)
refit_nmf_kwargs.update(dict(
n_components = k,
H = median_spectra.values,
update_H = False
))
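        # With update_H=False, sklearn's non_negative_factorization keeps H
        # fixed at the consensus (median) spectra and solves only for W, so
        # rf_usages below is a re-fit of the usage matrix against fixed GEPs.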
_, rf_usages = self._nmf(norm_counts.X,
nmf_kwargs=refit_nmf_kwargs)
rf_usages = pd.DataFrame(rf_usages, index=norm_counts.obs.index, columns=median_spectra.index)
rf_pred_norm_counts = rf_usages.dot(median_spectra)
        # Compute the prediction error as the squared Frobenius norm of the residual
if sp.issparse(norm_counts.X):
prediction_error = ((norm_counts.X.todense() - rf_pred_norm_counts)**2).sum().sum()
else:
prediction_error = ((norm_counts.X - rf_pred_norm_counts)**2).sum().sum()
consensus_stats = pd.DataFrame([k, density_threshold, stability, prediction_error],
index = ['k', 'local_density_threshold', 'stability', 'prediction_error'],
columns = ['stats'])
if skip_density_and_return_after_stats:
return consensus_stats
save_df_to_npz(median_spectra, self.paths['consensus_spectra']%(k, density_threshold_repl))
save_df_to_npz(rf_usages, self.paths['consensus_usages']%(k, density_threshold_repl))
save_df_to_npz(consensus_stats, self.paths['consensus_stats']%(k, density_threshold_repl))
save_df_to_text(median_spectra, self.paths['consensus_spectra__txt']%(k, density_threshold_repl))
save_df_to_text(rf_usages, self.paths['consensus_usages__txt']%(k, density_threshold_repl))
# Compute gene-scores for each GEP by regressing usage on Z-scores of TPM
tpm = sc.read(self.paths['tpm'])
tpm_stats = load_df_from_npz(self.paths['tpm_stats'])
if sp.issparse(tpm.X):
norm_tpm = (np.array(tpm.X.todense()) - tpm_stats['__mean'].values) / tpm_stats['__std'].values
else:
norm_tpm = (tpm.X - tpm_stats['__mean'].values) / tpm_stats['__std'].values
usage_coef = fast_ols_all_cols(rf_usages.values, norm_tpm)
usage_coef = pd.DataFrame(usage_coef, index=rf_usages.columns, columns=tpm.var.index)
save_df_to_npz(usage_coef, self.paths['gene_spectra_score']%(k, density_threshold_repl))
save_df_to_text(usage_coef, self.paths['gene_spectra_score__txt']%(k, density_threshold_repl))
# Convert spectra to TPM units, and obtain results for all genes by running last step of NMF
# with usages fixed and TPM as the input matrix
norm_usages = rf_usages.div(rf_usages.sum(axis=1), axis=0)
refit_nmf_kwargs.update(dict(
H = norm_usages.T.values,
))
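        # Here the input matrix is transposed (genes x cells) and the fixed
        # H is the normalized usage matrix (k x cells), so the factor solved
        # for below corresponds to gene spectra expressed in TPM units.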
_, spectra_tpm = self._nmf(tpm.X.T, nmf_kwargs=refit_nmf_kwargs)
spectra_tpm = pd.DataFrame(spectra_tpm.T, index=rf_usages.columns, columns=tpm.var.index)
save_df_to_npz(spectra_tpm, self.paths['gene_spectra_tpm']%(k, density_threshold_repl))
save_df_to_text(spectra_tpm, self.paths['gene_spectra_tpm__txt']%(k, density_threshold_repl))
if show_clustering:
if topics_dist is None:
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# (l2_spectra was already filtered using the density filter)
else:
# (but the previously computed topics_dist was not!)
topics_dist = topics_dist[density_filter.values, :][:, density_filter.values]
spectra_order = []
for cl in sorted(set(kmeans_cluster_labels)):
cl_filter = kmeans_cluster_labels==cl
if cl_filter.sum() > 1:
cl_dist = squareform(topics_dist[cl_filter, :][:, cl_filter])
cl_dist[cl_dist < 0] = 0 #Rarely get floating point arithmetic issues
cl_link = linkage(cl_dist, 'average')
cl_leaves_order = leaves_list(cl_link)
spectra_order += list(np.where(cl_filter)[0][cl_leaves_order])
else:
## Corner case where a component only has one element
spectra_order += list(np.where(cl_filter)[0])
from matplotlib import gridspec
import matplotlib.pyplot as plt
width_ratios = [0.5, 9, 0.5, 4, 1]
height_ratios = [0.5, 9]
fig = plt.figure(figsize=(sum(width_ratios), sum(height_ratios)))
gs = gridspec.GridSpec(len(height_ratios), len(width_ratios), fig,
0.01, 0.01, 0.98, 0.98,
height_ratios=height_ratios,
width_ratios=width_ratios,
wspace=0, hspace=0)
dist_ax = fig.add_subplot(gs[1,1], xscale='linear', yscale='linear',
xticks=[], yticks=[],xlabel='', ylabel='',
frameon=True)
D = topics_dist[spectra_order, :][:, spectra_order]
dist_im = dist_ax.imshow(D, interpolation='none', cmap='viridis', aspect='auto',
rasterized=True)
left_ax = fig.add_subplot(gs[1,0], xscale='linear', yscale='linear', xticks=[], yticks=[],
xlabel='', ylabel='', frameon=True)
left_ax.imshow(kmeans_cluster_labels.values[spectra_order].reshape(-1, 1),
interpolation='none', cmap='Spectral', aspect='auto',
rasterized=True)
top_ax = fig.add_subplot(gs[0,1], xscale='linear', yscale='linear', xticks=[], yticks=[],
xlabel='', ylabel='', frameon=True)
top_ax.imshow(kmeans_cluster_labels.values[spectra_order].reshape(1, -1),
interpolation='none', cmap='Spectral', aspect='auto',
rasterized=True)
hist_gs = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[1, 3],
wspace=0, hspace=0)
hist_ax = fig.add_subplot(hist_gs[0,0], xscale='linear', yscale='linear',
xlabel='', ylabel='', frameon=True, title='Local density histogram')
hist_ax.hist(local_density.values, bins=np.linspace(0, 1, 50))
hist_ax.yaxis.tick_right()
xlim = hist_ax.get_xlim()
ylim = hist_ax.get_ylim()
if density_threshold < xlim[1]:
hist_ax.axvline(density_threshold, linestyle='--', color='k')
hist_ax.text(density_threshold + 0.02, ylim[1] * 0.95, 'filtering\nthreshold\n\n', va='top')
hist_ax.set_xlim(xlim)
hist_ax.set_xlabel('Mean distance to k nearest neighbors\n\n%d/%d (%.0f%%) spectra above threshold\nwere removed prior to clustering'%(sum(~density_filter), len(density_filter), 100*(~density_filter).mean()))
fig.savefig(self.paths['clustering_plot']%(k, density_threshold_repl), dpi=250)
if close_clustergram_fig:
plt.close(fig)
def k_selection_plot(self, close_fig=True):
'''
        Borrowed from the <NAME> 2013 "Deciphering Mutational Signatures"
        publication in Cell Reports.
'''
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
stats = []
for k in sorted(set(run_params.n_components)):
stats.append(self.consensus(k, skip_density_and_return_after_stats=True).stats)
stats = pd.DataFrame(stats)
stats.reset_index(drop = True, inplace = True)
save_df_to_npz(stats, self.paths['k_selection_stats'])
fig = plt.figure(figsize=(6, 4))
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(stats.k, stats.stability, 'o-', color='b')
ax1.set_ylabel('Stability', color='b', fontsize=15)
for tl in ax1.get_yticklabels():
tl.set_color('b')
#ax1.set_xlabel('K', fontsize=15)
ax2.plot(stats.k, stats.prediction_error, 'o-', color='r')
ax2.set_ylabel('Error', color='r', fontsize=15)
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax1.set_xlabel('Number of Components', fontsize=15)
ax1.grid('on')
plt.tight_layout()
fig.savefig(self.paths['k_selection_plot'], dpi=250)
if close_fig:
plt.close(fig)
if __name__=="__main__":
"""
Example commands for now:
output_dir="/Users/averes/Projects/Melton/Notebooks/2018/07-2018/cnmf_test/"
python cnmf.py prepare --output-dir $output_dir \
--name test --counts /Users/averes/Projects/Melton/Notebooks/2018/07-2018/cnmf_test/test_data.df.npz \
-k 6 7 8 9 --n-iter 5
python cnmf.py factorize --name test --output-dir $output_dir
    This can be parallelized as follows:
python cnmf.py factorize --name test --output-dir $output_dir --total-workers 2 --worker-index WORKER_INDEX (where worker_index starts with 0)
python cnmf.py combine --name test --output-dir $output_dir
python cnmf.py consensus --name test --output-dir $output_dir
"""
import sys, argparse
parser = argparse.ArgumentParser()
parser.add_argument('command', type=str, choices=['prepare', 'factorize', 'combine', 'consensus', 'k_selection_plot'])
parser.add_argument('--name', type=str, help='[all] Name for analysis. All output will be placed in [output-dir]/[name]/...', nargs='?', default='cNMF')
parser.add_argument('--output-dir', type=str, help='[all] Output directory. All output will be placed in [output-dir]/[name]/...', nargs='?', default='.')
parser.add_argument('-c', '--counts', type=str, help='[prepare] Input (cell x gene) counts matrix as df.npz or tab delimited text file')
    parser.add_argument('-k', '--components', type=int, help='[prepare] Number of components (k) for matrix factorization. Several can be specified with "-k 8 9 10"', nargs='+')
    parser.add_argument('-n', '--n-iter', type=int, help='[prepare] Number of factorization replicates', default=100)
parser.add_argument('--total-workers', type=int, help='[all] Total number of workers to distribute jobs to', default=1)
parser.add_argument('--seed', type=int, help='[prepare] Seed for pseudorandom number generation', default=None)
parser.add_argument('--genes-file', type=str, help='[prepare] File containing a list of genes to include, one gene per line. Must match column labels of counts matrix.', default=None)
parser.add_argument('--numgenes', type=int, help='[prepare] Number of high variance genes to use for matrix factorization.', default=2000)
parser.add_argument('--tpm', type=str, help='[prepare] Pre-computed (cell x gene) TPM values as df.npz or tab separated txt file. If not provided TPM will be calculated automatically', default=None)
parser.add_argument('--beta-loss', type=str, choices=['frobenius', 'kullback-leibler', 'itakura-saito'], help='[prepare] Loss function for NMF.', default='frobenius')
parser.add_argument('--densify', dest='densify', help='[prepare] Treat the input data as non-sparse', action='store_true', default=False)
parser.add_argument('--worker-index', type=int, help='[factorize] Index of current worker (the first worker should have index 0)', default=0)
parser.add_argument('--local-density-threshold', type=str, help='[consensus] Threshold for the local density filtering. This string must convert to a float >0 and <=2', default='0.5')
parser.add_argument('--local-neighborhood-size', type=float, help='[consensus] Fraction of the number of replicates to use as nearest neighbors for local density filtering', default=0.30)
parser.add_argument('--show-clustering', dest='show_clustering', help='[consensus] Produce a clustergram figure summarizing the spectra clustering', action='store_true')
args = parser.parse_args()
cnmf_obj = cNMF(output_dir=args.output_dir, name=args.name)
cnmf_obj._initialize_dirs()
if args.command == 'prepare':
if args.counts.endswith('.h5ad'):
input_counts = sc.read(args.counts)
else:
## Load txt or compressed dataframe and convert to scanpy object
if args.counts.endswith('.npz'):
input_counts = load_df_from_npz(args.counts)
else:
input_counts = pd.read_csv(args.counts, sep='\t', index_col=0)
if args.densify:
input_counts = sc.AnnData(X=input_counts.values,
                                      obs=pd.DataFrame(index=input_counts.index)
# -*- coding: utf-8 -*-
"""
Script to count the number of process exclusions per policy.
Description
-----------
This script:
1.) Connects to the Cisco AMP for Endpoints API.
    2.) Enumerates all of the policies in the environment.
    3.) Downloads a copy of the policy.xml file for each policy and stores
        it in the "policy_files" folder within the "output" folder.
4.) Evaluates the exclusions for each policy.
5.) Creates tables with the number of file and policy exclusions for
each policy.
6.) Stores the tables in csv format to the "output" folder.
7.) Generates a Caution and Warning message for policies that have a
large number of process exclusions.
Configure
---------
To use the script:
1.) Configure the "api.cfg" file in the "config" folder with your AMP
API client ID.
2.) Create credential in Windows "Credential Manager"
a.) Under "Windows Credentials" click "Add a generic credential".
b.) For "Internet or network address:" enter "AMP".
c.) For "User name:" enter the AMP API client ID
d.) For "Password:" enter the AMP API client secret
File Output
-----------
Policy.xml Files:
Policy.xml files are stored as <policy_name>.xml in the "policy_files"
folder within the "output" folder.
Path_exclusions.csv File:
The count of path exclusions by policy name can be found in the
"path_exclusions.csv" file within the "output" folder.
Process_exclusions.csv File:
The count of process exclusions by policy name can be found in the
"process_exclusions.csv" file within the "output" folder. This output
file includes the number of process exclusions that apply to all
child processes.
Screen Output
-------------
The script outputs:
File exclusion count for policy by type
<table>
Process exclusion types by policy
<table>
CAUTION: Policy has close to the maximum of 100 process exceptions.
<policy_name> has <number> exceptions
WARNING: Policy exceeds the maximum 100 process exceptions.
<policy_name> has <number> exceptions
"""
import configparser
import logging
from logging.config import fileConfig
import xml.etree.ElementTree as ET
import concurrent.futures
import requests
import keyring
import pandas as pd
from tabulate import tabulate
import numpy as np
class AMP4EP():
"""AMP4EP class.
Attributes
----------
cfg_file: Configuration file path
    clt_id: Cisco AMP for Endpoints API client ID
limit: Number of policies to return
url: Cisco AMP for Endpoints API address
clt_pwd: Cisco AMP for Endpoints API password
    session: Requests session to the AMP API
"""
# Set the config file path
cfg_file = r'.\config\api.cfg'
# Read the config file
config = configparser.ConfigParser()
config.read(cfg_file)
# Parse settings from config file and assign to class attributes
clt_id = config.get('AMP', 'amp_client_id')
limit = config.get('AMP', 'limit')
url = config.get('AMP', 'amp_host')
# Get password from the keyring
clt_pwd = keyring.get_password("AMP", clt_id)
# Create AMP session
session = requests.session()
session.auth = (clt_id, clt_pwd)
@classmethod
def get_policy_list(cls):
"""
Get list of policies.
Parameters
----------
None.
Returns
-------
        policies : Object
            A list of policies.
"""
LOG.debug('Attempting to get policy list')
url = "{0}policies?limit={1}".format(cls.url, cls.limit)
headers = {'Content-Type': 'application/json'}
response = cls.session.get(url, headers=headers)
HttpResp.status(response, url)
policies = response.json()
p_count = str(len(policies['data']))
LOG.info('Retrieved list of %s policies', p_count)
return policies['data']
@classmethod
def get_policy(cls, policy):
"""
Get policy xml data.
Parameters
----------
policy : Objects
Specific policy to get xml data for.
Returns
-------
        root : xml.etree.ElementTree.Element
            The parsed policy XML root element.
<policy_name>.xml : file
Policy XML file used by AMP4EP.
"""
# Set Policy Data
p_name = policy['name']
p_link = policy['links']['policy']
LOG.debug('Attempting to get details for \"%s\"', p_name)
# Get data from AMP API
url = "{0}.xml".format(p_link)
response = cls.session.get(url)
HttpResp.status(response, p_name)
# Write policy.xml file to output folder
LOG.debug('Attempting to write policy.xml for \"%s\"', p_name)
file_out = '.\\output\\policy_files\\' + p_name + '.xml'
with open(file_out, 'w') as file:
file.write(response.text)
# Parse XML results
doc = ET.fromstring(response.content)
tree = ET.ElementTree(doc)
root = tree.getroot()
return root
@classmethod
def parse_path_exclusions(cls, policy, xml):
"""
        Parse path exclusions from the policy xml data.
Parameters
----------
policy : Object
Specific policy metadata.
xml : Object
Specific policy data stored in the policy xml file.
Returns
-------
e_paths : Dataframe
A list of path exclusions found in policies.
"""
# Create Variables
e_paths = pd.DataFrame()
# Set policy data
p_name = policy['name']
p_guid = policy['guid']
# Find Exclusions
LOG.debug('Attempting to find path exclusions for \"%s\"', p_name)
# Get Exclusions
for xml_exclusion in xml.iter(
'{http://www.w3.org/2000/09/xmldsig#}exclusions'):
# Get Path Exclusions
for xml_path in xml_exclusion.iter(
'{http://www.w3.org/2000/09/xmldsig#}info'):
for xml_item in xml_path:
e_class = 'path'
exclusion = xml_item.text.split("|")
e_item = {'p_name': p_name,
'p_guid': p_guid,
'e_class': e_class,
'e_type': exclusion[1],
'e_value': exclusion[4]}
e_paths = e_paths.append(e_item, ignore_index=True)
return e_paths
@classmethod
def parse_process_exclusions(cls, policy, xml):
"""
        Parse process exclusions from the policy xml data.
Parameters
----------
policy : Object
Specific policy metadata.
xml : Object
Specific policy data stored in the policy xml file.
Returns
-------
e_processes : Dataframe
A list of process exclusions found in policies.
"""
# Create Variables
e_processes = pd.DataFrame()
# Set policy data
p_name = policy['name']
p_guid = policy['guid']
# Find Exclusions
LOG.debug('Attempting to find process exclusions for \"%s\"', p_name)
# Get Process Exclusions
for xml_exclusion in xml.iter(
'{http://www.w3.org/2000/09/xmldsig#}exclusions'):
for xml_process in xml_exclusion.iter(
'{http://www.w3.org/2000/09/xmldsig#}process'):
for xml_item in xml_process:
e_class = 'process'
exclusion = xml_item.text.split("|")
e_flag = int(exclusion[4])
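                    # Flag decoding example (illustrative): e_flag = 5 is
                    # binary 0b101, so bit 0 ("File Scan Child") and bit 2
                    # ("System Process Protection") are set and all other
                    # boolean fields below evaluate to False.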
e_item = {'Policy Name': p_name,
'Policy GUID': p_guid,
                              'Policy Class': e_class,
'Exclusion Version': exclusion[0],
'Exclusion Auth Type': exclusion[1],
'Exclusion Hash': exclusion[2],
'Exclusion Path': exclusion[3],
'Exclusion Flag': int(e_flag),
'File Scan Child': bool(e_flag & (0b1 << 0)),
'Scan Files Written': bool(e_flag & (0b1 << 1)),
'System Process Protection': bool(
e_flag & (0b1 << 2)),
'System Process Protection Child': bool(
e_flag & (0b1 << 3)),
'Malicious Activity': bool(e_flag & (0b1 << 4)),
'Malicious Activity Child': bool(
e_flag & (0b1 << 5)),
'Self Protect': bool(e_flag & (0b1 << 6)),
'Self Protect Child': bool(e_flag & (0b1 << 7)),
'Behavioral Protection': bool(
e_flag & (0b1 << 8)),
'Behavioral Protection Child': bool(
e_flag & (0b1 << 9))}
e_processes = e_processes.append(e_item, ignore_index=True)
return e_processes
@classmethod
def exclusion_report(cls, path, process):
"""
        Summarize path and process exclusions and print the report.
Parameters
----------
path : Dataframe
Dataframe of path exclusions
process : Dataframe
Datafrome of process exclusions.
Returns
-------
None.
"""
# Set Pandas display parameters
pd.options.display.max_columns = None
pd.options.display.width = None
# Cross Tabulate Path Exclusions
LOG.debug('Creating Path Exclusion Summary')
path_exc = pd.crosstab(path.p_name,
path.e_type,
rownames=['Policy Name'],
margins=True,
margins_name='Total Exclusions')
path_exc = path_exc.rename(columns={'1': 'Threat',
'2': 'Path',
'3': 'File Extension',
'4': 'File Name',
'5': 'Process',
'6': 'Wildcard'})
path_exc = path_exc.drop(path_exc.index[len(path_exc) - 1])
# Calculate Process Exclusions
LOG.debug('Creating Process Exclusion Summary')
process['File Scan'] = process['Exclusion Flag'].apply(
lambda x: x in range(0, 3), True)
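        # Exclusion Flag values 0-2 only ever involve the first two bits
        # ("File Scan Child" / "Scan Files Written"), so they are counted
        # here as plain "File Scan" exclusions.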
pvt_process = pd.pivot_table(process,
values=['Policy GUID',
'File Scan',
'File Scan Child',
'System Process Protection',
'System Process Protection Child',
'Malicious Activity',
'Malicious Activity Child',
'Self Protect',
'Self Protect Child',
'Behavioral Protection',
'Behavioral Protection Child'],
index='Policy Name',
aggfunc={'Policy GUID': len,
'File Scan': np.sum,
'File Scan Child': np.sum,
'System Process Protection':
np.sum,
'System Process Protection Child':
np.sum,
'Malicious Activity': np.sum,
'Malicious Activity Child':
np.sum,
'Self Protect': np.sum,
'Self Protect Child': np.sum,
'Behavioral Protection': np.sum,
'Behavioral Protection Child':
np.sum},
fill_value=0)
pvt_process = pvt_process.rename(columns={'Policy GUID':
'Total Exclusions'})
pvt_process = pvt_process[['File Scan',
'File Scan Child',
'System Process Protection',
'System Process Protection Child',
'Malicious Activity',
'Malicious Activity Child',
'Self Protect',
'Self Protect Child',
'Behavioral Protection',
'Behavioral Protection Child',
'Total Exclusions']]
# Caution level policies
LOG.debug('Find policies with 90 - 100 Process Exclusions')
        caution_list = pd.DataFrame()
"""
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
@pytest.mark.parametrize("method", ["cummin", "cummax"])
def test_cummin_cummax(self, datetime_series, method):
ufunc = methods[method]
result = getattr(datetime_series, method)().values
expected = ufunc(np.array(datetime_series))
tm.assert_numpy_array_equal(result, expected)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = getattr(ts, method)()[1::2]
expected = ufunc(ts.dropna())
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ts",
[
pd.Timedelta(0),
            pd.Timestamp("1999-12-31")
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that a Pipeline requesting multiple estimate columns over a
        single day returns the expected values for each column.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the expected values for
    multiple columns over a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the expected values for
    multiple columns over a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
    Runs a Pipeline that calculates estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
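    # For example, (pd.Timestamp('2015-01-09', tz='utc'), 1) exercises the
    # 1-quarter-out dataset with the pipeline starting on that date.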
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
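        # For instance, when start_date is the first critical date (2015-01-09)
        # and window_test_start_date is 2015-01-05, window_len covers every
        # trading session from 2015-01-05 through 2015-01-09, inclusive.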
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, | pd.Timestamp('2015-02-10') | pandas.Timestamp |
import os
import copy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import matplotlib.dates as mdates
from datetime import date, timedelta, datetime
import seaborn as sns
import geopandas as gpd
import matplotlib.colors as colors
from plotting.colors import load_color_palette
mpl.rcParams['pdf.fonttype'] = 42
LL_date = '210412'
idph_data_path = '/Volumes/fsmresfiles/PrevMed/Covid-19-Modeling/IDPH line list'
cleaned_line_list_fname = os.path.join(idph_data_path,
'LL_%s_JGcleaned_no_race.csv' % LL_date)
box_data_path = '/Users/jlg1657/Box/NU-malaria-team/data/covid_IDPH'
project_path = '/Users/jlg1657/Box/NU-malaria-team/projects/covid_chicago'
plot_path = os.path.join(project_path, 'Plots + Graphs', '_trend_tracking')
emr_fname = os.path.join(box_data_path, 'emresource_by_region.csv')
spec_coll_fname = os.path.join(box_data_path, 'Corona virus reports', '%s_LL_cases_by_EMS_spec_collection.csv' % LL_date)
shp_path = os.path.join(box_data_path, 'shapefiles')
def load_cleaned_line_list() :
df = pd.read_csv(cleaned_line_list_fname)
return df
def make_heatmap(ax, adf, col) :
palette = sns.color_palette('RdYlBu_r', 101)
df = adf.dropna(subset=[col])
df = df.groupby([col, 'EMS'])['id'].agg(len).reset_index()
df = df.rename(columns={'id' : col,
col : 'date'})
df['date'] = pd.to_datetime(df['date'])
df = df.sort_values(by=['EMS', 'date'])
ax.fill_between([np.min(df['date']), np.max(df['date']) + timedelta(days=1)],
[0.5, 0.5], [11.5, 11.5], linewidth=0, color=palette[0])
for ems, edf in df.groupby('EMS') :
max_in_col = np.max(edf[col])
print(ems, max_in_col)
for r, row in edf.iterrows() :
ax.fill_between([row['date'], row['date'] + timedelta(days=1)],
[ems-0.5, ems-0.5], [ems+0.5, ems+0.5],
color=palette[int(row[col]/max_in_col*100)],
linewidth=0)
ax.set_title(col)
ax.set_ylabel('EMS region')
formatter = mdates.DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
def heatmap() :
adf = load_cleaned_line_list()
fig = plt.figure(figsize=(10,5))
fig.subplots_adjust(left=0.05, right=0.97)
cols = ['specimen_collection', 'deceased_date']
for c, col in enumerate(cols) :
ax = fig.add_subplot(1,len(cols),c+1)
make_heatmap(ax, adf, col)
plt.savefig(os.path.join(plot_path, 'EMS_cases_deaths_heatmap_%sLL.png' % LL_date))
plt.show()
def aggregate_to_date_spec_collection() :
adf = load_cleaned_line_list()
col = 'specimen_collection'
df = adf.dropna(subset=[col])
df = df.groupby([col, 'EMS'])['id'].agg(len).reset_index()
df = df.rename(columns={'id' : col,
col : 'date'})
df = df.sort_values(by=['EMS', 'date'])
df.to_csv(spec_coll_fname, index=False)
def plot_EMS_by_line(colname) :
df = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_covidregion.csv' % LL_date))
df = df[df['covid_region'].isin(range(1,12))]
df['date'] = pd.to_datetime(df['date'])
# df = df[(df['date'] > date(2020, 2, 29)) & (df['date'] < date(2021, 1, 1))]
col = 'moving_ave'
sns.set_style('whitegrid', {'axes.linewidth' : 0.5})
fig = plt.figure(figsize=(11,6))
fig.subplots_adjust(left=0.07, right=0.97, bottom=0.05, top=0.95, hspace=0.3, wspace=0.25)
palette = sns.color_palette('Set1')
formatter = mdates.DateFormatter("%m-%d")
for e, (ems, edf) in enumerate(df.groupby('covid_region')) :
ax = fig.add_subplot(3,4,e+1)
edf['moving_ave'] = edf[colname].rolling(window=7, center=False).mean()
max_in_col = np.max(edf[col])
ax.plot(edf['date'], edf[col], color=palette[0], label=ems)
ax.fill_between(edf['date'].values, [0]*len(edf[col]), edf[col],
color=palette[0], linewidth=0, alpha=0.3)
ax.set_title('region %d' % ems)
ax.set_ylim(0, max_in_col*1.05)
ax.set_xlim(date(2020,3,10), np.max(df['date']))
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
if e%4 == 0 :
ax.set_ylabel(colname)
fig.suptitle(colname)
plt.savefig(os.path.join(plot_path, 'covid_region_%s_%sLL.png' % (colname, LL_date)))
# plt.savefig(os.path.join(plot_path, 'covid_region_%s_%sLL_2020.pdf' % (colname, LL_date)), format='PDF')
def format_ax(ax, name) :
ax.set_title(name)
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
self.vcenter = vcenter
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.vcenter, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
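# Example of the piecewise-linear mapping MidpointNormalize applies (values
# chosen to match the vmin/vcenter/vmax used below):
#   norm = MidpointNormalize(vmin=0.4, vcenter=1, vmax=3)
#   norm(0.4) -> 0.0, norm(1.0) -> 0.5, norm(3.0) -> 1.0
# so a week-over-week ratio of exactly 1 always sits at the midpoint of the
# diverging colormap, regardless of how asymmetric vmin and vmax are.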
def plot_ratio_ems() :
def get_ratio(adf, ems, w):
edf = adf[adf['EMS'] == ems]
col = 'specimen_collection'
d = edf[col].values
if w == 0:
recent = np.mean(d[-7:])
else:
recent = np.mean(d[-7 * (w + 1):-7 * w])
back = np.mean(d[-7 * (w + 2):-7 * (w + 1)])
return recent / back
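    # e.g. with w=1 the "recent" window is d[-14:-7] and the "back" window is
    # d[-21:-14], so each panel compares the mean of two adjacent 7-day blocks
    # of daily counts for the region.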
df = pd.read_csv(spec_coll_fname)
df['date'] = pd.to_datetime(df['date'])
max_date = date(2020, 7, 8)
df = df[df['date'] <= max_date]
ems_shp = gpd.read_file(os.path.join(shp_path, 'EMS_Regions', 'EMS_Regions.shp'))
ems_shp['REGION'] = ems_shp['REGION'].astype(int)
fig = plt.figure(figsize=(12, 10))
fig.subplots_adjust(top=0.95)
vmin, vmax = 0.4, 3
norm = MidpointNormalize(vmin=vmin, vcenter=1, vmax=vmax)
for week in range(6) :
ax = fig.add_subplot(2,3,6-week)
format_ax(ax, '%d weeks ago vs %d weeks ago' % (week, week+1))
ems_shp['ratio'] = ems_shp['REGION'].apply(lambda x : get_ratio(df, x, week))
ems_shp.plot(column='ratio', ax=ax, cmap='RdYlBu_r', edgecolor='0.8',
linewidth=0.8, legend=False, norm=norm)
sm = plt.cm.ScalarMappable(cmap='RdYlBu_r', norm=norm)
sm._A = []
cbar = fig.colorbar(sm, ax=ax)
fig.suptitle('week over week ratio of cases by specimen collection date\nLL data ending ' + str(max_date))
plt.savefig(os.path.join(plot_path, 'EMS_weekly_case_ratio_%sLL.png' % LL_date))
def load_county_map_with_public_data() :
from public_idph_data import load_county_cases
df = load_county_cases()
county_shp = gpd.read_file(os.path.join(shp_path, 'IL_BNDY_County', 'IL_BNDY_County_Py.shp'))
cols = ['Positive_Cases', 'Deaths', 'Tested']
sdf = df[df['County'].isin(['Cook', 'Chicago'])]
sdf = sdf.groupby('update_date')[cols].agg(np.sum).reset_index()
sdf['County'] = 'Cook'
df = df[~df['County'].isin(['Cook', 'Chicago'])]
df = pd.concat([df, sdf], sort=True)
df['County'] = df['County'].apply(lambda x : x.upper())
df.loc[df['County'] == 'DE WITT', 'County'] = 'DEWITT'
# ds_shp = pd.merge(left=county_shp, right=df, left_on='COUNTY_NAM', right_on='County')
df = df.sort_values(by=['County', 'update_date'])
return county_shp, df
def plot_ratio_county() :
ds_shp, df = load_county_map_with_public_data()
max_date = np.max(df['update_date'])
fig = plt.figure(figsize=(12, 10))
fig.subplots_adjust(top=0.95)
vmin, vmax = 0, 3
norm = MidpointNormalize(vmin=vmin, vcenter=1, vmax=vmax)
def get_ratio(adf, county, w):
cdf = adf[adf['County'] == county.upper()]
# if len(cdf) == 0 :
# return 100
cdf['daily_pos'] = np.insert(np.diff(cdf['Positive_Cases']), 0, 0)
d = cdf['daily_pos'].values
if w == 0:
recent = np.mean(d[-7:])
else:
recent = np.mean(d[-7 * (w + 1):-7 * w])
back = np.mean(d[-7 * (w + 2):-7 * (w + 1)])
if back == 0 and recent == 0 :
return -1
if back == 0 :
return vmax
return min([recent / back, vmax])
for week in range(6) :
ax = fig.add_subplot(2,3,6-week)
format_ax(ax, '%d weeks ago vs %d weeks ago' % (week, week+1))
ds_shp['ratio'] = ds_shp['COUNTY_NAM'].apply(lambda x : get_ratio(df, x, week))
ds_shp.plot(ax=ax, color='#969696', edgecolor='0.8',
linewidth=0.8, legend=False)
pdf = ds_shp[ds_shp['ratio'] == -1]
pdf.plot(ax=ax, color='#313695', edgecolor='0.8',
linewidth=0.8, legend=False)
pdf = ds_shp[ds_shp['ratio'] >= 0]
pdf.plot(column='ratio', ax=ax, cmap='RdYlBu_r', edgecolor='0.8',
linewidth=0.8, legend=False, norm=norm)
sm = plt.cm.ScalarMappable(cmap='RdYlBu_r', norm=norm)
sm._A = []
cbar = fig.colorbar(sm, ax=ax)
fig.suptitle('week over week ratio of cases\npublic data ending ' + str(max_date))
plt.savefig(os.path.join(plot_path, 'county_weekly_case_ratio.png'))
def plot_LL_all_IL() :
df = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_covidregion.csv' % LL_date))
df = df.groupby('date')[['cases', 'deaths', 'admissions']].agg(np.sum).reset_index()
df['date'] = pd.to_datetime(df['date'])
df = df.sort_values(by='date')
df = df[df['date'] >= date(2020, 3, 15)]
palette = load_color_palette('wes')
formatter = mdates.DateFormatter("%m-%d")
sns.set_style('whitegrid', {'axes.linewidth' : 0.5})
fig = plt.figure(figsize=(8,6))
fig.subplots_adjust(left=0.1, right=0.97, bottom=0.05, top=0.97)
def plot_data(adf, ax, col, color) :
ax.bar(adf['date'].values, adf[col],
align='center', color=color, linewidth=0, alpha=0.5)
adf['moving_ave'] = adf[col].rolling(window=7, center=True).mean()
ax.plot(adf['date'], adf['moving_ave'], '-', color=color)
ax.set_ylabel('positives')
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.set_ylabel(col)
ax = fig.add_subplot(3,1,1)
plot_data(df, ax, 'cases', palette[0])
ax = fig.add_subplot(3,1,2)
plot_data(df, ax, 'admissions', palette[4])
ax = fig.add_subplot(3,1,3)
plot_data(df, ax, 'deaths', palette[3])
fig.savefig(os.path.join(plot_path, 'IL_cases_deaths_LL%s.png' % LL_date))
fig.savefig(os.path.join(plot_path, 'IL_cases_deaths_LL%s.pdf' % LL_date), format='PDF')
def combo_LL_emr() :
ldf = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_ems.csv' % LL_date))
edf = pd.read_csv(os.path.join(box_data_path, 'Corona virus reports', 'emresource_by_region.csv'))
edf['date'] = pd.to_datetime(edf['date_of_extract'])
edf = edf.rename(columns={'region' : 'EMS'})
edf = edf[['date', 'covid_non_icu', 'confirmed_covid_icu', 'EMS']]
ldf['date'] = pd.to_datetime(ldf['date'])
df = | pd.merge(left=ldf, right=edf, on=['date', 'EMS'], how='outer') | pandas.merge |
import colorlover as cl
import glob
import logging
import os
import pandas as pd
from bokeh.io import export_svgs, export_png
from bokeh.plotting import figure
from fivepseq import config
from fivepseq.logic.structures.fivepseq_counts import CountManager, FivePSeqCounts
from fivepseq.util.writers import FivePSeqOut
from fivepseq.viz.bokeh_plots import bokeh_scatter_plot, bokeh_triangle_plot, bokeh_heatmap_grid, bokeh_frame_barplots, \
bokeh_composite, bokeh_fft_plot
import numpy as np
from fivepseq.logic.structures.codons import Codons
class VizPipeline:
FILTER_TOP_POPULATED = "populated"
FILTER_CANONICAL_TRANSCRIPTS = "canonical"
METACOUNTS_TERM = "_metacounts_term"
METACOUNTS_START = "_metacounts_start"
METACOUNTS_TERM_SCALED = "_metacounts_term_scaled"
METACOUNTS_START_SCALED = "_metacounts_start_scaled"
TRIANGLE_TERM = "_triangle_term"
TRIANGLE_START = "_triangle_start"
FRAME_TERM = "_frames_term"
FRAME_START = "_frames_start"
AMINO_ACID_PAUSES = "_amino_acid_pauses"
AMINO_ACID_PAUSES_SCALED = "_amino_acid_pauses_scaled"
FFT_TERM = "_fft_term"
FFT_START = "_fft_start"
count_folders = None
title = "fivepseq_plot_canvas"
png_dir = None
svg_dir = None
supplement_dir = "supplement"
supplement_png_dir = None
supplement_svg_dir = None
logger = logging.getLogger(config.FIVEPSEQ_PLOT_LOGGER)
fivepseq_counts = None
args = None
samples = []
meta_count_term_dict = {}
meta_count_start_dict = {}
count_vector_list_start_dict = {}
count_vector_list_term_dict = {}
amino_acid_df_dict = {}
amino_acid_df_full_dict = {}
codon_df_dict = {}
codon_basesorted_df_dict = {}
frame_count_term_dict = {}
frame_count_start_dict = {}
frame_stats_df_dict = {}
fft_signal_start_dict = {}
fft_signal_term_dict = {}
loci_meta_counts_dict = {}
data_summary_dict = {}
transcript_index = None
combine = False # do not combine plots until data counts are successfully combined
COMBINED = "combined"
meta_count_start_combined = pd.DataFrame()
meta_count_term_combined = pd.DataFrame()
frame_count_START_combined = | pd.DataFrame() | pandas.DataFrame |
"""
PIData contains a number of auxiliary classes that define common functionality
among :class:`PIPoint` and :class:`PIAFAttribute` objects.
"""
# pragma pylint: disable=unused-import
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import (
ascii,
bytes,
chr,
dict,
filter,
hex,
input,
int,
list,
map,
next,
object,
oct,
open,
pow,
range,
round,
str,
super,
zip,
)
from datetime import datetime
# pragma pylint: enable=unused-import
try:
from abc import ABC, abstractmethod
except ImportError:
from abc import ABCMeta, abstractmethod
from __builtin__ import str as BuiltinStr
ABC = ABCMeta(BuiltinStr("ABC"), (object,), {"__slots__": ()})
from pandas import DataFrame, Series
from PIconnect.AFSDK import AF
from PIconnect.PIConsts import (
BufferMode,
CalculationBasis,
ExpressionSampleType,
RetrievalMode,
SummaryType,
TimestampCalculation,
UpdateMode,
get_enumerated_value,
)
from PIconnect.time import timestamp_to_index, to_af_time_range, to_af_time
class PISeries(Series):
"""PISeries
Create a timeseries, derived from :class:`pandas.Series`
Args:
tag (str): Name of the new series
timestamp (List[datetime]): List of datetime objects to
create the new index
value (List): List of values for the timeseries, should be equally long
as the `timestamp` argument
uom (str, optional): Defaults to None. Unit of measurement for the
series
.. todo::
Remove class, return to either plain :class:`pandas.Series` or a
composition where the Series is just an attribute
"""
version = "0.1.0"
def __init__(self, tag, timestamp, value, uom=None, *args, **kwargs):
Series.__init__(self, data=value, index=timestamp, name=tag, *args, **kwargs)
self.tag = tag
self.uom = uom
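# Minimal usage sketch for PISeries; the tag, timestamps, and values below are
# made up purely for illustration:
#
#   from datetime import datetime
#   series = PISeries(
#       tag="ExampleTag",
#       timestamp=[datetime(2020, 1, 1), datetime(2020, 1, 2)],
#       value=[1.0, 2.0],
#       uom="m3/s",
#   )
#   series.tag  # -> "ExampleTag"
#   series.uom  # -> "m3/s"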
class PISeriesContainer(ABC):
"""PISeriesContainer
    Abstract base class for objects that return :class:`PISeries` objects,
    capturing the behaviour shared by PI Points and PI AF attributes.
.. todo::
Move `__boundary_types` to PIConsts as a new enumeration
"""
version = "0.1.0"
__boundary_types = {
"inside": AF.Data.AFBoundaryType.Inside,
"outside": AF.Data.AFBoundaryType.Outside,
"interpolate": AF.Data.AFBoundaryType.Interpolated,
}
def __init__(self):
pass
@abstractmethod
def _recorded_values(self, time_range, boundary_type, filter_expression):
"""Abstract implementation for recorded values
The internals for retrieving recorded values from PI and PI-AF are
different and should therefore be implemented by the respective data
containers.
"""
pass
@abstractmethod
def _interpolated_values(self, time_range, interval, filter_expression):
pass
@abstractmethod
def _interpolated_value(self, time):
pass
@abstractmethod
def _recorded_value(self, time, retrieval_mode):
pass
@abstractmethod
def _summary(self, time_range, summary_types, calculation_basis, time_type):
pass
@abstractmethod
def _summaries(
self, time_range, interval, summary_types, calculation_basis, time_type
):
pass
@abstractmethod
def _filtered_summaries(
self,
time_range,
interval,
filter_expression,
summary_types,
calculation_basis,
filter_evaluation,
filter_interval,
time_type,
):
pass
@abstractmethod
def _current_value(self):
pass
@abstractmethod
def _update_value(self, value, update_mode, buffer_mode):
pass
@abstractmethod
def name(self):
pass
@abstractmethod
def units_of_measurement(self):
pass
@property
def current_value(self):
"""current_value
Return the current value of the attribute."""
return self._current_value()
def interpolated_value(self, time):
"""interpolated_value
Return a PISeries with an interpolated value at the given time
Args:
time (str): String containing the date, and possibly time,
for which to retrieve the value. This is parsed, using
:afsdk:`AF.Time.AFTime <M_OSIsoft_AF_Time_AFTime__ctor_7.htm>`.
Returns:
PISeries: A PISeries with a single row, with the corresponding time as
the index
"""
time = to_af_time(time)
pivalue = self._interpolated_value(time)
return PISeries(
tag=self.name,
value=pivalue.Value,
timestamp=[timestamp_to_index(pivalue.Timestamp.UtcTime)],
uom=self.units_of_measurement,
)
def recorded_value(self, time, retrieval_mode=RetrievalMode.AUTO):
"""recorded_value
Return a PISeries with the recorded value at or close to the given time
Args:
time (str): String containing the date, and possibly time,
for which to retrieve the value. This is parsed, using
:afsdk:`AF.Time.AFTime <M_OSIsoft_AF_Time_AFTime__ctor_7.htm>`.
retrieval_mode (int or :any:`PIConsts.RetrievalMode`): Flag determining
which value to return if no value available at the exact requested
time.
Returns:
PISeries: A PISeries with a single row, with the corresponding time as
the index
"""
time = to_af_time(time)
pivalue = self._recorded_value(time, retrieval_mode)
return PISeries(
tag=self.name,
value=pivalue.Value,
timestamp=[timestamp_to_index(pivalue.Timestamp.UtcTime)],
uom=self.units_of_measurement,
)
def update_value(
self,
value,
time=None,
update_mode=UpdateMode.NO_REPLACE,
buffer_mode=BufferMode.BUFFER_IF_POSSIBLE,
):
"""Update value for existing PI object.
Args:
            value: the value type should be consistent with the PI object, otherwise
it will raise PIException: [-10702] STATE Not Found
time (datetime, optional): it is not possible to set future value,
it raises PIException: [-11046] Target Date in Future.
You can combine update_mode and time to change already stored value.
"""
if time:
time = to_af_time(time)
value = AF.Asset.AFValue(value, time)
return self._update_value(value, int(update_mode), int(buffer_mode))
def recorded_values(
self, start_time, end_time, boundary_type="inside", filter_expression=""
):
"""recorded_values
Return a PISeries of recorded data.
Data is returned between the given *start_time* and *end_time*,
inclusion of the boundaries is determined by the *boundary_type*
attribute. Both *start_time* and *end_time* are parsed by AF.Time and
allow for time specification relative to "now" by use of the asterisk.
By default the *boundary_type* is set to 'inside', which returns from
the first value after *start_time* to the last value before *end_time*.
The other options are 'outside', which returns from the last value
before *start_time* to the first value before *end_time*, and
'interpolate', which interpolates the first value to the given
*start_time* and the last value to the given *end_time*.
*filter_expression* is an optional string to filter the returned
values, see OSIsoft PI documentation for more information.
The AF SDK allows for inclusion of filtered data, with filtered values
marked as such. At this point PIconnect does not support this and
filtered values are always left out entirely.
Args:
start_time (str or datetime): Containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): Containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
boundary_type (str, optional): Defaults to 'inside'. Key from the
`__boundary_types` dictionary to describe how to handle the
boundaries of the time range.
filter_expression (str, optional): Defaults to ''. Query on which
data to include in the results. See :ref:`filtering_values`
for more information on filter queries.
Returns:
PISeries: Timeseries of the values returned by the SDK
Raises:
ValueError: If the provided `boundary_type` is not a valid key a
`ValueError` is raised.
"""
time_range = to_af_time_range(start_time, end_time)
boundary_type = self.__boundary_types.get(boundary_type.lower())
filter_expression = self._normalize_filter_expression(filter_expression)
if boundary_type is None:
raise ValueError(
"Argument boundary_type must be one of "
+ ", ".join('"%s"' % x for x in sorted(self.__boundary_types.keys()))
)
pivalues = self._recorded_values(time_range, boundary_type, filter_expression)
timestamps, values = [], []
for value in pivalues:
timestamps.append(timestamp_to_index(value.Timestamp.UtcTime))
values.append(value.Value)
return PISeries(
tag=self.name,
timestamp=timestamps,
value=values,
uom=self.units_of_measurement,
)
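    # Usage sketch (the time strings are illustrative; see the docstring above
    # for the filter expression syntax):
    #
    #   series = point.recorded_values("*-14d", "*", boundary_type="interpolate")
    #
    # returns a PISeries indexed by the recorded timestamps, with the first and
    # last values interpolated onto the requested boundary times.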
def interpolated_values(self, start_time, end_time, interval, filter_expression=""):
"""interpolated_values
Return a PISeries of interpolated data.
Data is returned between *start_time* and *end_time* at a fixed
*interval*. All three values are parsed by AF.Time and the first two
allow for time specification relative to "now" by use of the
asterisk.
*filter_expression* is an optional string to filter the returned
values, see OSIsoft PI documentation for more information.
The AF SDK allows for inclusion of filtered data, with filtered
values marked as such. At this point PIconnect does not support this
and filtered values are always left out entirely.
Args:
start_time (str or datetime): Containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): Containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
interval (str): String containing the interval at which to extract
data. This is parsed using
:afsdk:`AF.Time.AFTimeSpan.Parse <M_OSIsoft_AF_Time_AFTimeSpan_Parse_1.htm>`.
filter_expression (str, optional): Defaults to ''. Query on which
data to include in the results. See :ref:`filtering_values`
for more information on filter queries.
Returns:
PISeries: Timeseries of the values returned by the SDK
"""
time_range = to_af_time_range(start_time, end_time)
interval = AF.Time.AFTimeSpan.Parse(interval)
filter_expression = self._normalize_filter_expression(filter_expression)
pivalues = self._interpolated_values(time_range, interval, filter_expression)
timestamps, values = [], []
for value in pivalues:
timestamps.append(timestamp_to_index(value.Timestamp.UtcTime))
values.append(value.Value)
return PISeries(
tag=self.name,
timestamp=timestamps,
value=values,
uom=self.units_of_measurement,
)
def summary(
self,
start_time,
end_time,
summary_types,
calculation_basis=CalculationBasis.TIME_WEIGHTED,
time_type=TimestampCalculation.AUTO,
):
"""summary
Return one or more summary values over a single time range.
Args:
start_time (str or datetime): Containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using :afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): Containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using :afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
summary_types (int or PIConsts.SummaryType): Type(s) of summaries
of the data within the requested time range.
calculation_basis (int or PIConsts.CalculationBasis, optional):
Event weighting within an interval. See :ref:`event_weighting`
and :any:`CalculationBasis` for more information. Defaults to
CalculationBasis.TIME_WEIGHTED.
time_type (int or PIConsts.TimestampCalculation, optional):
Timestamp to return for each of the requested summaries. See
:ref:`summary_timestamps` and :any:`TimestampCalculation` for
more information. Defaults to TimestampCalculation.AUTO.
Returns:
pandas.DataFrame: Dataframe with the unique timestamps as row index
and the summary name as column name.
"""
time_range = to_af_time_range(start_time, end_time)
summary_types = int(summary_types)
calculation_basis = int(calculation_basis)
time_type = int(time_type)
pivalues = self._summary(
time_range, summary_types, calculation_basis, time_type
)
df = DataFrame()
for summary in pivalues:
key = SummaryType(summary.Key).name
value = summary.Value
timestamp = timestamp_to_index(value.Timestamp.UtcTime)
value = value.Value
df = df.join(DataFrame(data={key: value}, index=[timestamp]), how="outer")
return df
def summaries(
self,
start_time,
end_time,
interval,
summary_types,
calculation_basis=CalculationBasis.TIME_WEIGHTED,
time_type=TimestampCalculation.AUTO,
):
"""summaries
Return one or more summary values for each interval within a time range
Args:
start_time (str or datetime): Containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): Containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
interval (str): String containing the interval at which to extract
data. This is parsed using
:afsdk:`AF.Time.AFTimeSpan.Parse <M_OSIsoft_AF_Time_AFTimeSpan_Parse_1.htm>`.
summary_types (int or PIConsts.SummaryType): Type(s) of summaries
of the data within the requested time range.
calculation_basis (int or PIConsts.CalculationBasis, optional):
Event weighting within an interval. See :ref:`event_weighting`
and :any:`CalculationBasis` for more information. Defaults to
CalculationBasis.TIME_WEIGHTED.
time_type (int or PIConsts.TimestampCalculation, optional):
Timestamp to return for each of the requested summaries. See
:ref:`summary_timestamps` and :any:`TimestampCalculation` for
more information. Defaults to TimestampCalculation.AUTO.
Returns:
pandas.DataFrame: Dataframe with the unique timestamps as row index
and the summary name as column name.
"""
time_range = to_af_time_range(start_time, end_time)
interval = AF.Time.AFTimeSpan.Parse(interval)
summary_types = int(summary_types)
calculation_basis = int(calculation_basis)
time_type = int(time_type)
pivalues = self._summaries(
time_range, interval, summary_types, calculation_basis, time_type
)
df = DataFrame()
for summary in pivalues:
key = SummaryType(summary.Key).name
timestamps, values = zip(
*[
(timestamp_to_index(value.Timestamp.UtcTime), value.Value)
for value in summary.Value
]
)
df = df.join(DataFrame(data={key: values}, index=timestamps), how="outer")
return df
def filtered_summaries(
self,
start_time,
end_time,
interval,
filter_expression,
summary_types,
calculation_basis=None,
filter_evaluation=None,
filter_interval=None,
time_type=None,
):
"""filtered_summaries
Return one or more summary values for each interval within a time range
Args:
start_time (str or datetime): String containing the date, and possibly time,
from which to retrieve the values. This is parsed, together
with `end_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
end_time (str or datetime): String containing the date, and possibly time,
until which to retrieve values. This is parsed, together
with `start_time`, using
:afsdk:`AF.Time.AFTimeRange <M_OSIsoft_AF_Time_AFTimeRange__ctor_1.htm>`.
interval (str): String containing the interval at which to extract
data. This is parsed using
:afsdk:`AF.Time.AFTimeSpan.Parse <M_OSIsoft_AF_Time_AFTimeSpan_Parse_1.htm>`.
filter_expression (str, optional): Defaults to ''. Query on which
data to include in the results. See :ref:`filtering_values`
for more information on filter queries.
summary_types (int or PIConsts.SummaryType): Type(s) of summaries
of the data within the requested time range.
calculation_basis (int or PIConsts.CalculationBasis, optional):
Event weighting within an interval. See :ref:`event_weighting`
and :any:`CalculationBasis` for more information. Defaults to
CalculationBasis.TIME_WEIGHTED.
            filter_evaluation (int or PIConsts.ExpressionSampleType, optional):
Determines whether the filter is applied to the raw events in
the database, of if it is applied to an interpolated series
with a regular interval. Defaults to
ExpressionSampleType.EXPRESSION_RECORDED_VALUES.
filter_interval (str, optional): String containing the interval at
                which to apply the filter. This is parsed using
:afsdk:`AF.Time.AFTimeSpan.Parse <M_OSIsoft_AF_Time_AFTimeSpan_Parse_1.htm>`.
time_type (int or PIConsts.TimestampCalculation, optional):
Timestamp to return for each of the requested summaries. See
:ref:`summary_timestamps` and :any:`TimestampCalculation` for
more information. Defaults to TimestampCalculation.AUTO.
Returns:
pandas.DataFrame: Dataframe with the unique timestamps as row index
and the summary name as column name.
"""
time_range = to_af_time_range(start_time, end_time)
interval = AF.Time.AFTimeSpan.Parse(interval)
filter_expression = self._normalize_filter_expression(filter_expression)
calculation_basis = get_enumerated_value(
enumeration=CalculationBasis,
value=calculation_basis,
default=CalculationBasis.TIME_WEIGHTED,
)
filter_evaluation = get_enumerated_value(
enumeration=ExpressionSampleType,
value=filter_evaluation,
default=ExpressionSampleType.EXPRESSION_RECORDED_VALUES,
)
time_type = get_enumerated_value(
enumeration=TimestampCalculation,
value=time_type,
default=TimestampCalculation.AUTO,
)
filter_interval = AF.Time.AFTimeSpan.Parse(filter_interval)
pivalues = self._filtered_summaries(
time_range,
interval,
filter_expression,
summary_types,
calculation_basis,
filter_evaluation,
filter_interval,
time_type,
)
df = DataFrame()
for summary in pivalues:
key = SummaryType(summary.Key).name
timestamps, values = zip(
*[
(timestamp_to_index(value.Timestamp.UtcTime), value.Value)
for value in summary.Value
]
)
df = df.join( | DataFrame(data={key: values}, index=timestamps) | pandas.DataFrame |
from cgi import test
import pandas as pd
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, precision_recall_fscore_support, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import GridSearchCV
from joblib import dump, load
from sklearn.preprocessing import StandardScaler
train_flag = 0
validation_flag = 1
test_flag = 1
if __name__ == '__main__':
df = | pd.read_csv('../data/ADAS_blffuz_2.csv') | pandas.read_csv |
# This script takes VIC WRF files supplied by the University of Washington Climate Impacts Group
# and exports precipitation and temperature averaged from the multiple climate simulation runs.
# wrf_dir and forc_dir are directories where the projection flux files (used here for precipitation)
# and forcing files (used here for temperature) are located.
# Written in Python 3.7
import __init__
import scripts.config as config
import numpy as np
import pandas as pd
import os
# =======================================================================
# Load raw VIC WRF climate files
wrf_dir = config.data_path / 'precip' / 'VIC_WRF_EllsworthCr'
wrf_file = 'flux_46.40625_-123.90625'
wrf_cols = ["YEAR", "MONTH", "DAY", "HOUR", "OUT_PREC", "OUT_PET_SHORT",
"OUT_SWE", "OUT_EVAP", "OUT_RUNOFF", "OUT_BASEFLOW",
"OUT_SOIL_MOIST0", "OUT_SOIL_MOIST1", "OUT_SOIL_MOIST2"]
for sim_dir in os.listdir(wrf_dir):
runs = os.listdir(wrf_dir / sim_dir)
try:
runs.remove('sim_avg')
except ValueError:
pass
arrs = []
for run in runs:
arr = np.loadtxt(wrf_dir / sim_dir / run / wrf_file)
arrs.append(arr)
stack = np.dstack(arrs)
averaged = np.mean(stack, axis=2)
out_dir = wrf_dir / sim_dir / 'sim_avg'
try:
out_dir.mkdir(parents=True)
except FileExistsError:
pass
np.savetxt(out_dir / '{}.gz'.format(wrf_file), averaged)
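# np.dstack stacks the per-run arrays along a third axis (rows x columns x
# runs), so np.mean(stack, axis=2) collapses that axis into an element-wise
# average across simulation runs while keeping the original flux-file layout,
# e.g. np.mean(np.dstack([np.ones((2, 2)), 3 * np.ones((2, 2))]), axis=2)
# gives a 2x2 array of 2.0.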
# =======================================================================
# Save averaged temp file
forc_dir = config.data_path / 'precip' / 'WRF_frcs_EllsworthCr_forcings'
forc_file = 'forc_46.40625_-123.90625'
forc_cols = ['Year', 'Month', 'Day', 'Hour', 'Precip(mm)', 'Temp(C)',
'Wind(m/s)', 'SWrad(W/m2)', 'LWrad(W/m2)', 'pressure(kPa)',
'VaporPress(kPa)']
cols = ['Temp(C)']
inds = [forc_cols.index(x) for x in cols]
arrs = []
sim_dirs = []
for sim_dir in os.listdir(forc_dir):
if sim_dir == 'pnnl_historical':
continue
sim_dirs.append(sim_dir)
arr = np.loadtxt(forc_dir / sim_dir / forc_file)
arrs.append(arr[:, inds])
stack = np.column_stack(arrs)
proj_sims_temp = pd.DataFrame(stack, columns=sim_dirs)
date_arr = pd.DataFrame(arr, columns=forc_cols)
proj_sims_temp = np.column_stack([date_arr[['Year', 'Month', 'Day']], stack])
# Export just averaged temp to speed up imports in the future
gcm_avg_forc_dir = forc_dir / 'sim_avg'
try:
gcm_avg_forc_dir.mkdir(parents=True)
except FileExistsError:
pass
np.savetxt(gcm_avg_forc_dir / 'sim_avg_temp.gz', proj_sims_temp)
# =======================================================================
# Save averaged precip file
wrf_file = 'flux_46.40625_-123.90625'
wrf_cols = ["YEAR", "MONTH", "DAY", "HOUR", "OUT_PREC", "OUT_PET_SHORT",
"OUT_SWE", "OUT_EVAP", "OUT_RUNOFF", "OUT_BASEFLOW",
"OUT_SOIL_MOIST0", "OUT_SOIL_MOIST1", "OUT_SOIL_MOIST2"]
cols = ['OUT_PREC']
inds = [wrf_cols.index(x) for x in cols]
arrs = []
sim_dirs = []
for sim_dir in os.listdir(wrf_dir):
if sim_dir == 'pnnl_historical':
continue
sim_dirs.append(sim_dir)
arr = np.loadtxt(wrf_dir / sim_dir / 'sim_avg' / '{}.gz'.format(wrf_file))
arrs.append(arr[:, inds])
stack = np.column_stack(arrs)
proj_sims_ppt = | pd.DataFrame(stack, columns=sim_dirs) | pandas.DataFrame |
import os
import pandas as pd
from collections import defaultdict
import argparse
from pattern.text.en import singularize
# Dictionary used to store subject counts
subject_counts = defaultdict(lambda:0)
# Reads in the data
def read_data(filename):
print("Reading in {}".format(filename))
df = pd.read_csv(filename, skiprows = 1, names = ['doi', 'subjects', 'title'], delimiter="|")
return df
def sort(df, dhlw):
# Used to store our cleaned subject data
cleaned_data = pd.DataFrame(columns=['doi', 'subjects', 'title'])
cleaned_data_filename = 'data/tru_cleaned.csv'
if dhlw:
cleaned_data_filename = 'data/dhlw_cleaned.csv'
blank_subjects = 0 # number that OSTI listed as blank...
removed_subjects = 0 # number of subjects that were all digits, dots, *, -, and whitespaces
#p = nltk.PorterStemmer()
for i,r in df.iterrows():
subjects_str = r['subjects']
if not pd.isnull(subjects_str):
subjects = subjects_str.split(";")
cleaned_subjects = []
for s in subjects:
cleaned_s = s.lower().strip() # first cleans by removing whitespace and then putting it all to lowercase
cleaned_s = cleaned_s.lstrip('0123456789.-* ') # removes all digits, dots, dashes, and spaces from the start
if cleaned_s != "":
# converts the last word in the subject to be singular
cleaned_s_words = cleaned_s.split(" ")
cleaned_s_words[len(cleaned_s_words)-1] = singularize(cleaned_s_words[len(cleaned_s_words)-1])
cleaned_s = " ".join(cleaned_s_words)
subject_counts[cleaned_s] += 1
cleaned_subjects.append(cleaned_s)
else:
if s == "":
blank_subjects += 1
else:
removed_subjects += 1
subjects_str = ';'.join(cleaned_subjects)
else:
subjects_str = ""
cleaned_data = cleaned_data.append(pd.DataFrame({'title':r['title'], 'doi':r['doi'], 'subjects':subjects_str}, index=[0]), ignore_index=True)
cleaned_data.to_csv(cleaned_data_filename, sep='|')
print("Blank subjects: " + str(blank_subjects))
print("Removed subjects: " + str(removed_subjects))
# Write the output subject counts to a new file
def write_files(dhlw):
filename = "data/tru_subject_counts.csv"
if dhlw:
filename = "data/dhlw_subject_counts.csv"
df = | pd.DataFrame(columns=['subject','count']) | pandas.DataFrame |
#product databases
import pandas as pd
import csv
import numpy as np
import random
a=np.random.uniform(6.0,7.0,150)
b=np.random.uniform(2.0,4.0,150)
c=np.random.uniform(5.0,5.5,150)
d=np.random.uniform(1.5,2.5,150)
q=[]
for i in range(150):
e=np.random.choice(['a','b'])
q.append(e)
#print(matix1.shape)
dic={'0':a,'1':b,'2':c,'3':d,'4':q}
df=pd.DataFrame(dic)
df.to_csv('trainingdatabases.csv',index=False)
df0= | pd.read_csv('trainingdatabases.csv') | pandas.read_csv |
from itertools import product as it_product
from typing import List, Dict
import numpy as np
import os
import pandas as pd
from scipy.stats import spearmanr, wilcoxon
from provided_code.constants_class import ModelParameters
from provided_code.data_loader import DataLoader
from provided_code.dose_evaluation_class import EvaluateDose
from provided_code.general_functions import get_paths, get_predictions_to_optimize
def consolidate_data_for_analysis(cs: ModelParameters, force_new_consolidate: bool = False) \
-> [pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Consolidated data of all reference plans, dose predictions, and KBP plans. This may take about an hour to run, but
only needs to be run once for a given set of experiments.
Args:
cs: A constants object.
force_new_consolidate: Flag that will force consolidating data, which will overwrite previous data that was
consolidated in previous iterations.
Returns:
df_dose_error: Summary of dose error
df_dvh_metrics: Summary of DVH metric performance (can be converted to DVH error later)
df_clinical_criteria: Summary of clinical criteria performance
df_ref_dvh_metrics: Summary of reference dose DVH metrics
df_ref_clinical_criteria: Summary of reference dose clinical criteria performance
df_objective_data: The data from the objective functions (e.g., weights, objective function values)
df_solve_time: The time it took to solve models
"""
    # Run consolidate_data_for_analysis again whenever new predictions or plans are generated
consolidate_data_paths = {'dose': f'{cs.results_data_dir}/dose_error_df.csv',
'dvh': f'{cs.results_data_dir}/dvh_metric_df.csv',
'clinical_criteria': f'{cs.results_data_dir}/clinical_criteria_df.csv',
'ref_dvh': f'{cs.results_data_dir}/reference_metrics.csv',
'ref_clinical_criteria': f'{cs.results_data_dir}/reference_criteria.csv',
'weights': f'{cs.results_data_dir}/weights_df.csv',
'solve_time': f'{cs.results_data_dir}/solve_time_df.csv'
}
# Check if consolidated data already exists
no_consolidated_date = False
for p in consolidate_data_paths.values():
if not os.path.isfile(p):
print(p)
no_consolidated_date = True
os.makedirs(cs.results_data_dir, exist_ok=True) # Make dir for results
# Consolidate data if it doesn't exist yet or force flag is True
if no_consolidated_date or force_new_consolidate:
# Prepare strings for data that will be evaluated
predictions_to_optimize, prediction_names = get_predictions_to_optimize(cs)
patient_names = os.listdir(cs.reference_data_dir)
hold_out_plan_paths = get_paths(cs.reference_data_dir, ext='') # list of paths used for held out testing
# Evaluate dose metrics
patient_data_loader = DataLoader(hold_out_plan_paths, mode_name='evaluation') # Set data loader
dose_evaluator_sample = EvaluateDose(patient_data_loader)
# Make reference dose DVH metrics and clinical criteria
dose_evaluator_sample.make_metrics()
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_dose_metric_df').to_csv(
consolidate_data_paths['ref_dvh'])
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_criteria_df').to_csv(
consolidate_data_paths['ref_clinical_criteria'])
# Initialize DataFrames for all scores and errors
optimizer_names = os.listdir(cs.plans_dir) # Get names of all optimizers
dose_error_index_dict, dvh_metric_index_dict = make_error_and_metric_indices(patient_names,
dose_evaluator_sample,
optimizer_names)
df_dose_error_indices = pd.MultiIndex.from_product(**dose_error_index_dict)
df_dvh_error_indices = pd.MultiIndex.from_arrays(**dvh_metric_index_dict)
# Make DataFrames
df_dose_error = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_solve_time = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_dvh_metrics = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
df_clinical_criteria = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
weights_list = []
weight_columns = []
# Iterate through each prediction in the list of prediction_names
for prediction in prediction_names:
# Make a dataloader that loads predicted dose distributions
prediction_paths = get_paths(f'{cs.prediction_dir}/{prediction}', ext='csv')
prediction_dose_loader = DataLoader(prediction_paths, mode_name='predicted_dose') # Set prediction loader
# Evaluate predictions and plans with respect to ground truth
dose_evaluator = EvaluateDose(patient_data_loader, prediction_dose_loader)
populate_error_dfs(dose_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
'Prediction')
# Make dataloader for plan dose distributions
for opt_name in optimizer_names:
print(opt_name)
# Get the paths of all optimized plans for prediction
cs.get_optimization_directories(prediction, opt_name)
weights_list, weight_columns = populate_weights_df(cs, weights_list)
populate_solve_time_df(cs, df_solve_time)
# Make data loader to load plan doses
plan_paths = get_paths(cs.plan_dose_from_pred_dir, ext='csv') # List of all plan dose paths
plan_dose_loader = DataLoader(plan_paths, mode_name='predicted_dose') # Set plan dose loader
plan_evaluator = EvaluateDose(patient_data_loader, plan_dose_loader) # Make evaluation object
# Ignore prediction name if no data exists, o/w populate DataFrames
if not patient_data_loader.file_paths_list:
print('No patient information was given to calculate metrics')
else:
# Evaluate prediction errors
populate_error_dfs(plan_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
opt_name)
# Clean up weights
weights_df = pd.DataFrame(weights_list, columns=weight_columns)
weights_df.set_index(['Objective', 'Structure', 'Patients', 'Dose_type', 'Prediction'], inplace=True)
weights_df = weights_df.unstack('Prediction')
# Save dose and DVH error DataFrames
df_dose_error.to_csv(consolidate_data_paths['dose'])
df_dvh_metrics.to_csv(consolidate_data_paths['dvh'])
df_clinical_criteria.to_csv(consolidate_data_paths['clinical_criteria'])
weights_df.to_csv(consolidate_data_paths['weights'])
df_solve_time.to_csv(consolidate_data_paths['solve_time'])
# Loads the DataFrames that contain consolidated data
df_dose_error = pd.read_csv(consolidate_data_paths['dose'], index_col=[0, 1])
df_dvh_metrics = pd.read_csv(consolidate_data_paths['dvh'], index_col=[0, 1, 2, 3])
df_clinical_criteria = | pd.read_csv(consolidate_data_paths['clinical_criteria'], index_col=[0, 1, 2, 3]) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Consensus non-negative matrix factorization (cNMF) adapted from (Kotliar, et al. 2019)
"""
import numpy as np
import pandas as pd
import os, errno
import glob
import shutil
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
import warnings
from scipy.spatial.distance import squareform
from sklearn.decomposition import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from sklearn.preprocessing import normalize
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
from ._version import get_versions
def save_df_to_npz(obj, filename):
"""
    Saves a pandas DataFrame (values, index and columns) to a compressed `.npz` file
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
np.savez_compressed(
filename,
data=obj.values,
index=obj.index.values,
columns=obj.columns.values,
)
def save_df_to_text(obj, filename):
"""
    Saves a pandas DataFrame to a tab-delimited text file
"""
obj.to_csv(filename, sep="\t")
def load_df_from_npz(filename):
"""
    Loads a pandas DataFrame from an `.npz` file written by `save_df_to_npz`
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
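# Illustrative sketch (not part of the original module): round-trips a small
# DataFrame through the two `.npz` helpers above. The file name is a placeholder.
def _demo_npz_roundtrip(path="demo_df.npz"):
    demo = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["a", "b"])
    save_df_to_npz(demo, path)          # stores the values, index and columns as arrays
    restored = load_df_from_npz(path)   # rebuilds the DataFrame from those arrays
    assert restored.equals(demo)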
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (
p for i, p in enumerate(iterable) if (i - worker_index) % total_workers == 0
)
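# For example, work is split round-robin across workers:
# worker_filter(range(10), 0, 3) yields 0, 3, 6, 9 while
# worker_filter(range(10), 1, 3) yields 1, 4, 7.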
def fast_euclidean(mat):
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1, 1))
D += squared_norms.reshape((1, -1))
    D[D < 0] = 0  # clamp small negatives from floating point error before the square root
    D = np.sqrt(D)
return squareform(D, checks=False)
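# Illustrative check (not part of the original module): fast_euclidean should agree
# with scipy's pdist, up to floating point error, on a small random matrix.
def _demo_fast_euclidean():
    from scipy.spatial.distance import pdist
    rng = np.random.RandomState(0)
    mat = rng.rand(6, 4)
    assert np.allclose(fast_euclidean(mat), pdist(mat, metric="euclidean"))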
def fast_ols_all_cols(X, Y):
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return beta
def fast_ols_all_cols_df(X, Y):
beta = fast_ols_all_cols(X, Y)
beta = pd.DataFrame(beta, index=X.columns, columns=Y.columns)
return beta
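# Illustrative sketch (not part of the original module): the pseudo-inverse based
# OLS above recovers known coefficients exactly for a noiseless linear system.
def _demo_fast_ols():
    X = pd.DataFrame(np.random.RandomState(1).rand(20, 2), columns=["x1", "x2"])
    true_beta = np.array([[2.0], [-1.0]])
    Y = pd.DataFrame(X.values.dot(true_beta), columns=["y"])
    beta = fast_ols_all_cols_df(X, Y)
    assert np.allclose(beta.values, true_beta)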
def var_sparse_matrix(X):
mean = np.array(X.mean(axis=0)).reshape(-1)
Xcopy = X.copy()
Xcopy.data **= 2
var = np.array(Xcopy.mean(axis=0)).reshape(-1) - (mean ** 2)
return var
def get_highvar_genes_sparse(
expression, expected_fano_threshold=None, minimal_mean=0.01, numgenes=None
):
# Find high variance genes within those cells
gene_mean = np.array(expression.mean(axis=0)).astype(float).reshape(-1)
E2 = expression.copy()
E2.data **= 2
gene2_mean = np.array(E2.mean(axis=0)).reshape(-1)
gene_var = pd.Series(gene2_mean - (gene_mean ** 2))
del E2
gene_mean = pd.Series(gene_mean)
gene_fano = gene_var / gene_mean
# Find parameters for expected fano line
top_genes = gene_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_var) / gene_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_fano.quantile([0.10, 0.90])
winsor_box = (
(gene_fano > w_fano_low)
& (gene_fano < w_fano_high)
& (gene_mean > w_mean_low)
& (gene_mean < w_mean_high)
)
fano_median = gene_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A ** 2) * gene_mean + (B ** 2)
fano_ratio = gene_fano / gene_expected_fano
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T = None
else:
if not expected_fano_threshold:
            T = 1.0 + gene_fano[winsor_box].std()
else:
T = expected_fano_threshold
        high_var_genes_ind = (fano_ratio > T) & (gene_mean > minimal_mean)
gene_counts_stats = pd.DataFrame(
{
"mean": gene_mean,
"var": gene_var,
"fano": gene_fano,
"expected_fano": gene_expected_fano,
"high_var": high_var_genes_ind,
"fano_ratio": fano_ratio,
}
)
gene_fano_parameters = {
"A": A,
"B": B,
"T": T,
"minimal_mean": minimal_mean,
}
return (gene_counts_stats, gene_fano_parameters)
def get_highvar_genes(
input_counts, expected_fano_threshold=None, minimal_mean=0.01, numgenes=None
):
# Find high variance genes within those cells
gene_counts_mean = pd.Series(input_counts.mean(axis=0).astype(float))
gene_counts_var = pd.Series(input_counts.var(ddof=0, axis=0).astype(float))
gene_counts_fano = pd.Series(gene_counts_var / gene_counts_mean)
# Find parameters for expected fano line
top_genes = gene_counts_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_counts_var) / gene_counts_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_counts_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_counts_fano.quantile([0.10, 0.90])
winsor_box = (
(gene_counts_fano > w_fano_low)
& (gene_counts_fano < w_fano_high)
& (gene_counts_mean > w_mean_low)
& (gene_counts_mean < w_mean_high)
)
fano_median = gene_counts_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A ** 2) * gene_counts_mean + (B ** 2)
fano_ratio = gene_counts_fano / gene_expected_fano
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T = None
else:
if not expected_fano_threshold:
T = 1.0 + gene_counts_fano[winsor_box].std()
else:
T = expected_fano_threshold
high_var_genes_ind = (fano_ratio > T) & (gene_counts_mean > minimal_mean)
gene_counts_stats = pd.DataFrame(
{
"mean": gene_counts_mean,
"var": gene_counts_var,
"fano": gene_counts_fano,
"expected_fano": gene_expected_fano,
"high_var": high_var_genes_ind,
"fano_ratio": fano_ratio,
}
)
gene_fano_parameters = {
"A": A,
"B": B,
"T": T,
"minimal_mean": minimal_mean,
}
return (gene_counts_stats, gene_fano_parameters)
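# Illustrative sketch (not part of the original module): flag the 50 most
# overdispersed genes in a synthetic dense counts matrix (cells x genes).
def _demo_get_highvar_genes():
    rng = np.random.RandomState(0)
    counts = rng.negative_binomial(n=2, p=0.3, size=(200, 500)).astype(float)
    gene_stats, fano_params = get_highvar_genes(counts, numgenes=50)
    assert gene_stats["high_var"].sum() == 50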
def compute_tpm(input_counts):
"""
Default TPM normalization
"""
tpm = input_counts.copy()
tpm.layers["raw_counts"] = tpm.X.copy()
sc.pp.normalize_total(tpm, target_sum=1e6)
return tpm
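# Illustrative sketch (not part of the original module): after compute_tpm every
# cell (row) sums to one million, with the raw counts kept in a layer.
def _demo_compute_tpm():
    counts = sc.AnnData(np.arange(1, 41, dtype=float).reshape(4, 10))
    tpm = compute_tpm(counts)
    assert np.allclose(np.asarray(tpm.X.sum(axis=1)).ravel(), 1e6)
    assert "raw_counts" in tpm.layers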
def subset_adata(adata, subset):
"""
Subsets anndata object on one or more `.obs` columns
"""
print("Subsetting AnnData on {}".format(subset), end="")
# initialize .obs column for choosing cells
adata.obs["adata_subset_combined"] = 0
# create label as union of given subset args
for i in range(len(subset)):
adata.obs.loc[
adata.obs[subset[i]].isin(["True", True, 1.0, 1]), "adata_subset_combined"
] = 1
adata = adata[adata.obs["adata_subset_combined"] == 1, :].copy()
adata.obs.drop(columns="adata_subset_combined", inplace=True)
print(" - now {} cells and {} genes".format(adata.n_obs, adata.n_vars))
return adata
def cnmf_markers(adata, spectra_score_file, n_genes=30, key="cnmf"):
"""
Read cNMF spectra into AnnData object
Reads in gene spectra score output from cNMF and saves top gene loadings for
each usage as dataframe in adata.uns
Parameters
----------
adata : AnnData.AnnData
AnnData object
spectra_score_file : str
`<name>.gene_spectra_score.<k>.<dt>.txt` file from cNMF containing gene
loadings
n_genes : int, optional (default=30)
number of top genes to list for each usage (rows of df)
key : str, optional (default="cnmf")
prefix of `adata.uns` keys to save
Returns
-------
adata : AnnData.AnnData
adata is edited in place to include gene spectra scores
(`adata.varm["cnmf_spectra"]`) and list of top genes by spectra score
(`adata.uns["cnmf_markers"]`)
"""
# load Z-scored GEPs which reflect gene enrichment, save to adata.varm
spectra = pd.read_csv(spectra_score_file, sep="\t", index_col=0).T
spectra = adata.var[[]].merge(
spectra, how="left", left_index=True, right_index=True
)
adata.varm["{}_spectra".format(key)] = spectra.values
# obtain top n_genes for each GEP in sorted order and combine them into df
top_genes = []
for gep in spectra.columns:
top_genes.append(
list(spectra.sort_values(by=gep, ascending=False).index[:n_genes])
)
# save output to adata.uns
adata.uns["{}_markers".format(key)] = pd.DataFrame(
top_genes, index=spectra.columns.astype(str)
).T
def cnmf_load_results(adata, cnmf_dir, name, k, dt, key="cnmf", **kwargs):
"""
Load results of cNMF
Given adata object and corresponding cNMF output (cnmf_dir, name, k, dt to
identify), read in relevant results and save to adata object inplace, and
output plot of gene loadings for each GEP usage.
Parameters
----------
adata : AnnData.AnnData
AnnData object
cnmf_dir : str
relative path to directory containing cNMF outputs
name : str
name of cNMF replicate
k : int
value used for consensus factorization
    dt : float or str
distance threshold value used for consensus clustering
key : str, optional (default="cnmf")
prefix of adata.uns keys to save
n_points : int
how many top genes to include in rank_genes() plot
**kwargs : optional (default=None)
keyword args to pass to cnmf_markers()
Returns
-------
adata : AnnData.AnnData
`adata` is edited in place to include overdispersed genes
(`adata.var["cnmf_overdispersed"]`), usages (`adata.obs["usage_#"]`,
`adata.obsm["cnmf_usages"]`), gene spectra scores
(`adata.varm["cnmf_spectra"]`), and list of top genes by spectra score
(`adata.uns["cnmf_markers"]`).
"""
# read in cell usages
usage = pd.read_csv(
"{}/{}/{}.usages.k_{}.dt_{}.consensus.txt".format(
cnmf_dir, name, name, str(k), str(dt).replace(".", "_")
),
sep="\t",
index_col=0,
)
usage.columns = ["usage_" + str(col) for col in usage.columns]
# normalize usages to total for each cell
usage_norm = usage.div(usage.sum(axis=1), axis=0)
usage_norm.index = usage_norm.index.astype(str)
# add usages to .obs for visualization
adata.obs = pd.merge(
left=adata.obs, right=usage_norm, how="left", left_index=True, right_index=True
)
    # replace missing values with zeros for all factors
    # (assign back instead of fillna(inplace=True) on a .loc slice, which operates on a copy)
    adata.obs[usage_norm.columns] = adata.obs[usage_norm.columns].fillna(value=0)
# add usages as array in .obsm for dimension reduction
adata.obsm["cnmf_usages"] = adata.obs.loc[:, usage_norm.columns].values
# read in overdispersed genes determined by cNMF and add as metadata to adata.var
overdispersed = np.genfromtxt(
"{}/{}/{}.overdispersed_genes.txt".format(cnmf_dir, name, name),
delimiter="\t",
dtype=str,
)
adata.var["cnmf_overdispersed"] = 0
adata.var.loc[
[x for x in adata.var.index if x in overdispersed], "cnmf_overdispersed"
] = 1
# read top gene loadings for each GEP usage and save to adata.uns['cnmf_markers']
cnmf_markers(
adata,
"{}/{}/{}.gene_spectra_score.k_{}.dt_{}.txt".format(
cnmf_dir, name, name, str(k), str(dt).replace(".", "_")
),
key=key,
**kwargs
)
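# Hypothetical call sketch (directory, run name, k and dt are placeholders and must
# match an existing cNMF output tree):
# cnmf_load_results(adata, cnmf_dir="cnmf_out", name="my_run", k=10, dt="0.5", n_genes=20)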
class cNMF:
"""
Consensus NMF object
Containerizes the cNMF inputs and outputs to allow for easy pipelining
"""
def __init__(self, output_dir=".", name=None):
"""
Parameters
----------
output_dir : path, optional (default=".")
Output directory for analysis files.
name : string, optional (default=None)
A name for this analysis. Will be prefixed to all output files.
If set to None, will be automatically generated from date (and random string).
"""
self.output_dir = output_dir
if name is None:
now = datetime.datetime.now()
rand_hash = uuid.uuid4().hex[:6]
name = "%s_%s" % (now.strftime("%Y_%m_%d"), rand_hash)
self.name = name
self.paths = None
def _initialize_dirs(self):
if self.paths is None:
# Check that output directory exists, create it if needed.
check_dir_exists(self.output_dir)
check_dir_exists(os.path.join(self.output_dir, self.name))
check_dir_exists(os.path.join(self.output_dir, self.name, "cnmf_tmp"))
self.paths = {
"normalized_counts": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".norm_counts.h5ad",
),
"nmf_replicate_parameters": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".nmf_params.df.npz",
),
"nmf_run_parameters": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".nmf_idvrun_params.yaml",
),
"nmf_genes_list": os.path.join(
self.output_dir, self.name, self.name + ".overdispersed_genes.txt"
),
"tpm": os.path.join(
self.output_dir, self.name, "cnmf_tmp", self.name + ".tpm.h5ad"
),
"tpm_stats": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".tpm_stats.df.npz",
),
"iter_spectra": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".spectra.k_%d.iter_%d.df.npz",
),
"iter_usages": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".usages.k_%d.iter_%d.df.npz",
),
"merged_spectra": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".spectra.k_%d.merged.df.npz",
),
"local_density_cache": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".local_density_cache.k_%d.merged.df.npz",
),
"consensus_spectra": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".spectra.k_%d.dt_%s.consensus.df.npz",
),
"consensus_spectra__txt": os.path.join(
self.output_dir,
self.name,
self.name + ".spectra.k_%d.dt_%s.consensus.txt",
),
"consensus_usages": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".usages.k_%d.dt_%s.consensus.df.npz",
),
"consensus_usages__txt": os.path.join(
self.output_dir,
self.name,
self.name + ".usages.k_%d.dt_%s.consensus.txt",
),
"consensus_stats": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".stats.k_%d.dt_%s.df.npz",
),
"clustering_plot": os.path.join(
self.output_dir, self.name, self.name + ".clustering.k_%d.dt_%s.png"
),
"gene_spectra_score": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".gene_spectra_score.k_%d.dt_%s.df.npz",
),
"gene_spectra_score__txt": os.path.join(
self.output_dir,
self.name,
self.name + ".gene_spectra_score.k_%d.dt_%s.txt",
),
"gene_spectra_tpm": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".gene_spectra_tpm.k_%d.dt_%s.df.npz",
),
"gene_spectra_tpm__txt": os.path.join(
self.output_dir,
self.name,
self.name + ".gene_spectra_tpm.k_%d.dt_%s.txt",
),
"k_selection_plot": os.path.join(
self.output_dir, self.name, self.name + ".k_selection.png"
),
"k_selection_stats": os.path.join(
self.output_dir, self.name, self.name + ".k_selection_stats.df.npz"
),
}
def get_norm_counts(
self, counts, tpm, high_variance_genes_filter=None, num_highvar_genes=None
):
"""
Parameters
----------
counts : anndata.AnnData
Scanpy AnnData object (cells x genes) containing raw counts. Filtered such
that no genes or cells with 0 counts
tpm : anndata.AnnData
Scanpy AnnData object (cells x genes) containing tpm normalized data
matching counts
high_variance_genes_filter : np.array, optional (default=None)
A pre-specified list of genes considered to be high-variance.
Only these genes will be used during factorization of the counts matrix.
Must match the .var index of counts and tpm.
If set to None, high-variance genes will be automatically computed, using
the parameters below.
num_highvar_genes : int, optional (default=None)
Instead of providing an array of high-variance genes, identify this many
most overdispersed genes for filtering
Returns
-------
normcounts : anndata.AnnData, shape (cells, num_highvar_genes)
A counts matrix containing only the high variance genes and with columns
(genes) normalized to unit variance
"""
if high_variance_genes_filter is None:
## Get list of high-var genes if one wasn't provided
if sp.issparse(tpm.X):
(gene_counts_stats, gene_fano_params) = get_highvar_genes_sparse(
tpm.X, numgenes=num_highvar_genes
)
else:
(gene_counts_stats, gene_fano_params) = get_highvar_genes(
np.array(tpm.X), numgenes=num_highvar_genes
)
high_variance_genes_filter = list(
tpm.var.index[gene_counts_stats.high_var.values]
)
## Subset out high-variance genes
print(
"Selecting {} highly variable genes".format(len(high_variance_genes_filter))
)
norm_counts = counts[:, high_variance_genes_filter]
norm_counts = norm_counts[tpm.obs_names, :].copy()
## Scale genes to unit variance
if sp.issparse(tpm.X):
sc.pp.scale(norm_counts, zero_center=False)
if np.isnan(norm_counts.X.data).sum() > 0:
print("Warning: NaNs in normalized counts matrix")
else:
norm_counts.X /= norm_counts.X.std(axis=0, ddof=1)
if np.isnan(norm_counts.X).sum().sum() > 0:
print("Warning: NaNs in normalized counts matrix")
## Save a \n-delimited list of the high-variance genes used for factorization
open(self.paths["nmf_genes_list"], "w").write(
"\n".join(high_variance_genes_filter)
)
## Check for any cells that have 0 counts of the overdispersed genes
zerocells = norm_counts.X.sum(axis=1) == 0
if zerocells.sum() > 0:
print(
"Warning: %d cells have zero counts of overdispersed genes - ignoring these cells for factorization."
% (zerocells.sum())
)
sc.pp.filter_cells(norm_counts, min_counts=1)
return norm_counts
def save_norm_counts(self, norm_counts):
self._initialize_dirs()
norm_counts.write(self.paths["normalized_counts"], compression="gzip")
def get_nmf_iter_params(
self, ks, n_iter=100, random_state_seed=None, beta_loss="kullback-leibler"
):
"""
Creates a DataFrame with parameters for NMF iterations
Parameters
----------
ks : integer, or list-like.
Number of topics (components) for factorization.
Several values can be specified at the same time, which will be run
independently.
        n_iter : integer, optional (default=100)
Number of iterations for factorization. If several `k` are specified,
this many iterations will be run for each value of `k`.
random_state_seed : int or None, optional (default=None)
Seed for sklearn random state.
"""
if type(ks) is int:
ks = [ks]
# Remove any repeated k values, and order.
k_list = sorted(set(list(ks)))
n_runs = len(ks) * n_iter
np.random.seed(seed=random_state_seed)
nmf_seeds = np.random.randint(low=1, high=(2 ** 32) - 1, size=n_runs)
replicate_params = []
for i, (k, r) in enumerate(itertools.product(k_list, range(n_iter))):
replicate_params.append([k, r, nmf_seeds[i]])
replicate_params = pd.DataFrame(
replicate_params, columns=["n_components", "iter", "nmf_seed"]
)
_nmf_kwargs = dict(
alpha=0.0,
l1_ratio=0.0,
beta_loss=beta_loss,
solver="mu",
tol=1e-4,
max_iter=400,
regularization=None,
init="random",
)
## Coordinate descent is faster than multiplicative update but only works for frobenius
if beta_loss == "frobenius":
_nmf_kwargs["solver"] = "cd"
return (replicate_params, _nmf_kwargs)
def save_nmf_iter_params(self, replicate_params, run_params):
self._initialize_dirs()
save_df_to_npz(replicate_params, self.paths["nmf_replicate_parameters"])
with open(self.paths["nmf_run_parameters"], "w") as F:
yaml.dump(run_params, F)
def _nmf(self, X, nmf_kwargs):
"""
Parameters
----------
X : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
nmf_kwargs : dict,
Arguments to be passed to `non_negative_factorization`
"""
(usages, spectra, niter) = non_negative_factorization(X, **nmf_kwargs)
return (spectra, usages)
def run_nmf(
self, worker_i=1, total_workers=1,
):
"""
Iteratively runs NMF with prespecified parameters
Use the `worker_i` and `total_workers` parameters for parallelization.
Generic kwargs for NMF are loaded from `self.paths['nmf_run_parameters']`,
defaults below::
`non_negative_factorization` default arguments:
alpha=0.0
l1_ratio=0.0
beta_loss='kullback-leibler'
solver='mu'
tol=1e-4,
                max_iter=400
regularization=None
init='random'
random_state, n_components are both set by the prespecified
self.paths['nmf_replicate_parameters'].
Parameters
----------
norm_counts : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
(Output of `normalize_counts`)
run_params : pandas.DataFrame,
Parameters for NMF iterations.
(Output of `prepare_nmf_iter_params`)
"""
self._initialize_dirs()
run_params = load_df_from_npz(self.paths["nmf_replicate_parameters"])
norm_counts = sc.read(self.paths["normalized_counts"])
_nmf_kwargs = yaml.load(
open(self.paths["nmf_run_parameters"]), Loader=yaml.FullLoader
)
jobs_for_this_worker = worker_filter(
range(len(run_params)), worker_i, total_workers
)
for idx in jobs_for_this_worker:
p = run_params.iloc[idx, :]
print("[Worker %d]. Starting task %d." % (worker_i, idx))
_nmf_kwargs["random_state"] = p["nmf_seed"]
_nmf_kwargs["n_components"] = p["n_components"]
(spectra, usages) = self._nmf(norm_counts.X, _nmf_kwargs)
spectra = pd.DataFrame(
spectra,
index=np.arange(1, _nmf_kwargs["n_components"] + 1),
columns=norm_counts.var.index,
)
save_df_to_npz(
spectra, self.paths["iter_spectra"] % (p["n_components"], p["iter"])
)
def combine_nmf(self, k, remove_individual_iterations=False):
run_params = load_df_from_npz(self.paths["nmf_replicate_parameters"])
print("Combining factorizations for k=%d." % k)
self._initialize_dirs()
combined_spectra = None
n_iter = sum(run_params.n_components == k)
run_params_subset = run_params[run_params.n_components == k].sort_values("iter")
spectra_labels = []
for i, p in run_params_subset.iterrows():
spectra = load_df_from_npz(
self.paths["iter_spectra"] % (p["n_components"], p["iter"])
)
if combined_spectra is None:
combined_spectra = np.zeros((n_iter, k, spectra.shape[1]))
combined_spectra[p["iter"], :, :] = spectra.values
for t in range(k):
spectra_labels.append("iter%d_topic%d" % (p["iter"], t + 1))
combined_spectra = combined_spectra.reshape(-1, combined_spectra.shape[-1])
combined_spectra = pd.DataFrame(
combined_spectra, columns=spectra.columns, index=spectra_labels
)
save_df_to_npz(combined_spectra, self.paths["merged_spectra"] % k)
return combined_spectra
def consensus(
self,
k,
density_threshold_str="0.5",
local_neighborhood_size=0.30,
show_clustering=True,
skip_density_and_return_after_stats=False,
close_clustergram_fig=True,
):
merged_spectra = load_df_from_npz(self.paths["merged_spectra"] % k)
norm_counts = sc.read(self.paths["normalized_counts"])
if skip_density_and_return_after_stats:
density_threshold_str = "2"
density_threshold_repl = density_threshold_str.replace(".", "_")
density_threshold = float(density_threshold_str)
n_neighbors = int(local_neighborhood_size * merged_spectra.shape[0] / k)
        # Rescale topics so that each spectra row has unit (L2) length.
l2_spectra = (merged_spectra.T / np.sqrt((merged_spectra ** 2).sum(axis=1))).T
if not skip_density_and_return_after_stats:
# Compute the local density matrix (if not previously cached)
topics_dist = None
if os.path.isfile(self.paths["local_density_cache"] % k):
local_density = load_df_from_npz(self.paths["local_density_cache"] % k)
else:
# first find the full distance matrix
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# partition based on the first n neighbors
partitioning_order = np.argpartition(topics_dist, n_neighbors + 1)[
:, : n_neighbors + 1
]
# find the mean over those n_neighbors (excluding self, which has a distance of 0)
distance_to_nearest_neighbors = topics_dist[
np.arange(topics_dist.shape[0])[:, None], partitioning_order
]
local_density = pd.DataFrame(
distance_to_nearest_neighbors.sum(1) / (n_neighbors),
columns=["local_density"],
index=l2_spectra.index,
)
save_df_to_npz(local_density, self.paths["local_density_cache"] % k)
del partitioning_order
del distance_to_nearest_neighbors
density_filter = local_density.iloc[:, 0] < density_threshold
l2_spectra = l2_spectra.loc[density_filter, :]
kmeans_model = KMeans(n_clusters=k, n_init=10, random_state=1)
kmeans_model.fit(l2_spectra)
kmeans_cluster_labels = pd.Series(
kmeans_model.labels_ + 1, index=l2_spectra.index
)
        # Find the median value of each gene across the spectra in each cluster
median_spectra = l2_spectra.groupby(kmeans_cluster_labels).median()
# Normalize median spectra to probability distributions.
median_spectra = (median_spectra.T / median_spectra.sum(1)).T
# Compute the silhouette score
stability = silhouette_score(
l2_spectra.values, kmeans_cluster_labels, metric="euclidean"
)
# Obtain the reconstructed count matrix by re-fitting the usage matrix and computing the dot product: usage.dot(spectra)
refit_nmf_kwargs = yaml.load(
open(self.paths["nmf_run_parameters"]), Loader=yaml.FullLoader
)
refit_nmf_kwargs.update(
dict(n_components=k, H=median_spectra.values, update_H=False)
)
# ensure dtypes match for factorization
if median_spectra.values.dtype != norm_counts.X.dtype:
norm_counts.X = norm_counts.X.astype(median_spectra.values.dtype)
_, rf_usages = self._nmf(norm_counts.X, nmf_kwargs=refit_nmf_kwargs)
rf_usages = pd.DataFrame(
rf_usages, index=norm_counts.obs.index, columns=median_spectra.index
)
rf_pred_norm_counts = rf_usages.dot(median_spectra)
# Compute prediction error as a frobenius norm
if sp.issparse(norm_counts.X):
prediction_error = (
((norm_counts.X.todense() - rf_pred_norm_counts) ** 2).sum().sum()
)
else:
prediction_error = ((norm_counts.X - rf_pred_norm_counts) ** 2).sum().sum()
consensus_stats = pd.DataFrame(
[k, density_threshold, stability, prediction_error],
index=["k", "local_density_threshold", "stability", "prediction_error"],
columns=["stats"],
)
if skip_density_and_return_after_stats:
return consensus_stats
save_df_to_npz(
median_spectra,
self.paths["consensus_spectra"] % (k, density_threshold_repl),
)
save_df_to_npz(
rf_usages, self.paths["consensus_usages"] % (k, density_threshold_repl)
)
save_df_to_npz(
consensus_stats, self.paths["consensus_stats"] % (k, density_threshold_repl)
)
save_df_to_text(
median_spectra,
self.paths["consensus_spectra__txt"] % (k, density_threshold_repl),
)
save_df_to_text(
rf_usages, self.paths["consensus_usages__txt"] % (k, density_threshold_repl)
)
# Compute gene-scores for each GEP by regressing usage on Z-scores of TPM
tpm = sc.read(self.paths["tpm"])
# ignore cells not present in norm_counts
if tpm.n_obs != norm_counts.n_obs:
tpm = tpm[norm_counts.obs_names, :].copy()
tpm_stats = load_df_from_npz(self.paths["tpm_stats"])
if sp.issparse(tpm.X):
norm_tpm = (
np.array(tpm.X.todense()) - tpm_stats["__mean"].values
) / tpm_stats["__std"].values
else:
norm_tpm = (tpm.X - tpm_stats["__mean"].values) / tpm_stats["__std"].values
usage_coef = fast_ols_all_cols(rf_usages.values, norm_tpm)
usage_coef = pd.DataFrame(
usage_coef, index=rf_usages.columns, columns=tpm.var.index
)
save_df_to_npz(
usage_coef, self.paths["gene_spectra_score"] % (k, density_threshold_repl)
)
save_df_to_text(
usage_coef,
self.paths["gene_spectra_score__txt"] % (k, density_threshold_repl),
)
# Convert spectra to TPM units, and obtain results for all genes by running
# last step of NMF with usages fixed and TPM as the input matrix
norm_usages = rf_usages.div(rf_usages.sum(axis=1), axis=0)
refit_nmf_kwargs.update(dict(H=norm_usages.T.values,))
# ensure dtypes match for factorization
if norm_usages.values.dtype != tpm.X.dtype:
tpm.X = tpm.X.astype(norm_usages.values.dtype)
_, spectra_tpm = self._nmf(tpm.X.T, nmf_kwargs=refit_nmf_kwargs)
spectra_tpm = pd.DataFrame(
spectra_tpm.T, index=rf_usages.columns, columns=tpm.var.index
)
save_df_to_npz(
spectra_tpm, self.paths["gene_spectra_tpm"] % (k, density_threshold_repl)
)
save_df_to_text(
spectra_tpm,
self.paths["gene_spectra_tpm__txt"] % (k, density_threshold_repl),
)
if show_clustering:
if topics_dist is None:
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# (l2_spectra was already filtered using the density filter)
else:
# (but the previously computed topics_dist was not!)
topics_dist = topics_dist[density_filter.values, :][
:, density_filter.values
]
spectra_order = []
for cl in sorted(set(kmeans_cluster_labels)):
cl_filter = kmeans_cluster_labels == cl
if cl_filter.sum() > 1:
cl_dist = squareform(topics_dist[cl_filter, :][:, cl_filter])
cl_dist[
cl_dist < 0
] = 0 # Rarely get floating point arithmetic issues
cl_link = linkage(cl_dist, "average")
cl_leaves_order = leaves_list(cl_link)
spectra_order += list(np.where(cl_filter)[0][cl_leaves_order])
else:
## Corner case where a component only has one element
spectra_order += list(np.where(cl_filter)[0])
from matplotlib import gridspec
import matplotlib.pyplot as plt
width_ratios = [0.5, 9, 0.5, 4, 1]
height_ratios = [0.5, 9]
fig = plt.figure(figsize=(sum(width_ratios), sum(height_ratios)))
gs = gridspec.GridSpec(
len(height_ratios),
len(width_ratios),
fig,
0.01,
0.01,
0.98,
0.98,
height_ratios=height_ratios,
width_ratios=width_ratios,
wspace=0,
hspace=0,
)
dist_ax = fig.add_subplot(
gs[1, 1],
xscale="linear",
yscale="linear",
xticks=[],
yticks=[],
xlabel="",
ylabel="",
frameon=True,
)
D = topics_dist[spectra_order, :][:, spectra_order]
dist_im = dist_ax.imshow(
D, interpolation="none", cmap="viridis", aspect="auto", rasterized=True
)
left_ax = fig.add_subplot(
gs[1, 0],
xscale="linear",
yscale="linear",
xticks=[],
yticks=[],
xlabel="",
ylabel="",
frameon=True,
)
left_ax.imshow(
kmeans_cluster_labels.values[spectra_order].reshape(-1, 1),
interpolation="none",
cmap="Spectral",
aspect="auto",
rasterized=True,
)
top_ax = fig.add_subplot(
gs[0, 1],
xscale="linear",
yscale="linear",
xticks=[],
yticks=[],
xlabel="",
ylabel="",
frameon=True,
)
top_ax.imshow(
kmeans_cluster_labels.values[spectra_order].reshape(1, -1),
interpolation="none",
cmap="Spectral",
aspect="auto",
rasterized=True,
)
hist_gs = gridspec.GridSpecFromSubplotSpec(
3, 1, subplot_spec=gs[1, 3], wspace=0, hspace=0
)
hist_ax = fig.add_subplot(
hist_gs[0, 0],
xscale="linear",
yscale="linear",
xlabel="",
ylabel="",
frameon=True,
title="Local density histogram",
)
hist_ax.hist(local_density.values, bins=np.linspace(0, 1, 50))
hist_ax.yaxis.tick_right()
xlim = hist_ax.get_xlim()
ylim = hist_ax.get_ylim()
if density_threshold < xlim[1]:
hist_ax.axvline(density_threshold, linestyle="--", color="k")
hist_ax.text(
density_threshold + 0.02,
ylim[1] * 0.95,
"filtering\nthreshold\n\n",
va="top",
)
hist_ax.set_xlim(xlim)
hist_ax.set_xlabel(
"Mean distance to k nearest neighbors\n\n%d/%d (%.0f%%) spectra above threshold\nwere removed prior to clustering"
% (
sum(~density_filter),
len(density_filter),
100 * (~density_filter).mean(),
)
)
fig.savefig(
self.paths["clustering_plot"] % (k, density_threshold_repl), dpi=250
)
if close_clustergram_fig:
plt.close(fig)
def k_selection_plot(self, close_fig=True):
"""
Borrowed from <NAME>. 2013 Deciphering Mutational Signatures
publication in Cell Reports
"""
run_params = load_df_from_npz(self.paths["nmf_replicate_parameters"])
stats = []
for k in sorted(set(run_params.n_components)):
stats.append(
self.consensus(k, skip_density_and_return_after_stats=True).stats
)
stats = pd.DataFrame(stats)
stats.reset_index(drop=True, inplace=True)
save_df_to_npz(stats, self.paths["k_selection_stats"])
fig = plt.figure(figsize=(6, 4))
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(stats.k, stats.stability, "o-", color="b")
ax1.set_ylabel("Stability", color="b", fontsize=15)
for tl in ax1.get_yticklabels():
tl.set_color("b")
# ax1.set_xlabel('K', fontsize=15)
ax2.plot(stats.k, stats.prediction_error, "o-", color="r")
ax2.set_ylabel("Error", color="r", fontsize=15)
for tl in ax2.get_yticklabels():
tl.set_color("r")
ax1.set_xlabel("Number of Components", fontsize=15)
ax1.grid(True)
plt.tight_layout()
fig.savefig(self.paths["k_selection_plot"], dpi=250)
if close_fig:
plt.close(fig)
def pick_k(k_selection_stats_path):
k_sel_stats = load_df_from_npz(k_selection_stats_path)
    return int(k_sel_stats.loc[k_sel_stats.stability.idxmax(), "k"])
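# Hypothetical end-to-end sketch (paths and parameters are placeholders): the usual
# flow is prepare -> run_nmf -> combine_nmf -> k_selection_plot/pick_k -> consensus.
# cnmf_obj = cNMF(output_dir="cnmf_out", name="my_run")
# cnmf_obj._initialize_dirs()
# ... write normalized counts and iteration parameters via get_norm_counts /
#     save_norm_counts / get_nmf_iter_params / save_nmf_iter_params ...
# cnmf_obj.run_nmf(worker_i=0, total_workers=1)
# cnmf_obj.combine_nmf(k=10)
# cnmf_obj.k_selection_plot()
# best_k = pick_k(cnmf_obj.paths["k_selection_stats"])
# cnmf_obj.consensus(k=best_k, density_threshold_str="0.5")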
def prepare(args):
argdict = vars(args)
cnmf_obj = cNMF(output_dir=argdict["output_dir"], name=argdict["name"])
cnmf_obj._initialize_dirs()
print("Reading in counts from {} - ".format(argdict["counts"]), end="")
if argdict["counts"].endswith(".h5ad"):
input_counts = sc.read(argdict["counts"])
else:
## Load txt or compressed dataframe and convert to scanpy object
if argdict["counts"].endswith(".npz"):
input_counts = load_df_from_npz(argdict["counts"])
else:
input_counts = pd.read_csv(argdict["counts"], sep="\t", index_col=0)
if argdict["densify"]:
input_counts = sc.AnnData(
X=input_counts.values,
obs=pd.DataFrame(index=input_counts.index),
var= | pd.DataFrame(index=input_counts.columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import datetime
from financial import factor_cash_flow
from vision.table.valuation import Valuation
from vision.table.fin_cash_flow import FinCashFlow
from vision.table.fin_income import FinIncome
from vision.table.fin_balance_ttm import FinBalanceTTM
from vision.table.fin_income_ttm import FinIncomeTTM
from vision.table.fin_cash_flow_ttm import FinCashFlowTTM
from vision.db.signletion_engine import get_fin_consolidated_statements_pit, get_fundamentals, query
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url, methods=[{'packet': 'financial.factor_cash_flow', 'class': 'FactorCashFlow'}, ]):
self._name = name
self._methods = methods
self._url = url
def _func_sets(self, method):
        # filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))
def loading_data(self, trade_date):
"""
        Fetch the base data.
        Loads the fundamental data of all stocks for the given trading day.
        :param trade_date: trading day
        :return:
"""
        # convert the date format
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
        # columns currently involved in the factors
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
# report data
cash_flow_sets = get_fin_consolidated_statements_pit(FinCashFlow,
                                                             [FinCashFlow.net_operate_cash_flow,  # net cash flow from operating activities
                                                              FinCashFlow.goods_sale_and_service_render_cash,
                                                              # cash received from selling goods and rendering services
                                                              ], dates=[trade_date]).drop(columns, axis=1)
income_sets = get_fin_consolidated_statements_pit(FinIncome,
                                                          [FinIncome.operating_revenue,  # operating revenue
                                                           FinIncome.total_operating_cost,  # total operating cost
                                                           FinIncome.total_operating_revenue,  # total operating revenue
                                                           ], dates=[trade_date]).drop(columns, axis=1)
tp_cash_flow = pd.merge(cash_flow_sets, income_sets, on="security_code")
# ttm data
balance_ttm_sets = get_fin_consolidated_statements_pit(FinBalanceTTM,
                                                               [FinBalanceTTM.total_liability,  # total liabilities
                                                                FinBalanceTTM.shortterm_loan,  # short-term loans
                                                                FinBalanceTTM.longterm_loan,  # long-term loans
                                                                FinBalanceTTM.total_current_liability,  # total current liabilities
                                                                FinBalanceTTM.total_current_assets,  # total current assets
                                                                FinBalanceTTM.total_assets,  # total assets
], dates=[trade_date]).drop(columns, axis=1)
cash_flow_ttm_sets = get_fin_consolidated_statements_pit(FinCashFlowTTM,
                                                                 [FinCashFlowTTM.net_operate_cash_flow,  # net cash flow from operating activities
                                                                  FinCashFlowTTM.cash_and_equivalents_at_end,
                                                                  # cash and cash equivalents at end of period
                                                                  FinCashFlowTTM.goods_sale_and_service_render_cash,
                                                                  # cash received from selling goods and rendering services
], dates=[trade_date]).drop(columns, axis=1)
ttm_cash_flow = pd.merge(balance_ttm_sets, cash_flow_ttm_sets, on="security_code")
income_ttm_sets = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                              [FinIncomeTTM.total_operating_cost,  # total operating cost
                                                               FinIncomeTTM.operating_revenue,  # operating revenue
                                                               FinIncomeTTM.total_operating_revenue,  # total operating revenue
                                                               FinIncomeTTM.net_profit,  # net profit
                                                               FinIncomeTTM.np_parent_company_owners,  # net profit attributable to owners of the parent company
                                                               FinIncomeTTM.operating_profit,  # operating profit
], dates=[trade_date]).drop(columns, axis=1)
ttm_cash_flow = pd.merge(income_ttm_sets, ttm_cash_flow, on="security_code")
valuation_sets = get_fundamentals(query(Valuation.security_code,
Valuation.market_cap
).filter(Valuation.trade_date.in_([trade_date])))
ttm_cash_flow = pd.merge(ttm_cash_flow, valuation_sets, how='outer', on='security_code')
tp_cash_flow = pd.merge(tp_cash_flow, valuation_sets, how='outer', on='security_code')
return tp_cash_flow, ttm_cash_flow
def process_calc_factor(self, trade_date, tp_cash_flow, ttm_factor_sets):
tp_cash_flow = tp_cash_flow.set_index('security_code')
ttm_factor_sets = ttm_factor_sets.set_index('security_code')
cash_flow = factor_cash_flow.FactorCashFlow()
cash_flow_sets = | pd.DataFrame() | pandas.DataFrame |
def no_sentence_in_voice_column(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from dateutil.parser import parse
import validators
file_name="No_sentence_in_voice_column.py"
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="No_sentence_in_voice_column"
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
strings_to_apply=to_check['strings_to_apply']
if(files_to_apply=='ALL' or fleName in files_to_apply):
data=[]
df = pd.read_excel(fle)
df.index = range(2,df.shape[0]+2)
for index, row in df.iterrows():
column_value=row['VOICE_ONLY']
if(pd.notnull(row['VOICE_ONLY'])):
for string in strings_to_apply:
if(string in column_value):
#print(index)
entry=[index,fleName,'VOICE_ONLY column has '+string+' in its contents']
                        print('The row '+str(index)+' in the file '+fleName+' has the text \''+string+'\' in the voice_only column')
data.append(entry)
df1 = pd.DataFrame(data, columns = ['ROW_NO', 'FILE_NAME', 'COMMENTS'])
if(ExcelFile(target).sheet_names[0] == 'Sheet1'):
with ExcelWriter(target, engine='openpyxl', mode='w') as writer:
df1.to_excel(writer,sheet_name=rule,index=False)
else:
with | ExcelWriter(target, engine='openpyxl', mode='a') | pandas.ExcelWriter |
import pandas as pd
import numpy as np
from functions import fao_regions as regions
data = 'data/'
def data_build(crop_proxie, diet_div_crop, diet_source_crop, diet_ls_only, diet_ls_only_source, min_waste):
"""*** Import of country data to build national diets ***"""
WPR_height = pd.read_csv(r"data/worldpopulationreview_height_data.csv")
WPR_height.loc[WPR_height.Area == "North Korea", "Area"] = "Democratic People's Republic of Korea"
Countrycodes = pd.read_csv(r"data/countrycodes.csv", sep = ";")
#FAO_pop = pd.read_excel(data+"/FAOSTAT_Population_v3.xlsx")
FAO_pop = pd.read_excel(data+"/FAOSTAT_2018_population.xlsx")
FAO_pop.loc[FAO_pop.Area == "Cote d'Ivoire", "Area"] = "Côte d'Ivoire"
FAO_pop.loc[FAO_pop.Area == "French Guyana", "Area"] = "French Guiana"
FAO_pop.loc[FAO_pop.Area == "Réunion", "Area"] = "Réunion"
"""*** Import and sorting of data ***"""
FAO_crops = pd.read_csv(data+"/FAOSTAT_crop Production.csv")
FAO_crops["group"] = FAO_crops.apply(lambda x: regions.group(x["Item Code"]), axis=1)
FAO_crops = FAO_crops.rename(columns={"Value" : "Production"})
FAO_crops["Production"] = FAO_crops["Production"] / 1000
FAO_crops["Unit"] = "1000 tonnes"
FAO_animals = pd.read_csv(data+"/FAO_animal_prod_2016.csv")
FAO_animals["group"] = FAO_animals.apply(lambda x: regions.group(x["Item Code"]), axis=1)
FAO_animals.loc[FAO_animals.Area == "United Kingdom of Great Britain and Northern Ireland", "Area"] = "United Kingdom"
FAO_animals = FAO_animals.rename(columns={"Value" : "Production"})
FAO_animals.drop(FAO_animals[FAO_animals.Unit != 'tonnes'].index, inplace = True)
FAO_animals["Production"] = FAO_animals["Production"] / 1000
FAO_animals["Unit"] = "1000 tonnes"
FAO_animals_5 = pd.read_csv(data+"/FAOSTAT_animal_prod_5.csv")
FAO_animals_5["group"] = FAO_animals_5.apply(lambda x: regions.group(x["Item Code (FAO)"]), axis=1)
FAO_animals_5.loc[FAO_animals_5.Area == "United Kingdom of Great Britain and Northern Ireland", "Area"] = "United Kingdom"
FAO_animals_5 = FAO_animals_5.rename(columns={"Value" : "Production"})
FAO_animals_5.drop(FAO_animals_5[FAO_animals_5.Unit != 'tonnes'].index, inplace = True)
FAO_animals_5["Production"] = FAO_animals_5["Production"] / 1000
FAO_animals_5["Unit"] = "1000 tonnes"
FAO_animals_5 = FAO_animals_5.groupby(['Area', 'Item']).mean().reset_index()
FAO_animals = pd.merge(FAO_animals, FAO_animals_5[['Area', 'Item', 'Production']], on = ["Area", "Item"], how = 'left')
FAO_animals["Production"] = FAO_animals["Production_y"]
FAO_animals = FAO_animals.drop(columns = ["Production_x", "Production_y"])
FAO_fish = pd.read_csv(data+"FAOSTAT_Fish.csv")
FAO_fish = FAO_fish.rename(columns={"Value" : "Production"})
FAO_fish["group"] = FAO_fish.apply(lambda x: regions.group(x["Item Code"]), axis=1)
meat_products = ["eggs", "beef and lamb", "chicken and other poultry",\
"pork", "whole milk or derivative equivalents"]
fish_products = ["Freshwater Fish", "Demersal Fish", "Pelagic Fish",\
"Marine Fish, Other", "Crustaceans", "Cephalopods",\
"Molluscs, Other", "Meat, Aquatic Mammals", "Aquatic Animals, Others",
"Aquatic Plants", "Fish, Body Oil", "Fish, Liver Oil"]
other_items = ["Honey, natural", "Beeswax", "Silk-worm cocoons, reelable"]
other_items = ["Beeswax", "Silk-worm cocoons, reelable"]
"""*** Import of protein data ***"""
FAO_Protein = pd.read_csv(data+"protein.csv")
FAO_Protein["group"] = FAO_Protein["group"].str.replace("dairy", "whole milk or derivative equivalents")
FAO_Protein = FAO_Protein.rename(columns = {"Country": "Area"})
"""*** Build main dataframe ***"""
POM_data = | pd.concat([FAO_crops]) | pandas.concat |
# embedding
from sklearn.manifold import TSNE
import pandas as pd
import matplotlib.pyplot as plt
from plotnine import aes, geom_point, ggplot, theme_bw
def visualize_embedding(multinet, labels=None, verbose=True):
embedding = multinet.embedding
X = embedding[0]
indices = embedding[1]
if verbose:
print("------ Starting embedding visualization -------")
if labels:
# optionally match indices to labels and add a column
label_vector = [labels[x] for x in indices]
X_embedded = TSNE(n_components=2).fit_transform(X)
dfr = | pd.DataFrame(X_embedded, columns=['dim1', 'dim2']) | pandas.DataFrame |
"""Python module for manipulating datasets."""
from __future__ import absolute_import
import random
import os
import os.path
import logging
import multiprocessing
from functools import partial
import pandas as pd
import numpy as np
from sklearn import cross_validation, preprocessing
from sklearn.decomposition import PCA
from . import fileutils
def first(iterable):
"""Returns the first element of an iterable"""
for element in iterable:
return element
class SegmentCrossValidator:
"""Wrapper for the scikit_learn CV generators to generate folds on a segment basis."""
def __init__(self, dataframe, base_cv=None, **cv_kwargs):
# We create a copy of the dataframe with a new last level
# index which is an enumeration of the rows (like proper indices)
self.all_segments = pd.DataFrame({'Preictal': dataframe['Preictal'], 'i': np.arange(len(dataframe))})
self.all_segments.set_index('i', append=True, inplace=True)
# Now create a series with only the segments as rows. This is what we will pass into the wrapped cross
# validation generator
self.segments = self.all_segments['Preictal'].groupby(level='segment').first()
self.segments.sort(inplace=True)
if base_cv is None:
self.cv = cross_validation.StratifiedKFold(self.segments, **cv_kwargs)
else:
self.cv = base_cv(self.segments, **cv_kwargs)
def __iter__(self):
"""
Return a generator object which returns a pair of indices for every iteration.
"""
for training_indices, test_indices in self.cv:
# The indices returned from self.cv are relative to the segment name data series, we pick out the segment
# names they belong to
training_segments = list(self.segments[training_indices].index)
test_segments = list(self.segments[test_indices].index)
# Now that we have the segment names, we pick out the rows in the properly indexed dataframe
all_training_indices = self.all_segments.loc[training_segments]
all_test_indices = self.all_segments.loc[test_segments]
# Now pick out the values for only the 'i' level index of the rows which matched the segment names
original_df_training_indices = all_training_indices.index.get_level_values('i')
original_df_test_indices = all_test_indices.index.get_level_values('i')
yield original_df_training_indices, original_df_test_indices
def __len__(self):
return len(self.cv)
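# Illustrative sketch (not part of the original module), assuming the legacy
# sklearn.cross_validation / pandas APIs this file targets: folds are generated per
# segment, so every row of a segment falls on the same side of each split.
def _demo_segment_cv():
    index = pd.MultiIndex.from_product([["seg_a", "seg_b", "seg_c", "seg_d"], range(3)],
                                       names=["segment", "window"])
    frame = pd.DataFrame({"Preictal": [0] * 6 + [1] * 6}, index=index)
    for train_idx, test_idx in SegmentCrossValidator(frame, n_folds=2):
        train_segments = set(frame.index[train_idx].get_level_values("segment"))
        test_segments = set(frame.index[test_idx].get_level_values("segment"))
        assert train_segments.isdisjoint(test_segments)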
def mean(*dataframes):
"""Returns the means of the given dataframe(s), calculated without
concatenating the frame"""
lengths = sum([len(dataframe) for dataframe in dataframes])
sums = dataframes[0].sum()
for dataframe in dataframes[1:]:
sums += dataframe.sum()
means = sums / lengths
return means
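# Illustrative sketch (not part of the original module): the streaming mean over
# several frames matches the mean of their concatenation.
def _demo_streaming_mean():
    a = pd.DataFrame({"x": [1.0, 2.0]})
    b = pd.DataFrame({"x": [3.0, 4.0, 5.0]})
    assert np.isclose(mean(a, b)["x"], pd.concat([a, b])["x"].mean())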
def transform(transformation, interictal, preictal, test):
"""
Performs a transformation on the supplied *interictal*, *preictal* and *test* Pandas dataframes.
:param transformation: An object that implements the fit_transform function, which applies a transformation
to a numpy array.
:param interictal: Pandas Dataframe containing the interictal samples
:param preictal: Pandas Dataframe containing the preictal samples
:param test: Pandas Dataframe containing the test samples
:return: A List containing the three input Dataframes, with the *transformation* applied to them
"""
if not hasattr(transformation, 'fit_transform'):
logging.warning(
"Transformation {} has not fit_transform function, no transformation applied".format(transformation))
return [interictal, preictal, test]
interictal = interictal.drop('Preictal', axis=1)
preictal = preictal.drop('Preictal', axis=1)
# Keep structure info as we will need to rebuild the dataframes
interictal_index = interictal.index
interictal_columns = interictal.columns
preictal_index = preictal.index
preictal_columns = preictal.columns
test_index = test.index
test_columns = test.columns
inter_samples = interictal.shape[0]
# Concatenate the training data as we will use those for
# fitting the scaler
# This can be quite memory intensive, especially if the
# dataframes are big
training_frame = | pd.concat([interictal, preictal], axis=0) | pandas.concat |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
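# Illustrative call (sids, estimates and dates are arbitrary): each sid's latest
# estimate is forward-filled from its knowledge date through `end_date`.
# create_expected_df_for_factor_compute(
#     start_date=pd.Timestamp('2015-01-05'),
#     sids=[0, 1],
#     tuples=[(0, 100., pd.Timestamp('2015-01-05')),
#             (1, 200., pd.Timestamp('2015-01-06'))],
#     end_date=pd.Timestamp('2015-01-08'),
# )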
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
    Test class checking that loaders fail correctly when incorrectly
    instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
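# For contrast with the failure case above, a minimal sketch (not used by any
# test) of a correctly wired split-adjusted loader: every name passed in
# `split_adjusted_column_names` also appears in the requested column mapping.
# The loader class, the `Estimates` dataset, and `dummy_df` all come from this
# module; the adjustment reader is taken as an argument because it only
# exists on test instances.
def _example_valid_split_adjusted_loader(adjustment_reader):
    columns = {
        Estimates.event_date: 'event_date',
        Estimates.fiscal_quarter: 'fiscal_quarter',
        Estimates.fiscal_year: 'fiscal_year',
        Estimates.estimate: 'estimate',
    }
    return PreviousSplitAdjustedEarningsEstimatesLoader(
        dummy_df,
        {column.name: val for column, val in columns.items()},
        split_adjustments_loader=adjustment_reader,
        # 'estimate' is listed in `columns`, so no ValueError is raised here.
        split_adjusted_column_names=['estimate'],
        split_adjusted_asof=pd.Timestamp('2015-01-01'),
    )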
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
    Run the same tests as NextEstimate, but using a BlazeNextEstimatesLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
    Run the same tests as PreviousEstimate, but using a
    BlazePreviousEstimatesLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
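# A minimal sketch (not used by the tests) of the window-length arithmetic in
# `test_estimate_windows_at_quarter_boundaries`, with a plain business-day
# index standing in for the real trading calendar: the window reaches from
# the requested start date back to the first date on which data could have
# arrived, inclusive of both endpoints.
def _example_window_length(start_date=pd.Timestamp('2015-01-20'),
                           window_test_start_date=pd.Timestamp('2015-01-05')):
    trading_days = pd.date_range('2015-01-05', '2015-02-10', freq='B')
    return (trading_days.get_loc(start_date) -
            trading_days.get_loc(window_test_start_date) + 1)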
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
            # becomes our previous quarter, 2Q ago would be Q2, and we have
            # no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
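# A rough sketch (illustrative only, not part of the loader API) of the
# arithmetic that the split-adjusted expected timelines below encode. Raw
# estimates arrive already adjusted for every split up to
# `split_adjusted_asof_date`, so for a given simulation date we divide out
# ("un-apply") any pre-asof ratio whose effective date is still in the
# future, and multiply in ("re-apply") post-asof ratios once their effective
# dates have passed. Overwrites and per-window knowledge dates are ignored.
def _example_split_adjust(raw_estimate, ratios_by_effective_date,
                          split_adjusted_asof, simulation_date):
    value = raw_estimate
    for effective_date, ratio in ratios_by_effective_date.items():
        if effective_date <= split_adjusted_asof:
            if effective_date > simulation_date:
                # Baked into the raw value but not yet effective in
                # simulation time: undo it.
                value /= ratio
        elif effective_date <= simulation_date:
            # Not in the raw value but effective by now: apply it.
            value *= ratio
    return value
# e.g. sid 30's Q1 estimate of 131 shows up as 131 * 1/10 on 2015-01-09 (the
# pre-asof 2015-01-13 split is still in the future) and as 131 * 11 on
# 2015-01-15 (the post-asof 2015-01-15 split has taken effect):
# _example_split_adjust(131., {pd.Timestamp('2015-01-07'): 8.,
#                              pd.Timestamp('2015-01-09'): 9.,
#                              pd.Timestamp('2015-01-13'): 10.,
#                              pd.Timestamp('2015-01-15'): 11.,
#                              pd.Timestamp('2015-01-18'): 12.},
#                      pd.Timestamp('2015-01-14'), pd.Timestamp('2015-01-09'))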
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
            # becomes our previous quarter, 2Q ago would be Q2, and we have
            # no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-09')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*1/4, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-12')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-13', '2015-01-14')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextWithSplitAdjustedWindows(NextWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
    ZiplineTestCase mixin providing multiple estimate columns that are
    split-adjusted, to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
        The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp('2015-02-10')
test_start_date = pd.Timestamp('2015-01-06', tz='utc')
test_end_date = pd.Timestamp('2015-01-12', tz='utc')
split_adjusted_asof = pd.Timestamp('2015-01-08')
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame({
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp('2015-01-05'),
pd.Timestamp('2015-01-05')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
'estimate1': [1100., 1200.],
'estimate2': [2100., 2200.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame({
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp('2015-01-05'),
pd.Timestamp('2015-01-05')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-08'),
pd.Timestamp('2015-01-11')],
'estimate1': [1110., 1210.],
'estimate2': [2110., 2210.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 1,
})
return pd.concat([sid_0_events, sid_1_events])
@classmethod
def make_splits_data(cls):
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (.3, 3.),
'effective_date': (pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09')),
})
sid_1_splits = pd.DataFrame({
SID_FIELD_NAME: 1,
'ratio': (.4, 4.),
'effective_date': (pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09')),
})
return pd.concat([sid_0_splits, sid_1_splits])
@classmethod
def make_expected_timelines_1q_out(cls):
return {}
@classmethod
def make_expected_timelines_2q_out(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(
WithSplitAdjustedMultipleEstimateColumns, cls
).init_class_fixtures()
cls.timelines_1q_out = cls.make_expected_timelines_1q_out()
cls.timelines_2q_out = cls.make_expected_timelines_2q_out()
def test_adjustments_with_multiple_adjusted_columns(self):
dataset = MultipleColumnsQuartersEstimates(1)
timelines = self.timelines_1q_out
window_len = 3
class SomeFactor(CustomFactor):
inputs = [dataset.estimate1, dataset.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate1, estimate2):
assert_almost_equal(estimate1, timelines[today]['estimate1'])
assert_almost_equal(estimate2, timelines[today]['estimate2'])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
def test_multiple_datasets_different_num_announcements(self):
dataset1 = MultipleColumnsQuartersEstimates(1)
dataset2 = MultipleColumnsQuartersEstimates(2)
timelines_1q_out = self.timelines_1q_out
timelines_2q_out = self.timelines_2q_out
window_len = 3
class SomeFactor1(CustomFactor):
inputs = [dataset1.estimate1]
window_length = window_len
def compute(self, today, assets, out, estimate1):
assert_almost_equal(
estimate1, timelines_1q_out[today]['estimate1']
)
class SomeFactor2(CustomFactor):
inputs = [dataset2.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate2):
assert_almost_equal(
estimate2, timelines_2q_out[today]['estimate2']
)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est1': SomeFactor1(), 'est2': SomeFactor2()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
class PreviousWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 3),
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 3),
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 2 +
[[np.NaN, 1110.]]),
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[np.NaN, 2110.]])
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] +
[[np.NaN, 1110. * 4]] +
[[1100 * 3., 1110. * 4]]),
'estimate2': np.array([[np.NaN, np.NaN]] +
[[np.NaN, 2110. * 4]] +
[[2100 * 3., 2110. * 4]])
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 2 +
[[1200 * 3., 1210. * 4]]),
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[2200 * 3., 2210. * 4]])
}
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[2100 * 3., 2110. * 4]])
}
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithMultipleEstimateColumns(
PreviousWithSplitAdjustedMultipleEstimateColumns
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
class NextWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] +
[[1100. * 1/.3, 1110. * 1/.4]] * 2),
'estimate2': np.array([[np.NaN, np.NaN]] +
[[2100. * 1/.3, 2110. * 1/.4]] * 2),
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate1': np.array([[1100., 1110.]] * 3),
'estimate2': np.array([[2100., 2110.]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate1': np.array([[1100., 1110.]] * 3),
'estimate2': np.array([[2100., 2110.]] * 3)
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate1': np.array([[1100 * 3., 1210. * 4]] * 3),
'estimate2': np.array([[2100 * 3., 2210. * 4]] * 3)
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate1': np.array([[1200 * 3., np.NaN]] * 3),
'estimate2': np.array([[2200 * 3., np.NaN]] * 3)
}
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] +
[[2200 * 1/.3, 2210. * 1/.4]] * 2)
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate2': np.array([[2200., 2210.]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate2': np.array([[2200, 2210.]] * 3)
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate2': np.array([[2200 * 3., np.NaN]] * 3)
},
pd.Timestamp('2015-01-12', tz='utc'):
import os
import pandas as pd
import json
import re
import gc
from configparser import ConfigParser
from pathlib import Path
from typing import List, Dict, Union, Text, Tuple, Iterable
from numbers import Number
from selenium.webdriver import Firefox
from selenium.webdriver import firefox
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import NoSuchElementException
from scripts.crawler import AcordaosTCU
import sqlite3
firefox_webelements = firefox.webelement.FirefoxWebElement
firefox_webdriver = firefox.webdriver.WebDriver
def parse_json_year_date(year: Number, fullpath: Path) -> Union[Path, None]:
"""
Filter the JSON files by year.
"""
if not isinstance(fullpath, Path):
raise TypeError("The fullpath parameter must be of type Path.")
pattern_finder = re.search(rf"_{year}\.json", fullpath.name)
if pattern_finder:
return fullpath
else:
return None
def load_into_dataframe(jsonFile: List[Path]) -> pd.DataFrame:
"""
Create a DataFrame from a list of JSON files.
"""
# container to store the loaded json data
container_of_json = []
for file in jsonFile:
with open(file, "r", encoding="utf8") as f:
d = json.load(f)
container_of_json.append(d)
# container of dataframes
container_of_dataframes = []
for data in container_of_json:
df = pd.read_json(json.dumps(data), orient="records", encoding="utf8")
container_of_dataframes.append(df)
df = pd.concat(container_of_dataframes)
return df
def get_urn(pattern: str, df: pd.DataFrame) -> Dict:
"""
Receive a URN pattern and collect all of its occurrences in the dataframe.
"""
urn_container = {}
for index, row in df.iterrows():
if type(row["urn"]) == list:
for data in row["urn"]:
if pattern in data:
if pattern in urn_container:
continue
else:
urn_container[data] = row["url"]
else:
if pattern in row["urn"]:
if pattern in urn_container:
continue
else:
urn_container[row["urn"]] = row["url"]
return urn_container
def select_files_based_on_year(path: Path, year: str) -> List[Path]:
"""
Select the files whose names contain the given year.
"""
if not isinstance(path, Path):
raise TypeError("The path parameter must be of type Path.")
container_of_json_year = []
path_to_str = str(path.absolute())
for dirname, _, filenames in os.walk(path_to_str):
for filename in filenames:
path_filename = Path(os.path.join(dirname, filename))
check_pattern = parse_json_year_date(year, path_filename)
if check_pattern:
container_of_json_year.append(path_filename)
return container_of_json_year
def pipeline_to_get_urn(
path: Path, years: List[str], patterns: List[str]
) -> Tuple[List[Dict], List[str]]:
"""
Pipeline to collect the URNs matching the given patterns across several files.
Arguments:
path: directory containing the json files
years: list of years for which data should be collected
patterns: URN substrings to search for
"""
if not isinstance(path, Path):
raise TypeError("The path parameter must be of type Path.")
container = []
if not isinstance(years, List):
raise TypeError("The years parameter must be a list.")
if not isinstance(patterns, List):
raise TypeError("The patterns parameter must be a list.")
# create a container to store the years that actually have data
filtered_years = []
for year in years:
container_of_json_year = select_files_based_on_year(path, year)
if not container_of_json_year:
print(f"No data found for {path} and year {year}.")
continue
# sort by filename
container_of_json_year = sorted(
container_of_json_year, key=lambda x: int(x.name.split("_")[0])
)
# load the data
df = load_into_dataframe(container_of_json_year)
for pattern in patterns:
print(
f"Starting URN collection for pattern {pattern} in the {year} annual base."
)
urn_list = get_urn(pattern, df)
container.append(urn_list)
del urn_list
filtered_years.append(year)
del df
gc.collect()
return container, filtered_years
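# Illustrative usage sketch (not part of the original script); the directory,
# years, and URN pattern below are placeholder assumptions.
if __name__ == "__main__":
    sample_path = Path("data/json")
    urn_maps, years_with_data = pipeline_to_get_urn(
        sample_path, ["2019", "2020"], ["urn:lex:br"]
    )
    print(f"Collected {sum(len(m) for m in urn_maps)} URNs for years {years_with_data}")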
def create_df_for_urn_data_and_save(data: Dict, filename: str) -> None:
x = pd.DataFrame.from_dict(data, orient="index")
# -*- coding: utf-8 -*-
"""Get DrugCentral as OBO."""
import logging
from typing import Iterable
import bioversions
import pandas as pd
from pyobo.struct import Obo, Reference, Term
from pyobo.utils.path import ensure_df
logger = logging.getLogger(__name__)
PREFIX = "drugcentral"
URL = "http://unmtid-shinyapps.net/download/structures.smiles.tsv"
def get_obo(force: bool = False) -> Obo:
"""Get DrugCentral OBO."""
version = bioversions.get_version(PREFIX)
return Obo(
ontology=PREFIX,
name="DrugCentral",
data_version=version,
iter_terms=iter_terms,
iter_terms_kwargs=dict(version=version, force=force),
auto_generated_by=f"bio2obo:{PREFIX}",
)
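# Illustrative usage (a sketch, not part of the original module):
#     obo = get_obo()
#     print(obo.name, obo.data_version)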
def iter_terms(version: str, force: bool = False) -> Iterable[Term]:
"""Iterate over DrugCentral terms."""
df = ensure_df(PREFIX, url=URL, version=version, force=force)
for smiles, inchi, inchi_key, drugcentral_id, drugcentral_name, cas in df.values:
if pd.isna(smiles) or pd.isna(inchi) or pd.isna(inchi_key):
logger.warning("missing data for drugcentral:%s", drugcentral_id)
continue
term = Term.from_triple(prefix=PREFIX, identifier=drugcentral_id, name=drugcentral_name)
term.append_xref(Reference(prefix="inchikey", identifier=inchi_key))
term.append_property("smiles", smiles)
term.append_property("inchi", inchi)
if pd.notna(cas):
"""
Copyright (c) 2020, <NAME> <NAME>
All rights reserved.
This is an information tool to retrieve official business financials (income statements, balance sheets, and cash flow statements) for a specified range of time. The code aims to be as vanilla as possible by minimizing the dependencies and packages used to construct its functions. It can be used immediately off the shelf and assumes no more than the following packages are installed. As a reminder, please ensure that your working directory has enough space, ideally at least 100 MB, for newly serialized reports to reside on disk until you decide to clear them.
"""
# import libraries
import re
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import datetime
from selenium import webdriver
import os
import pickle
class Business:
# Define a default constructor for the Business object
def __init__(self, foreign, symbol, report_type, start_period, end_period ):
self.foreign=foreign
self.symbol=symbol
self.report_type=report_type
self.start_period=start_period
self.end_period=end_period
#-------------Retrieving Annual/Quarter Reports----------
# Define a function to store the url(s) to a company's annual or quarter report(s)
def ghost_report_url(self):
############## Check validity of inputs #############
## Error Message if the foreign argument is not logical
if (type(self.foreign)!=bool):
raise TypeError("Invalid foreign type: foreign argument must be logical- True or False")
## Error message if the inputted ticker symbol is not a string
if(type(self.symbol)!=str):
raise TypeError("Invalid ticker symbol type: symbol argument must be a string")
## Error message if the inputted report type is neither 'annual' or 'quarter'
if(self.report_type!='annual' and self.report_type!='quarter'):
raise TypeError("Invalid report type: only 'annual' or 'quarter' report type is allowed")
## Error message if the specified start period or(and) end period is(are) not valid
if ((len(str(self.start_period)))| (len(str(self.end_period)))!=8):
raise ValueError("Invalid start period or(and) end period(s): start_period and end_period arguments must be in the form yyyymmdd")
## Error message to warn that foreign quarterly reports are not available on the SEC Edgar database
if(self.foreign==True and self.report_type=='quarter'):
raise ValueError("Foreign quarterly report(s) not available: try 'annual' report instead")
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
################# Retrieving Annual Report(s) (10-K or 20-F) ################
if(self.report_type=='annual'):
# Get the url to the company's historic 10-K (including 10-K/A) or 20-F (including 20-F/A) filings(s)
historical_filings_url=r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=10-k&dateb=&owner=exclude&count=100" if self.foreign==False else r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=20-f&dateb=&owner=exclude&count=100"
# Get table containing descriptions of the company's 10-K(include 10-K/A and others) or 20-F(include 20F/A and others) filings(s)
filings_description_table=pd.read_html(str(BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("table",{"class":"tableFile2"})))[0]
## Stop and return an error message if the company has no filing type of 10-K or 20-F, given the company symbol and foreign logic
if len(filings_description_table[(filings_description_table["Filings"]=="10-K")|(filings_description_table["Filings"]=="20-F")])==0:
raise NameError("Invalid company symbol or(and) foreign logical")
# Get the company's CIK (Central Index Key) number
cik_number=re.search(r"(\d{10})",BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("span",{"class":"companyName"}).text)[0]
# Get a list of accession numbers of the historic 10-K or 20-F filing(s). raw_accession_numbers because the accession numbers are separated by dashes
raw_accession_numbers=filings_description_table[(filings_description_table["Filings"]=="10-K")| (filings_description_table["Filings"]=="20-F")].Description.str.extract(r"(\d{10}\-\d{2}\-\d{6})",expand=False)
# Get a list of url(s) to a company's historic 10-K or 20-F report(s) details
filing_details_url=r"https://www.sec.gov/Archives/edgar/data/"+cik_number+r"/"+raw_accession_numbers+r"-index.html"
filing_details_url=filing_details_url.to_list()
# Get a list of url(s) to a company's 10-K or 20-F report(s) documentations
document_details_url=r"https://www.sec.gov/cgi-bin/viewer?action=view&cik="+cik_number+"&accession_number="+raw_accession_numbers+"&xbrl_type=v"
document_details_url=document_details_url.to_list()
# Get report period(s), that is the 10-K or 20-F report(s) as of this(these) date(s)
report_periods=[datetime.strptime(BeautifulSoup(requests.get(url).content,"html.parser").find("div",text=re.compile("Period of Report")).find_next("div").text,"%Y-%m-%d").date() for url in filing_details_url]
# Get specified filing details url(s)
filing_details_url=[filing_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get specified document details url(s)
document_details_url=[document_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get download url(s) to the company's 10-K or 20F extracts
annual_download_url=[]
for url in document_details_url:
soup=BeautifulSoup(requests.get(url).content,"html.parser").find('a', text = re.compile('View Excel Document'), attrs = {'class' : 'xbrlviewer'})
if soup is not None:
annual_download_url.append(r"https://www.sec.gov"+soup['href'])
else:
annual_download_url.append(None)
# Get specified report period(s)
report_periods=[report_periods[rp] for rp in range(len(report_periods)) if report_periods[rp]>start_period and report_periods[rp]<=end_period]
# Get html table(s) of the document format files
tableFile=[BeautifulSoup(requests.get(url).content,"html.parser").find("table", { "summary" : "Document Format Files"}) for url in filing_details_url]
# Get url(s) to the annual report html
annual_report_url=[]
for tab in range(len(tableFile)):
if tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip()!='':
if ".htm" in tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip():
annual_report_url.append("https://www.sec.gov"+tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a")["href"].replace("/ix?doc=",""))
else:
annual_report_url.append("annual report is not in HTML format")
else:
annual_report_url.append("annual report not available")
# Combine the company's report period(s), and annual report url(s) into a data frame
annual_report_df=pd.DataFrame({'report_periods':report_periods,'annual_report_url':annual_report_url,'annual_download_url':annual_download_url},index=[self.symbol]*len(report_periods))
# Return the data frame contructed above if it is not empty
if not annual_report_df.empty:
return annual_report_df
else:
return "No annual report filing(s) for "+ self.symbol + " between "+ start_period.strftime("%Y-%m-%d")+" and "+end_period.strftime("%Y-%m-%d")
################# Retrieving Quarter Report(s) (10-Q) #########################
if(self.report_type=='quarter'):
# Get the url to the company's historic 10-Q
historical_filings_url=r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=10-q&dateb=&owner=exclude&count=100"
# Get table containing descriptions of the company's 10-Q(include 10-Q/A and others) filings(s)
filings_description_table=pd.read_html(str(BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("table",{"class":"tableFile2"})))[0]
## Stop and return an error message if the company has no filing type of 10-Q, given the company symbol and foreign logic
if len(filings_description_table[filings_description_table["Filings"]=="10-Q"])==0:
raise NameError("Invalid company symbol or(and) foreign logical")
# Get the company's CIK (Central Index Key) number
cik_number=re.search(r"(\d{10})",BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("span",{"class":"companyName"}).text)[0]
# Get a list of accession numbers of the historic 10-Q. raw_accession_numbers because the accession numbers are separated by dashes
raw_accession_numbers=filings_description_table[filings_description_table["Filings"]=="10-Q"].Description.str.extract(r"(\d{10}\-\d{2}\-\d{6})",expand=False)
# Get a list of url(s) to a company's historic 10-Q report(s) details
filing_details_url=r"https://www.sec.gov/Archives/edgar/data/"+cik_number+r"/"+raw_accession_numbers+r"-index.html"
filing_details_url=filing_details_url.to_list()
# Get a list of url(s) to a company's 10-Q report(s) documentations
document_details_url=r"https://www.sec.gov/cgi-bin/viewer?action=view&cik="+cik_number+"&accession_number="+raw_accession_numbers+"&xbrl_type=v"
document_details_url=document_details_url.to_list()
## At this moment, documents before 2009 are not available. Documents of this type are not normally needed anyway
# Get report period(s), that is the 10-Q report(s) as of this(these) date(s)
report_periods=[datetime.strptime(BeautifulSoup(requests.get(url).content,"html.parser").find("div",text=re.compile("Period of Report")).find_next("div").text,"%Y-%m-%d").date() for url in filing_details_url]
# Get specified filing details url(s)
filing_details_url=[filing_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get specified document details url(s)
document_details_url=[document_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get download url(s) to the company's 10-Q extracts
quarter_download_url=[]
for url in document_details_url:
soup=BeautifulSoup(requests.get(url).content,"html.parser").find('a', text = re.compile('View Excel Document'), attrs = {'class' : 'xbrlviewer'})
if soup is not None:
quarter_download_url.append(r"https://www.sec.gov"+soup['href'])
else:
quarter_download_url.append(None)
# Get specified report period(s)
report_periods=[report_periods[rp] for rp in range(len(report_periods)) if report_periods[rp]>start_period and report_periods[rp]<=end_period]
# Get html table(s) of the document format files
tableFile=[BeautifulSoup(requests.get(url).content,"html.parser").find("table", { "summary" : "Document Format Files"}) for url in filing_details_url]
# Get url(s) to the quarterly report html
quarter_report_url=[]
for tab in range(len(tableFile)):
if tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip()!='':
if ".htm" in tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip():
quarter_report_url.append("https://www.sec.gov"+tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a")["href"].replace("/ix?doc=",""))
else:
quarter_report_url.append("quarterly report is not in HTML format")
else:
quarter_report_url.append("quarterly report not available")
# Combine the company's report period(s), and quarterly report url(s) into a data frame
quarter_report_df=pd.DataFrame({'report_periods':report_periods,'quarter_report_url':quarter_report_url,'quarter_download_url':quarter_download_url},index=[self.symbol]*len(report_periods))
# Return the data frame contructed above if it is not empty
if not quarter_report_df.empty:
return quarter_report_df
else:
return "No quarter report filing(s) for "+ self.symbol + " between "+ start_period.strftime("%Y-%m-%d")+" and "+end_period.strftime("%Y-%m-%d")
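# Illustrative usage (sketch; the ticker symbol and date range are placeholders):
#     Business(foreign=False, symbol='AAPL', report_type='annual',
#              start_period=20180101, end_period=20201231).ghost_report_url()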
#------------------------ Best-scrolled to the most relevant financial exhibit------------------------
# A function to exhibit financial statements
def financial_statements_exhibit(self):
## Errors checked in the ghost_report_url()
# Target annual financial statements of U.S. businesses
# Prioritize in the order of 'Consolidated Statements of Cash Flows', 'Consolidated Income Statements', 'Consolidated Statements of Operations', 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position', 'Financial Statements and Supplementary Data', 'Selected Financial Data'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled financial exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('Financial Statements and Supplementary Data').click()
except:
try:
driver.find_element_by_partial_link_text('Selected Financial Data').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual financial statements require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled financial exhibit
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual financial statements of foreign businesses
# Prioritize in the order of 'Consolidated Statements of Cash Flows', 'Consolidated Income Statements', 'Consolidated Statements of Operations', 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position', 'FINANCIAL STATEMENTS', 'Financial Statements', 'Selected Financial Data'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the most relevant financial exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('FINANCIAL STATEMENTS').click()
except:
try:
# Since the query is case insensitive, search in other cases
driver.find_element_by_partial_link_text('Financial Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Selected Financial Data').click()
except:
try:
driver.find_element_by_partial_link_text('KEY INFORMATION').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual financial statements require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled financial exhibit
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target quarter financial statements of U.S. businesses
# Prioritize in the order of 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position','Consolidated Statements of Cash Flows','Consolidated Income Statements' 'Consolidated Statements of Operations', 'FINANCIAL STATEMENTS', 'Financial Statements'
if(self.foreign==False and self.report_type=='quarter'):
# Import quarter_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up best-scrolled financial exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('FINANCIAL STATEMENTS').click()
except:
try:
driver.find_element_by_partial_link_text('Financial Statements').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter financial statements require manual browsing.' )
pass
# Open new tab after pulling up the best-scrolled balance sheet section
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
#------------ Best-scrolled to the most relevant risk factor exhibit------------
# A function to exhibit risk factors
def risk_factors_exhibit(self, risk_type):
## Previous errors checked in the ghost_report_url()
## Error message if the inputted risk type is neither 'enterprise' or 'market'
if(risk_type!='enterprise' and risk_type!='market'):
raise TypeError("Invalid risk type: only 'enterprise' or 'market' risk type is allowed")
########################### Enterprise Risk Exhibit ##################################
if(risk_type=='enterprise'):
# Target annual and quarter enterprise risk factors of U.S. businesses
# Prioritize in the order of 'Risk Factors','RISK FACTORS'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual enterprise risk factors require manual browsing.' )
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
elif (self.foreign==False and self.report_type=='quarter'):
# Import annual_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter enterprise risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual enterprise risk factors of foreign businesses
# Prioritize in the order of 'Risk Factors', 'RISK FACTORS', 'KEY INFORMATION', 'Key Information'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
try:
driver.find_element_by_partial_link_text('KEY INFORMATION').click()
except:
try:
driver.find_element_by_partial_link_text('Key Information').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual enterprise risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
########################### Market Risk Exhibit #############################
elif(risk_type=='market'):
# Target annual and quarter market risk factors of U.S. businesses
# Prioritize in the order of 'Quantitative and Qualitative Disclosures About Market Risk', 'QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
elif (self.foreign==False and self.report_type=='quarter'):
# Import annual_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual market risk factors of foreign businesses
# Prioritize in the order of 'Quantitative and Qualitative Disclosures About Market Risk','QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate 'webdrive.exe' file to launch chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
#----------------------------- Curate Financial Statements -----------------------------------------
# A function to curate income statements, balance sheets, and cah flow statements for U.S. and foreign businesses
def curate_financial_statements(self,statement_type):
## Error message if inputted statement type is not available
if(statement_type!='income' and statement_type!='balance' and statement_type!='cashflow'):
raise TypeError("Statement type not available: only 'income', 'balance', or 'cashflow' statement type is allowed")
# Probable names for statement selection - may have to update identifiers as different companies use different statement names
income_terms=['Consolidated Income Statement', 'Consolidated Statements of Income', 'Consolidated Statements of Earnings', 'Consolidated Statements of Operations','Consolidated Statements of Profit or Loss','Profit and Loss Statement','P&L Statement','P/L Statement','Consolidated Income Statement','Consolidated Statement of Income', 'Consolidated Statement of Earnings','Consolidated Statement of Operations','Consolidated Statement of Profit or Loss','Consolidated Profit and Loss Statement','Consolidated P&L Statement','Consolidated P/L Statement','Statement of Consolidated Operations','Statements of Consolidated Operations','Statement of Combined Operation','Statements of Combined Operation']
balance_terms=['Consolidated Balance Sheets', 'Consolidated Balance Sheet','Consolidated Statements of Financial Position', 'Consolidated Statements of Financial Condition','Consolidated Statement of Financial Positions','Consolidated Statement of Financial Conditions', 'Statement of Consolidated Financial Position','Statements of Consolidated Financial Position', 'Statement of Consolidated Financial Condition', 'Statements of Consolidated Financial Condition','Combined Balance Sheet']
cashflow_terms=['Consolidated Statements of Cash Flows','Consolidated Statement of Cash Flows','Cash Flow Statement','Consolidated Cash Flow Statement', 'Statement of Consolidated Cash Flows','Statements of Consolidated Cash Flows','Statement of Combined Cash Flow','Statements of Combined Cash Flow']
# Set root directory for file access
root_path=os.getcwd()
########### Extract Annual and Quarter Financial Statements (U.S. and foreign businesses)#################
# Retrieve periods and url(s) from the url table called by ghost_report_url()
report_table=self.ghost_report_url()
report_periods=report_table.report_periods.to_list()
if(self.report_type=='annual'):
download_url_container=report_table.annual_download_url.to_list() # container to store the download urls of annual statements
elif(self.report_type=='quarter'):
download_url_container=report_table.quarter_download_url.to_list() # container to store the download urls of quarter statements
# Designate a directory to store downloaded statements (begin statement piling)
statement_pile_path=os.path.join(root_path,'statement_pile')
company_pile_path=os.path.join(statement_pile_path,self.symbol)
try:
os.mkdir(statement_pile_path) # Create the statement_pile_path path
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
try:
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
os.chdir(company_pile_path)
# Download accessible statements into the statement_pile path
# Construct a data frame to store the specified statement type
period_container=[] # container to store statement periods
statement_container=[] # container to store statement table
for url_index in range(len(download_url_container)):
statement_period=report_periods[url_index].strftime("%Y-%m-%d")
if(download_url_container[url_index] is not None and download_url_container[url_index][download_url_container[url_index].rfind('.')+1:len(download_url_container[url_index])]!='xls'):
statement_file=requests.get(download_url_container[url_index])
file_name=self.symbol+statement_period+self.report_type+'.xlsx'
with open(file_name, 'wb+') as fs:
fs.write(statement_file.content) # populating statement contents
dfs=pd.ExcelFile(fs)
sheet_headers=list(map(lambda x: x.lower().replace(' ','').replace('_','').replace('-','').replace(',','').replace("'","").replace('&','').replace('/',''), [dfs.parse(sn).columns[0] for sn in dfs.sheet_names]))
############################ Income Statements ###################################
if (statement_type=='income'):
income_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''),income_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in income_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify income statement and store in dataframe form
income_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store income statement into the statement container
statement_container.append(income_statement)
# Store income statement period into the period container
period_container.append(statement_period)
# Serialize the income statement dataframe into '.pickle'- to be accessed faster next time
income_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store income statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store income statement period into the period container
period_container.append(statement_period)
# Message to warn that income statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' income statement not identified or not available: update income statement identifiers or pass')
############################ Balance Sheets ###################################
if (statement_type=='balance'):
balance_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), balance_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in balance_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify balance sheet and store in dataframe form
balance_sheet=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store balance sheet into the statement container
statement_container.append(balance_sheet)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Serialize the balance sheet dataframe into '.pickle'- to be accessed faster next time
balance_sheet.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store balance sheet as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Message to warn that balance sheet may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' balance sheet not identified or not available: update balance sheet identifiers or pass')
############################ Cash Flow Statements ###################################
if (statement_type=='cashflow'):
cashflow_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), cashflow_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in cashflow_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify cash flow statement and store in dataframe form
cashflow_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store cash flow statement into the statement container
statement_container.append(cashflow_statement)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Serialize the cash flow statement dataframe into '.pickle'- to be accessed faster next time
cashflow_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store cash flow statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Message to warn that cash flow statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' cashflow statement not identified or not available: update cash flow statement identifiers or pass')
fs.close() # close the downloaded '.xlsx' file
os.remove(file_name) # remove the downloaded '.xlsx' file after extracting financial statements
else:
print(self.symbol+' '+statement_period+' '+self.report_type+' '+statement_type+' statement not available')
# Combine the company's income statement(s), balance sheet(s), or cash flow statement(s) and the statement periods into a dataframe
statement_df=pd.DataFrame({'statement_periods':period_container,statement_type+'_statement':statement_container},index=[self.symbol]*len(period_container))
# Return back to root_path (end statement piling)
os.chdir(root_path)
# Return the data frame contructed above if it is not empty
if not statement_df.empty:
return statement_df
else:
return 'No '+self.report_type+' '+statement_type+' statement for '+self.symbol+' between '+self.start_period.strftime("%Y-%m-%d")+' and '+self.end_period.strftime("%Y-%m-%d")
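# Illustrative usage (sketch; assumes a Business instance named `business` built as above):
#     income_df = business.curate_financial_statements('income')
#     balance_df = business.curate_financial_statements('balance')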
#------------------------Extract Most Recent Income Statements--------------------------------
def ghost_income(self):
bin_path=r'.\\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualIncome" in s for s in bin_files]):
annual_income_file=[s for s in bin_files if "AnnualIncome" in s]
annual_income_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_income_file))
annual_income_file=[annual_income_file[i] for i in range(len(annual_income_file)) if annual_income_periods[i]>start_period and annual_income_periods[i]<=end_period]
annual_income_periods=[annual_income_periods[i] for i in range(len(annual_income_periods)) if annual_income_periods[i]>start_period and annual_income_periods[i]<=end_period]
annual_income_file.reverse()
annual_income_periods.reverse()
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[6])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[6]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[5])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[5]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[4])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[4]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[2])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[2]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[1])], axis = 1)
import datetime
import json
import typing
import pandas as pd
import requests
URL = "https://www.tpex.org.tw/web/stock/aftertrading/otc_quotes_no1430/stk_wn1430_result.php?l=zh-tw&d={}&se=AL&_={}"
# Request header parameters sent when browsing the page, imitating a browser-issued request
HEADER = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
"Connection": "keep-alive",
"Host": "www.tpex.org.tw",
"Referer": "https://www.tpex.org.tw/web/stock/aftertrading/otc_quotes_no1430/stk_wn1430.php?l=zh-tw",
"sec-ch-ua": '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "Windows",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
def crawler(parameters: typing.Dict[str, str]):
crawler_date = parameters.get("crawler_date", "")
crawler_date = crawler_date.replace(
crawler_date.split("-")[0], str(int(crawler_date.split("-")[0]) - 1911)
)
crawler_date = crawler_date.replace("-", "/")
crawler_timestamp = int(datetime.datetime.now().timestamp())
resp = requests.get(
url=URL.format(crawler_date, crawler_timestamp), headers=HEADER
)
columns = [
"stock_id",
"stock_name",
"close",
"open",
"max",
"min",
]
if resp.ok:
resp_data = json.loads(resp.text)
data = pd.DataFrame(resp_data["aaData"])
data = data[[0, 1, 2, 4, 5, 6]]
data.columns = columns
data["date"] = parameters.get("crawler_date", "")
else:
data = pd.DataFrame()
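# Illustrative call (sketch, not part of the original module); the date below is a placeholder:
#     df = crawler({"crawler_date": "2021-01-05"})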
"""\
Data structures for expt.
The "Experiment" data is structured like a 4D array, i.e.
Experiment := [hypothesis_name, run_index, index, column]
The data is structured in the following ways (from higher to lower level):
Experiment (~= Dict[str, List[DataFrame]]):
An experiment consists of one or multiple Hypotheses (e.g. different
hyperparameters or algorithms) that can be compared with one another.
Hypothesis (~= List[DataFrame], or RunGroup):
A Hypothesis consists of several `Run`s that share an
identical experimental setups (e.g. hyperparameters).
This usually corresponds to one single curve for each model.
It may also contain additional metadata of the experiment.
Run (~= DataFrame == [index, column]):
Contains a pandas DataFrame (a table-like structure, str -> Series)
as well as more metadata (e.g. path, seed, etc.)
Note that one can also manage a collection of Experiments (e.g. the same set
of hypotheses or algorithms applied over different environments or dataset).
"""
import collections
import fnmatch
import itertools
import os.path
import re
import sys
import types
from dataclasses import dataclass # for python 3.6, backport needed
from multiprocessing.pool import Pool as MultiprocessPool
from multiprocessing.pool import ThreadPool
from typing import (Any, Callable, Dict, Generator, Iterable, Iterator, List,
Mapping, MutableMapping, Optional, Sequence, Set, Tuple,
TypeVar, Union)
import numpy as np
import pandas as pd
from pandas.core.accessor import CachedAccessor
from pandas.core.groupby.generic import DataFrameGroupBy
from typeguard import typechecked
from . import plot as _plot
from . import util
from .path_util import exists, glob, isdir, open
T = TypeVar('T')
try:
from tqdm.auto import tqdm
except:
tqdm = util.NoopTqdm
#########################################################################
# Data Classes
#########################################################################
@dataclass
class Run:
"""Represents a single run, containing one pd.DataFrame object
as well as other metadata (path, etc.)
"""
path: str
df: pd.DataFrame
@classmethod
def of(cls, o):
"""A static factory method."""
if isinstance(o, Run):
return Run(path=o.path, df=o.df)
elif isinstance(o, pd.DataFrame):
return cls.from_dataframe(o)
raise TypeError("Unknown type {}".format(type(o)))
@classmethod
@typechecked
def from_dataframe(cls, df: pd.DataFrame):
run = cls(path='', df=df)
if hasattr(df, 'path'):
run.path = df.path
return run
def __repr__(self):
return 'Run({path!r}, df with {rows} rows)'.format(
path=self.path, rows=len(self.df))
@property
def columns(self) -> Sequence[str]:
"""Returns all column names."""
return list(self.df.columns) # type: ignore
@property
def name(self) -> str:
"""Returns the last segment of the path."""
path = self.path.rstrip('/')
return os.path.basename(path)
def to_hypothesis(self) -> 'Hypothesis':
"""Create a new `Hypothesis` consisting of only this run."""
return Hypothesis.of(self)
def plot(self, *args, subplots=True, **kwargs):
return self.to_hypothesis().plot(*args, subplots=subplots, **kwargs)
def hvplot(self, *args, subplots=True, **kwargs):
return self.to_hypothesis().hvplot(*args, subplots=subplots, **kwargs)
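# Example (sketch, not part of the original module): wrap a plain DataFrame into a Run.
#     run = Run.of(pd.DataFrame({"step": [1, 2, 3], "loss": [0.9, 0.5, 0.3]}))
#     run.to_hypothesis().mean()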
class RunList(Sequence[Run]):
"""A (immutable) list of Run objects, but with some useful utility
methods such as filtering, searching, and handy format conversion."""
def __init__(self, runs: Iterable[Run]):
runs = self._validate_type(runs)
self._runs = list(runs)
@classmethod
def of(cls, runs: Iterable[Run]):
if isinstance(runs, cls):
return runs # do not make a copy
else:
return cls(runs) # RunList(runs)
def _validate_type(self, runs) -> List[Run]:
if not isinstance(runs, Iterable):
raise TypeError(f"`runs` must be a Iterable, but given {type(runs)}")
if isinstance(runs, Mapping):
raise TypeError(f"`runs` should not be a dictionary, given {type(runs)} "
" (forgot to wrap with pd.DataFrame?)")
runs = list(runs)
if not all(isinstance(r, Run) for r in runs):
raise TypeError("`runs` must be a iterable of Run, "
"but given {}".format([type(r) for r in runs]))
return runs
def __getitem__(self, index_or_slice):
o = self._runs[index_or_slice]
if isinstance(index_or_slice, slice):
o = RunList(o)
return o
def __next__(self):
# This is a hack to prevent panda's pprint_thing() from converting
# into a sequence of Runs.
raise TypeError("'RunList' object is not an iterator.")
def __len__(self):
return len(self._runs)
def __repr__(self):
return "RunList([\n " + "\n ".join(repr(r) for r in self._runs) + "\n])"
def extend(self, more_runs: Iterable[Run]):
self._runs.extend(more_runs)
def to_list(self) -> List[Run]:
"""Create a new copy of list containing all the runs."""
return list(self._runs)
def to_dataframe(self) -> pd.DataFrame:
"""Return a DataFrame consisting of columns `name` and `run`."""
return pd.DataFrame({
'name': [r.name for r in self._runs],
'run': self._runs,
})
def filter(self, fn: Union[Callable[[Run], bool], str]) -> 'RunList':
"""Apply a filter function (Run -> bool) and return the filtered runs
as another RunList. If a string is given, we convert it as a matcher
function (see fnmatch) that matches `run.name`."""
if isinstance(fn, str):
pat = str(fn)
fn = lambda run: fnmatch.fnmatch(run.name, pat)
return RunList(filter(fn, self._runs))
def grep(self, regex: Union[str, 're.Pattern'], flags=0):
"""Apply a regex-based filter on the path of `Run`, and return the
matched `Run`s as a RunList."""
if isinstance(regex, str):
regex = re.compile(regex, flags=flags)
return self.filter(lambda r: bool(regex.search(r.path)))
def map(self, func: Callable[[Run], Any]) -> List:
"""Apply func for each of the runs. Return the transformation
as a plain list."""
return list(map(func, self._runs))
def to_hypothesis(self, name: str) -> 'Hypothesis':
"""Create a new Hypothesis instance containing all the runs
as the current RunList instance."""
return Hypothesis.of(self, name=name)
def groupby(
self,
by: Callable[[Run], T],
*,
name: Callable[[T], str] = str,
) -> Iterator[Tuple[T, 'Hypothesis']]:
r"""Group runs into hypotheses with the key function `by` (Run -> key).
This will enumerate tuples (`group_key`, Hypothesis) where `group_key`
is the result of the key function for each group, and a Hypothesis
object (with name `name(group_key)`) will consist of all the runs
mapped to the same group.
Args:
by: a key function for groupby operation. (Run -> Key)
name: a function that maps the group (Key) into Hypothesis name (str).
Example:
>>> key_func = lambda run: re.search("algo=(\w+),lr=([.0-9]+)", run.name).group(1, 2)
>>> for group_name, hypothesis in runs.groupby(key_func):
>>> ...
"""
series = pd.Series(self._runs)
groupby = series.groupby(lambda i: by(series[i]))
group: T
for group, runs_in_group in groupby:
yield group, Hypothesis.of(runs_in_group, name=name(group))
def extract(self, pat: str, flags: int = 0) -> pd.DataFrame:
r"""Extract capture groups in the regex pattern `pat` as columns.
Example:
>>> runs[0].name
"ppo-halfcheetah-seed0"
>>> df = runs.extract(r"(?P<algo>[\w]+)-(?P<env_id>[\w]+)-seed(?P<seed>[\d]+)")
>>> assert list(df.columns) == ['algo', 'env_id', 'seed', 'run']
"""
df: pd.DataFrame = self.to_dataframe()
df = df['name'].str.extract(pat, flags=flags)
df['run'] = list(self._runs)
return df
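# Example (sketch): filter runs by a glob pattern and group them into hypotheses;
# `runs` below is a placeholder RunList.
#     selected = runs.filter("*seed*")
#     for key, hyp in selected.groupby(lambda r: r.name.split("-seed")[0]):
#         print(key, len(hyp))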
@dataclass
class Hypothesis(Iterable[Run]):
name: str
runs: RunList
def __init__(self, name: str, runs: Union[Run, Iterable[Run]]):
if isinstance(runs, Run) or isinstance(runs, pd.DataFrame):
if not isinstance(runs, Run):
runs = Run.of(runs)
runs = [runs] # type: ignore
self.name = name
self.runs = RunList(runs)
def __iter__(self) -> Iterator[Run]:
return iter(self.runs)
@classmethod
def of(cls,
runs: Union[Run, Iterable[Run]],
*,
name: Optional[str] = None) -> 'Hypothesis':
"""A static factory method."""
if isinstance(runs, Run):
name = name or runs.path
return cls(name=name or '', runs=runs)
def __getitem__(self, k):
if isinstance(k, int):
return self.runs[k]
if k not in self.columns:
raise KeyError(k)
return pd.DataFrame({r.path: r.df[k] for r in self.runs})
def __repr__(self) -> str:
return f"Hypothesis({self.name!r}, <{len(self.runs)} runs>)"
def __len__(self) -> int:
return len(self.runs)
def __hash__(self):
return hash(id(self))
def __next__(self):
# This is a hack to prevent panda's pprint_thing() from converting
# into a sequence of Runs.
raise TypeError("'Hypothesis' object is not an iterator.")
def describe(self) -> pd.DataFrame:
"""Report a descriptive statistics as a DataFrame,
after aggregating all runs (e.g., mean)."""
return self.mean().describe()
def summary(self) -> pd.DataFrame:
"""Return a DataFrame that summarizes the current hypothesis."""
return Experiment(self.name, [self]).summary()
# see module expt.plot
plot = CachedAccessor("plot", _plot.HypothesisPlotter)
plot.__doc__ = _plot.HypothesisPlotter.__doc__
hvplot = CachedAccessor("hvplot", _plot.HypothesisHvPlotter)
hvplot.__doc__ = _plot.HypothesisHvPlotter.__doc__
@property
def grouped(self) -> DataFrameGroupBy:
return pd.concat(self._dataframes, sort=False).groupby(level=0)
def empty(self) -> bool:
sentinel = object()
return next(iter(self.grouped), sentinel) is sentinel # O(1)
@property
def _dataframes(self) -> List[pd.DataFrame]:
"""Get all dataframes associated with all the runs."""
def _get_df(o):
if isinstance(o, pd.DataFrame):
return o
else:
return o.df
return [_get_df(r) for r in self.runs]
@property
def columns(self) -> Iterable[str]:
return util.merge_list(*[df.columns for df in self._dataframes])
def rolling(self, *args, **kwargs):
return self.grouped.rolling(*args, **kwargs)
def mean(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.mean(*args, **kwargs)
def std(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.std(*args, **kwargs)
def min(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.min(*args, **kwargs)
def max(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.max(*args, **kwargs)
class Experiment(Iterable[Hypothesis]):
@typechecked
def __init__(
self,
name: Optional[str] = None,
hypotheses: Iterable[Hypothesis] = None,
):
self._name = name if name is not None else ""
self._hypotheses: MutableMapping[str, Hypothesis]
self._hypotheses = collections.OrderedDict()
if isinstance(hypotheses, np.ndarray):
hypotheses = list(hypotheses)
for h in (hypotheses or []):
if not isinstance(h, Hypothesis):
raise TypeError("An element of hypotheses contains a wrong type: "
"expected {}, but given {} ".format(
Hypothesis, type(h)))
if h.name in self._hypotheses:
raise ValueError(f"Duplicate hypothesis name: `{h.name}`")
self._hypotheses[h.name] = h
@classmethod
def from_dataframe(
cls,
df: pd.DataFrame,
by: Optional[Union[str, List[str]]] = None,
*,
run_column: str = 'run',
hypothesis_namer: Callable[..., str] = str,
name: Optional[str] = None,
) -> 'Experiment':
"""Constructs a new Experiment object from a DataFrame instance
structured as per the convention.
Args:
by (str, List[str]): The column name to group by. If None (default),
it will try to automatically determine from the dataframe if there
is only one column other than `run_column`.
run_column (str): The column name that contains `Run` objects.
See also `RunList.to_dataframe()`.
      hypothesis_namer: A function that maps the group key (a str or tuple)
        produced by pandas groupby into a hypothesis name. It should take
        one positional argument: the group key.
name: The name for the produced `Experiment`.
"""
if by is None:
# Automatically determine the column from df.
by_columns = list(sorted(set(df.columns).difference([run_column])))
if len(by_columns) != 1:
raise ValueError("Cannot automatically determine the column to "
"group by. Candidates: {}".format(by_columns))
by = next(iter(by_columns))
ex = Experiment(name=name)
for hypothesis_key, runs_df in df.groupby(by):
hypothesis_name = hypothesis_namer(hypothesis_key)
runs = RunList(runs_df[run_column])
h = runs.to_hypothesis(name=hypothesis_name)
ex.add_hypothesis(h)
return ex
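  # Illustrative sketch (assumptions: the dataframe comes from
  # `RunList.extract` and the column names below are made-up examples):
  #
  #   >>> df = runs.extract(r"(?P<algo>[\w]+)-(?P<env_id>[\w]+)-seed(?P<seed>[\d]+)")
  #   >>> ex = Experiment.from_dataframe(df, by="algo", name="algo-comparison")
  #   >>> ex = Experiment.from_dataframe(df, by=["algo", "env_id"],
  #   ...                                hypothesis_namer=lambda key: "-".join(key))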
def add_runs(
self,
hypothesis_name: str,
runs: List[Union[Run, Tuple[str, pd.DataFrame], pd.DataFrame]],
*,
color=None,
linestyle=None,
) -> Hypothesis:
def check_runs_type(runs) -> List[Run]:
if isinstance(runs, types.GeneratorType):
runs = list(runs)
if runs == []:
return []
if isinstance(runs, Run):
runs = [runs]
return [Run.of(r) for r in runs]
_runs = check_runs_type(runs)
d = Hypothesis.of(name=hypothesis_name, runs=_runs)
return self.add_hypothesis(d, extend_if_conflict=True)
@typechecked
def add_hypothesis(
self,
h: Hypothesis,
extend_if_conflict=False,
) -> Hypothesis:
if h.name in self._hypotheses:
if not extend_if_conflict:
raise ValueError(f"Hypothesis named {h.name} already exists!")
d: Hypothesis = self._hypotheses[h.name]
d.runs.extend(h.runs)
else:
self._hypotheses[h.name] = h
return self._hypotheses[h.name]
@property
def name(self) -> str:
return self._name
@property
def title(self) -> str:
return self._name
def keys(self) -> Iterable[str]:
"""Return all hypothesis names."""
return self._hypotheses.keys()
@property
def hypotheses(self) -> Sequence[Hypothesis]:
return tuple(self._hypotheses.values())
def select_top(
self,
key,
k=None,
descending=True,
) -> Union[Hypothesis, Sequence[Hypothesis]]:
"""Choose a hypothesis that has the largest value on the specified column.
Args:
key: str (y_name) or Callable(Hypothesis -> number).
k: If None, the top-1 hypothesis will be returned. Otherwise (integer),
top-k hypotheses will be returned as a tuple.
descending: If True, the hypothesis with largest value in key will be
chosen. If False, the hypothesis with smallest value will be chosen.
Returns: the top-1 hypothesis (if `k` is None) or a tuple of k hypotheses
in the order specified by `key`.
"""
if k is not None and k <= 0:
raise ValueError("k must be greater than 0.")
if k is not None and k > len(self._hypotheses):
raise ValueError("k must be smaller than the number of "
"hypotheses ({})".format(len(self._hypotheses)))
if isinstance(key, str):
y = str(key) # make a copy for closure
if descending:
key = lambda h: h.mean()[y].max()
else:
key = lambda h: h.mean()[y].min()
elif callable(key):
pass # key: Hypothesis -> scalar.
else:
raise TypeError(
f"`key` must be a str or a callable, but got: {type(key)}")
candidates = sorted(self.hypotheses, key=key, reverse=descending)
assert isinstance(candidates, list)
if k is None:
return candidates[0]
else:
return candidates[:k]
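  # Example usage (illustrative; the metric name "return" is hypothetical):
  #
  #   >>> best = ex.select_top("return")                      # single best hypothesis
  #   >>> top3 = ex.select_top("return", k=3)                 # top-3 hypotheses
  #   >>> worst = ex.select_top("return", descending=False)   # smallest value instead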
def __iter__(self) -> Iterator[Hypothesis]:
return iter(self._hypotheses.values())
def __repr__(self) -> str:
return (
f"Experiment('{self.name}', {len(self._hypotheses)} hypotheses: [ \n " +
'\n '.join([repr(exp) for exp in self.hypotheses]) + "\n])")
def __getitem__(
self,
key: Union[str, Tuple],
) -> Union[Hypothesis, np.ndarray, Run, pd.DataFrame]:
"""Return self[key].
    `key` can be one of the following:
      - str: The name of the hypothesis to retrieve.
      - int: An index in [0, len(self)) over all hypotheses. Numpy-style
        fancy indexing is also supported.
      - Tuple(hypo_key: str|int, column: str):
        - The first element is the same as above (hypothesis name or index).
        - The second element is the column name. The return value is the same
          as self[hypo_key][column].
"""
if isinstance(key, str):
name = key
return self._hypotheses[name]
elif isinstance(key, int):
try:
_keys = self._hypotheses.keys()
name = next(itertools.islice(_keys, key, None))
except StopIteration:
raise IndexError("out of range: {} (should be < {})".format(
key, len(self._hypotheses)))
return self._hypotheses[name]
elif isinstance(key, tuple):
hypo_key, column = key
hypos = self[hypo_key]
if isinstance(hypos, list):
raise NotImplementedError("2-dim fancy indexing is not implemented") # yapf: disable
return hypos[column] # type: ignore
elif isinstance(key, Iterable):
key = list(key)
if all(isinstance(k, bool) for k in key):
# fancy indexing through bool
if len(key) != len(self._hypotheses):
raise IndexError("boolean index did not match indexed array along"
" dimension 0; dimension is {} but corresponding "
" boolean dimension is {}".format(
len(self._hypotheses), len(key)))
r = np.empty(len(key), dtype=object)
r[:] = list(self._hypotheses.values())
return r[key]
else:
# fancy indexing through int? # TODO: support str
hypo_keys = list(self._hypotheses.keys())
to_key = lambda k: k if isinstance(k, str) else hypo_keys[k]
return [self._hypotheses[to_key(k)] for k in key]
else:
raise ValueError("Unsupported index: {}".format(key))
def __setitem__(
self,
name: str,
hypothesis_or_runs: Union[Hypothesis, List[Run]],
) -> Hypothesis:
"""An dict-like method for adding hypothesis or runs."""
if isinstance(hypothesis_or_runs, Hypothesis):
      if name in self._hypotheses:
raise ValueError(f"A hypothesis named {name} already exists")
self._hypotheses[name] = hypothesis_or_runs
else:
# TODO metadata (e.g. color)
self.add_runs(name, hypothesis_or_runs) # type: ignore
return self._hypotheses[name]
@property
def columns(self) -> Iterable[str]:
# merge and uniquify all columns but preserving the order.
return util.merge_list(*[h.columns for h in self._hypotheses.values()])
@staticmethod
def AGGREGATE_MEAN_LAST(portion: float):
return (lambda series: series.rolling(max(1, int(len(series) * portion))
).mean().iloc[-1]) # yapf: disable
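  # Note (illustrative): AGGREGATE_MEAN_LAST(0.1) summarizes a series by the
  # rolling mean over roughly its last 10% of points. A minimal sketch:
  #
  #   >>> s = pd.Series(range(100))
  #   >>> s.rolling(max(1, int(len(s) * 0.1))).mean().iloc[-1]
  #   94.5                       # i.e., the mean of the last 10 values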
def summary(self, columns=None, aggregate=None) -> pd.DataFrame:
"""Return a DataFrame that summarizes the current experiments,
whose rows are all hypothesis.
Args:
columns: The list of columns to show. Defaults to `self.columns` plus
`"index"`.
aggregate: A function or a dict of functions ({column_name: ...})
specifying a strategy to aggregate a `Series`. Defaults to take the
average of the last 10% of the series.
Example Usage:
>>> pd.set_option('display.max_colwidth', 2000) # hypothesis name can be long!
>>> df = ex.summary(columns=['index', 'loss', 'return'])
>>> df.style.background_gradient(cmap='viridis')
"""
columns = columns or (['index'] + list(self.columns))
aggregate = aggregate or self.AGGREGATE_MEAN_LAST(0.1)
df = pd.DataFrame({'hypothesis': [h.name for h in self.hypotheses]})
hypo_means = [
(h.mean() if not all(len(df) == 0 for df in h._dataframes) \
else pd.DataFrame())
for h in self.hypotheses
]
for column in columns:
def df_series(df: pd.DataFrame):
if column == 'index':
return df.index
if column not in df:
return []
else:
return df[column].dropna()
def aggregate_h(series):
if len(series) == 0:
# after dropna, no numeric types to aggregate?
return np.nan
aggregate_fn = aggregate
if not callable(aggregate_fn):
aggregate_fn = aggregate[column]
v = aggregate_fn(series) if column != 'index' else series.max()
return v
df[column] = [aggregate_h(df_series(hm)) for hm in hypo_means]
return df
def hvplot(self, *args, **kwargs):
plot = None
for i, (name, hypo) in enumerate(self._hypotheses.items()):
p = hypo.hvplot(*args, label=name, **kwargs)
plot = (plot * p) if plot else p
return plot
plot = CachedAccessor("plot", _plot.ExperimentPlotter)
plot.__doc__ = _plot.ExperimentPlotter.__doc__
#########################################################################
# Data Parsing Functions
#########################################################################
def parse_run(run_folder, fillna=False, verbose=False) -> pd.DataFrame:
"""Create a pd.DataFrame object from a single directory."""
if verbose:
# TODO Use python logging
print(f"Reading {run_folder} ...", file=sys.stderr, flush=True)
    # TODO: make this more general (rather than being specific to progress.csv)
    # and support tensorboard eventlog files, etc.
sources = [
parse_run_progresscsv,
parse_run_tensorboard,
]
for fn in sources:
try:
df = fn(run_folder, fillna=fillna, verbose=verbose)
if df is not None:
break
except (FileNotFoundError, IOError) as e:
if verbose:
print(f"{fn.__name__} -> {e}\n", file=sys.stderr, flush=True)
else:
raise | pd.errors.EmptyDataError(f"Cannot handle dir: {run_folder}") | pandas.errors.EmptyDataError |
import re
import socket
from datetime import datetime
from urlextract import URLExtract
import urllib.parse as urlparse
from urllib.parse import parse_qs
import click
import argparse
import csv
import os
from dateutil.parser import parse
import pandas as pd
from urllib.parse import unquote
import hashlib
# When you need to connect to a database
#from pandas.io import sql
#import mysql.connector
#from sqlalchemy import create_engine
#import mysql.connector
#Global Variables
data = []
map_refer = {}
# Missing an ArgumentParser
#parser = argparse.ArgumentParser(description='Description of your program')
#parser.add_argument('-p','--path', help='Localization of the patching files', default= "./Files/")
#args = vars(parser.parse_args())
def extract(request):
"""
    Extract the URL domain from a wayback request.
"""
extractor = URLExtract()
try:
urls = extractor.find_urls('/'.join(request.split('/')[3:]))
if urls:
return urls[0]
else:
return None
except:
import pdb;pdb.set_trace()
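# Illustrative example (the request string below is made up, not taken from
# real logs); the exact output depends on URLExtract's matching:
#
#   >>> extract("GET /wayback/20190101000000/http://example.com/page HTTP/1.1")
#   'http://example.com/page'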
def getParametersFromRequestWayback(request, df, i):
"""
Extract parameters from wayback request.
"""
# Just a sanity check.
if not pd.isnull(df.at[i, 'DATE']):
try:
# Generate timestamp using the parameter DATE
date_simple = df.at[i, 'DATE'].replace("[", "").replace("]", "")
date = datetime.strptime(date_simple, "%d/%b/%Y:%H:%M:%S")
# Just a sanity check.
if re.match(r"GET /wayback/[0-9]+", request):
#Extract url domain
url = extract(request)
if urlparse.urlparse(url).netloc != "":
final_url = urlparse.urlparse(url).netloc
else:
final_url = url
#Put into a list to later generate a dataframe
data.append([df.at[i, "IP_ADDRESS"], df.at[i, "USER_AGENT"], date.timestamp(), df.at[i, "REQUEST"], df.at[i, "STATUS_CODE"], df.at[i, "PREVIOUS_REQUEST"], final_url])
except:
raise ValueError("Error - getParametersFromRequestWayback function")
def getParametersFromRequest(request, df, i, boolRequest):
"""
    Extract and process the parameters from a query request.
    This function is only used for Apache logs.
"""
# Check whether we are processing the request or the previous_request
if boolRequest:
#This request will not be analyzed in the first analysis, however it is done for later analysis.
#Image Search JSP and Page Search JSP will be treated as equals.
if request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
# Set the parameter BOOL_QUERY (i.e., =1 means the line is a query)
df.at[i, 'BOOL_QUERY'] = 1
# Set the parameter TYPE_SEARCH
if request.startswith("GET /search.jsp?"):
df.at[i, 'TYPE_SEARCH'] = "search_jsp"
else:
df.at[i, 'TYPE_SEARCH'] = "images_jsp"
# Parse the REQUEST and Set the parameters TRACKINGID, USER_TRACKING_ID, SEARCH_TRACKING_ID, QUERY, LANG_REQUEST, FROM_REQUEST, TO_REQUEST
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
try:
df.at[i, 'QUERY'] = unquote(parse_qs(parsed.query)['query'][0])
df.at[i, 'LANG_REQUEST'] = parse_qs(parsed.query)['l'][0]
except:
df.at[i, 'BOT'] = 1
try:
df.at[i, 'FROM_REQUEST'] = parse_qs(parsed.query)['dateStart'][0]
df.at[i, 'TO_REQUEST'] = parse_qs(parsed.query)['dateEnd'][0]
except:
df.at[i, 'FROM_REQUEST'] = None
df.at[i, 'TO_REQUEST'] = None
#Image Search API and Page Search API calls will be treated as equals.
elif "textsearch?" in request or "imagesearch?" in request:
# Set the parameter BOOL_QUERY (i.e., =1 means the line is a query)
df.at[i, 'BOOL_QUERY'] = 1
# Set the parameter TYPE_SEARCH
if request.startswith("GET /imagesearch?"):
df.at[i, 'TYPE_SEARCH'] = "imagesearch"
else:
df.at[i, 'TYPE_SEARCH'] = "textsearch"
# Parse the REQUEST and Set the parameters TRACKINGID, USER_TRACKING_ID, SEARCH_TRACKING_ID, QUERY, MAXITEMS, PAGE, FROM_REQUEST, TO_REQUEST
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
try:
#import pdb;pdb.set_trace()
df.at[i, 'QUERY'] = unquote(parse_qs(parsed.query)['q'][0])
offset = int(parse_qs(parsed.query)['offset'][0])
df.at[i, 'MAXITEMS'] = int(parse_qs(parsed.query)['maxItems'][0])
df.at[i, 'PAGE'] = int(offset/df.at[i, 'MAXITEMS'])
except:
df.at[i, 'BOT'] = 1
try:
df.at[i, 'FROM_REQUEST'] = parse_qs(parsed.query)['from'][0]
df.at[i, 'TO_REQUEST'] = parse_qs(parsed.query)['to'][0]
except:
df.at[i, 'FROM_REQUEST'] = None
df.at[i, 'TO_REQUEST'] = None
#Process the parameter REQUEST and set the parameter PREVIOUS_REQUEST
else:
if request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
parsed = urlparse.urlparse(request)
df.at[i, 'PREVIOUS_QUERY'] = parse_qs(parsed.query)['query'][0]
elif request.startswith("GET /imagesearch?") or request.startswith("GET /textsearch?"):
parsed = urlparse.urlparse(request)
df.at[i, 'PREVIOUS_QUERY'] = parse_qs(parsed.query)['q'][0]
def processDataframe(request, previous_request, file_name, df, i, all_info_date):
"""
Function to process each log depending on the format (Apache vs Log4j)
"""
# Check if we are processing the Apache Log
if "logfile" in file_name:
getParametersFromRequest(request.replace(" HTTP/1.1", ""), df, i, True)
        if not pd.isnull(previous_request):
getParametersFromRequest(previous_request.replace(" HTTP/1.1", ""), df, i, False)
# if we are not processing the Apache Log
else:
#Only thing needed from request
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
# Just a sanity check.
if not pd.isnull(df.at[i, 'DATE']):
try:
# Generate TIMESTAMP using the parameter DATE and Set the parameters YEAR, MONTH, DAY, HOUR, MINUTE
date_simple = df.at[i, 'DATE'].replace("[", "").replace("]", "")
date = datetime.strptime(date_simple, "%d/%b/%Y:%H:%M:%S")
df.at[i, 'TIMESTAMP'] = date.timestamp()
if all_info_date:
df.at[i, 'YEAR'] = date.year
df.at[i, 'MONTH'] = date.month
df.at[i, 'DAY'] = date.day
df.at[i, 'HOUR'] = date.hour
df.at[i, 'MINUTE'] = date.minute
except:
df.at[i, 'BOT'] = 1
else:
df.at[i, 'BOT'] = 1
return date
def mergeFiles():
"""
    Process each log file and merge the results (the core of this script).
"""
click.secho("Start Process...", fg='green')
    #Location/path of the logs.
mypath = "./data/"
#Create Dataframes for each (Apache Log, Image Search API Log4j, Page Search API Log4j, Webapp API Log4j).
df_merge_apache_file = None
df_merge_image_file = None
df_merge_page_file = None
df_merge_arquivo_webapp_file = None
# Just to initialize variables that we are going to use (can be removed).
df_log = None
df_image = None
df_page = None
df_arquivo = None
## For each log file:
for subdir, dirs, files in os.walk(mypath):
#If list is not empty.
if files:
## Progress bar with the number of log files.
with click.progressbar(length=len(files), show_pos=True) as progress_bar_total:
for file in files:
progress_bar_total.update(1)
#Get Filename
file_name = os.path.join(subdir, file)
# Process Apache Logs
if file_name.startswith("./data/logs/arquivo.pt_apache/logfile"):
#Read file into Dataframe
names_apache = ["IP_ADDRESS", "CLIENT_ID", "USER_ID", "DATE", "ZONE", "REQUEST", "STATUS_CODE", "SIZE_RESPONSE", "PREVIOUS_REQUEST", "USER_AGENT", "RESPONSE_TIME"]
df_log = pd.read_csv(file_name, sep='\s+', names=names_apache)
#Init new collumns
df_log["UNIQUE_USER"] = ""
df_log["SPELLCHECKED"] = 0
df_log["REFER"] = ""
#Tracking
df_log["TRACKINGID"] = ""
df_log["USER_TRACKING_ID"] = ""
df_log["SEARCH_TRACKING_ID"] = ""
#Date
df_log["TIMESTAMP"] = 0
df_log["YEAR"] = 0
df_log["MONTH"] = 0
df_log["DAY"] = 0
df_log["HOUR"] = 0
df_log["MINUTE"] = 0
#Search and Query
df_log["TYPE_SEARCH"] = ""
df_log["QUERY"] = ""
df_log["LANG_REQUEST"] = ""
df_log["FROM_REQUEST"] = ""
df_log["TO_REQUEST"] = ""
df_log["PREVIOUS_QUERY"] = ""
df_log["MAXITEMS"] = 0
df_log["PAGE"] = 0
#Query from robots or internal requests (default is 0, "Not a Bot")
df_log["BOT"] = 0
## Progress Bar of the number of lines processed (Apache Log File).
with click.progressbar(length=df_log.shape[0], show_pos=True) as progress_bar:
for i in df_log.index:
progress_bar.update(1)
#Get Request
request = df_log.at[i, 'REQUEST']
#Get Previous Request
previous_request = df_log.at[i, 'PREVIOUS_REQUEST']
                                #Problem with some requests (skip non-string values)
if isinstance(request, str) and isinstance(previous_request, str):
#We will create different files (Query Log file and Wayback Log file)
# Check if the request is not from wayback
if "wayback" not in request:
# Only process requests from textsearch, imagesearch, search.jsp, and images.jsp.
if request.startswith("GET /textsearch?") or request.startswith("GET /imagesearch?") or request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
processDataframe(request, previous_request, file_name, df_log, i, True)
#Generate a unique identifier for each user, making it an anonymized user.
string_user = str(df_log.at[i, 'IP_ADDRESS']) + str(df_log.at[i, 'USER_AGENT'])
df_log.at[i, 'UNIQUE_USER'] = int(hashlib.sha1(string_user.encode("utf-8")).hexdigest(), 16) % (10 ** 8)
#Check if the entry was generated because the user clicked on the query suggestion.
if "spellchecked=true" in previous_request:
df_log.at[i, 'SPELLCHECKED'] = 1
#Get a dictionary with the refers
if "arquivo.pt" not in previous_request:
df_log.at[i, 'REFER'] = previous_request
if previous_request not in map_refer:
map_refer[previous_request] = 1
else:
map_refer[previous_request] += 1
else:
#This condition removes lines such as "GET /js/jquery-1.3.2.min.js HTTP/1.1"
df_log.at[i, 'BOT'] = 1
else:
"""
Process the wayback requests
"""
                                        #Set the entry as "Bot" so it does not appear in the queries dataset.
df_log.at[i, 'BOT'] = 1
getParametersFromRequestWayback(request, df_log, i)
else:
df_log.at[i, 'BOT'] = 1
#Remove entries from "BOTs"
df_log = df_log[df_log['BOT']==0]
#Concatenate the file with previous files
df_log = df_log[['IP_ADDRESS', 'STATUS_CODE', 'REQUEST', 'USER_AGENT', 'TRACKINGID', 'USER_TRACKING_ID', 'SEARCH_TRACKING_ID', 'TIMESTAMP', 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'TYPE_SEARCH', 'QUERY', 'PAGE', 'MAXITEMS', 'LANG_REQUEST', 'FROM_REQUEST', 'TO_REQUEST', 'REFER', 'SPELLCHECKED', 'UNIQUE_USER']]
frames = [df_merge_apache_file, df_log]
df_merge_apache_file = pd.concat(frames)
## Logs Image Search API
if file_name.startswith("./data/logs/arquivo.pt_image_search/imagesearch"):
#Read file into DataFrame
names_image_search = ["DATE", "LOG_TYPE", "APPLICATION", "-", "IP_ADDRESS", "USER_AGENT", "URL_REQUEST", "IMAGE_SEARCH_RESPONSE(ms)", "IMAGE_SEARCH_PARAMETERS", "IMAGE_SEARCH_RESULTS"]
df_image = pd.read_csv(file_name, sep='\t', error_bad_lines=False, names=names_image_search)
#Init New Collumns
df_image["TRACKINGID"] = ""
df_image["BOT"] = 0
df_image["TIMESTAMP"] = 0
## Progress Bar of the number of lines processed (Image Search API Log4j).
with click.progressbar(length=df_image.shape[0], show_pos=True) as progress_bar:
for i in df_image.index:
progress_bar.update(1)
# Just a sanity check.
if not pd.isnull(df_image.at[i, 'IP_ADDRESS']):
request = df_image.at[i, 'URL_REQUEST']
# Just a sanity check.
if not pd.isnull(request):
                                        #FIXME: the URL should be processed more thoroughly
processDataframe(request, "", file_name, df_image, i, False)
#Remove "ms" from the string
df_image.at[i, 'IMAGE_SEARCH_RESPONSE(ms)'] = df_image.at[i, 'IMAGE_SEARCH_RESPONSE(ms)'].replace("ms", "")
else:
df_image.at[i, 'BOT'] = 1
else:
df_image.at[i, 'BOT'] = 1
#Remove entries from "BOTs" and entries with empty TRACKINGID
df_image = df_image[df_image['BOT']==0]
df_image = df_image[df_image["TRACKINGID"] != ""]
#Concatenate the file with previous files
df_image = df_image[["TIMESTAMP", "IP_ADDRESS", "USER_AGENT", "URL_REQUEST", "IMAGE_SEARCH_RESPONSE(ms)", "IMAGE_SEARCH_PARAMETERS", "IMAGE_SEARCH_RESULTS", "TRACKINGID"]]
frames = [df_merge_image_file, df_image]
df_merge_image_file = | pd.concat(frames) | pandas.concat |
"""
Transfer applications.
|pic1|
.. |pic1| image:: ../images_source/transfer_tools/transfer.png
:width: 30%
"""
import os
import sys
from subprocess import Popen, PIPE
from pathlib import Path
import pandas as pd
import pexpect
import requests
import zipfile
from selenium.webdriver.chrome import webdriver
import urllib3
import pdfplumber
import io
class Access:
"""
Functions for accessing file systems and protocols.
"""
@classmethod
def proxies(cls, domain):
"""
A function to create an http/https proxy address.
:param domain: domain address.
:return: Http/https proxy address.
"""
res = {
'http': 'http://' + \
os.environ['usr'] + \
':' + os.environ['pwd'] + \
f'@proxyfarm.{domain}.com:8080'
,
'https': 'https://' + \
os.environ['usr'] + \
':' + os.environ['pwd'] + \
f'@proxyfarm.{domain}.com:8080'
}
return res
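    # Illustrative usage (assumes the environment variables `usr` and `pwd`
    # are set; "acme" is a placeholder domain, not a real proxy endpoint):
    #
    #   >>> proxies = Access.proxies("acme")
    #   >>> requests.get("https://example.com", proxies=proxies)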
class Local:
"""
Functions for accessing local files.
"""
@classmethod
def zip_dir(cls, directory_list, zipname):
"""
Compress a directory into a single ZIP file.
:param directory_list: List of files to compress into zip file.
:param zipname: Name of zip file to compress files into.
:return: Zip file containing files.
"""
outZipFile = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED)
for idx, dir in enumerate(directory_list):
# if idx == 0: break
if not os.path.exists(dir):
print(f"Error, directory {dir} does not exist")
continue
# The root directory within the ZIP file.
rootdir = os.path.basename(dir)
try:
os.listdir(dir)
for dirpath, dirnames, filenames in os.walk(dir):
for filename in filenames:
# Write the file named filename to the archive,
# giving it the archive name 'arcname'.
filepath = os.path.join(dirpath, filename)
parentpath = os.path.relpath(filepath, dir)
arcname = os.path.join(rootdir, parentpath)
outZipFile.write(filepath, arcname)
except:
# exception means there are no files inside the directory
# so we write the normal file
outZipFile.write(dir, dir.split("/")[-1])
outZipFile.close()
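    # Illustrative usage (paths are hypothetical):
    #
    #   >>> Local.zip_dir(["./reports", "./data/output.csv"], "archive.zip")
    #   # -> writes archive.zip containing the directory tree plus the single file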
@classmethod
def clear_delete_directory(cls, directory, method="delete"):
"""
Clears and/or deletes a directory and its contents.
:param directory: Filepath of directory.
:param method: Optional delete for the directory folder.
:return: Nothing.
"""
directory = Path(directory)
for item in directory.iterdir():
if item.is_dir():
Local.clear_delete_directory(item)
else:
item.unlink()
if method == "delete":
directory.rmdir()
@classmethod
def fix_user_path(cls, dir):
"""
Fixes a local filepath.
:param dir: Directory to patch.
:return: Patched directory.
"""
dir_components = dir.split("/")
search = "Users"
for idx, elem in enumerate(dir_components):
if elem == search:
break
dir_components[idx + 1] = os.environ['os_name']
r = "/".join(dir_components)
return r
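    # Illustrative example (assumes os.environ['os_name'] == "jane.doe";
    # the input path is made up):
    #
    #   >>> Local.fix_user_path("/Users/old_name/Documents/report.csv")
    #   '/Users/jane.doe/Documents/report.csv'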
@classmethod
def get_all_filetimes(cls, dir, exclude=False):
"""
Creates a Pandas DataFrame of filenames and file times in a given directory.
:param dir: Directory of files.
:param exclude: A string to search for files to exclude.
:return: Pandas DataFrame of filenames and file times to a directory.
"""
files = os.listdir(dir)
if exclude:
files = [f for f in files if exclude not in f]
times = [os.path.getmtime(dir + f) for f in files]
file_times = pd.DataFrame({"files": files, "times": times})
return file_times
@classmethod
def get_latest_file(cls, name, dir, exclude=False):
"""
Get the latest file in a directory.
:param name: String match name of file(s).
:param dir: Directory for the file search.
:param exclude: A string to search for files to exclude.
:return: Name of most recent file.
"""
# file name str to lowercase
name = name.lower()
# get list of files
files = os.listdir(dir)
if exclude:
files = [f for f in files if exclude not in f]
times = [os.path.getmtime(dir + f) for f in files]
file_times = pd.DataFrame({"files": files, "times": times})
file_times['files_lower'] = file_times['files'].str.lower()
file_times = file_times[file_times['files_lower'].str.contains(name)]
read_file = file_times[file_times['times'] == max(file_times['times'])]['files']
read_file = read_file.values[0]
return read_file
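    # Illustrative usage (directory and file names are hypothetical):
    #
    #   >>> Local.get_latest_file("sales", "./downloads/", exclude="~$")
    #   'sales_2021_03.xlsx'     # whichever matching file was modified last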
@classmethod
def read_files_like(cls, name, dir, ext_typ, sheet_name=False):
"""
Reads and concatenates files in a directory that match a string. Returns a Pandas DataFrame.
:param name: A string search to match.
:param dir: Directory to search for files in.
:param ext_typ: Extension type to search for (.XLSX OR .CSV)
:param sheet_name: If extension type is not ".CSV", specifies the sheet number or sheet name to read.
:return: Concatenated Pandas DataFrames that match a string.
"""
files = os.listdir(dir)
files = pd.DataFrame({"files": files})
files['files'] = files['files'].str.lower()
files = files[(
files['files'].str.contains(name)
&
files['files'].str.contains(ext_typ)
)]
files.reset_index(inplace=True)
for idx, f in files.iterrows():
if ext_typ == "csv":
dat = | pd.read_csv(dir + f['files']) | pandas.read_csv |
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension import base
def make_data():
return list(range(1, 9)) + [pd.NA] + list(range(10, 98)) + [pd.NA] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_for_twos(dtype):
return integer_array(np.ones(100) * 2, dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([pd.NA, 1], dtype=dtype)
@pytest.fixture
def data_for_sorting(dtype):
return integer_array([1, 2, 0], dtype=dtype)
@pytest.fixture
def data_missing_for_sorting(dtype):
return integer_array([1, pd.NA, 0], dtype=dtype)
@pytest.fixture
def na_cmp():
# we are pd.NA
return lambda x, y: x is pd.NA and y is pd.NA
@pytest.fixture
def na_value():
return pd.NA
@pytest.fixture
def data_for_grouping(dtype):
b = 1
a = 0
c = 2
na = pd.NA
return | integer_array([b, b, na, na, a, a, b, c], dtype=dtype) | pandas.core.arrays.integer_array |
"""
(C) IBM Corp, 2019, All rights reserved
Created on Aug 25, 2019
@author: <NAME>
"""
import unittest
import pandas as pd
import numpy as np
from causallib.estimation import MarginalOutcomeEstimator
class TestMarginalOutcomeEstimator(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.X = pd.DataFrame([[1, 1, 0, 0, 1, 0, 0, 0, 1, 1]])
cls.y = pd.Series([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
cls.a = pd.Series([0, 0, 0, 0, 1, 0, 1, 1, 1, 1])
def setUp(self):
self.model = MarginalOutcomeEstimator(learner=None)
def test_fit_return(self):
model = self.model.fit(self.X, self.a, self.y)
self.assertTrue(isinstance(model, MarginalOutcomeEstimator))
def test_outcome_estimation(self):
self.model.fit(self.X, self.a, self.y)
outcomes = self.model.estimate_population_outcome(self.X, self.a, self.y)
truth = pd.Series([1 / 5, 4 / 5], index=[0, 1])
pd.testing.assert_series_equal(truth, outcomes)
with self.subTest("Change covariate and see no change in estimation"):
X = pd.DataFrame(np.arange(20).reshape(4, 5)) # Different values and shape
outcomes = self.model.estimate_population_outcome(X, self.a, self.y)
truth = | pd.Series([1/5, 4/5], index=[0, 1]) | pandas.Series |
from sklearn.preprocessing import StandardScaler, LabelEncoder, Imputer
from keras.utils import np_utils
import pandas as pd
import numpy as np
import logging
class explicit_imputer():
def __init__(self):
pass
def transform(self, data):
if not isinstance(data, pd.DataFrame):
raise Exception("Input to explicit imputer has to be a pandas df")
data_out = data.fillna('None')
return data_out
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y) # convert label vector to dummy binaries matrix
return y, encoder
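# Illustrative sketch (the labels below are made up):
#
#   >>> y, enc = preprocess_labels(pd.Series(["cat", "dog", "cat"]))
#   >>> y.shape                  # one-hot matrix: 3 samples x 2 classes
#   (3, 2)
#   >>> list(enc.classes_)
#   ['cat', 'dog']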
def drop_irrelevant_cols(data, cols_to_drop):
"""
Drop the cols_to_drop from the input data
:param data: pd.DataFrame
:param cols_to_drop: list
:return: pd.DataFrame, reduced dataframe
"""
reduced_data = data.drop(cols_to_drop, axis=1)
return reduced_data
def tidy_data(X):
"""
Calculate the additinoal fields, based on the raw fields in the dataset.
:param X: pd.DataFrame, input dataset
:return: X
"""
if not isinstance(X, pd.DataFrame):
raise Exception("Input to derive_data() has to be a pandas df")
# Calculate Age
if 'dem_mat_age' not in X.columns:
if ('dem_dob' in X.columns) and ('t1_date_of_exam' in X.columns):
X['t1_date_of_exam'] = pd.to_datetime(X['t1_date_of_exam'], format='%Y-%m-%d')
X['dem_dob'] = | pd.to_datetime(X['dem_dob'], format='%Y-%m-%d') | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# # Python script for automatic quality-control procedures (CEMADEN data)
# # Created on Aug.12.2020
# ### By:
# <NAME>
# <NAME>
# <NAME>
# Importing libraries used in this code
# In[ ]:
import numpy as np
import pandas as pd
from datetime import datetime
import glob
import warnings
import sys
import esda
import libpysal as lps
# Assigning values to main variables and other parameters
# In[ ]:
#Data storage path
path = 'D:/CEMADEN/'
years = [2014 , 2015, 2016, 2017, 2018, 2019,2020] #years used in the analysis
states = ['PE','PB','RN','BA','SE','PI','CE','MA','AL','AC','AM','RO','RR','AP','TO','PA','MT',
'MS','DF','GO','RS','SC','PR','ES','MG','RJ','SP'] #states used in the analysis
#Filters variables
threshold_missing_data_days=60 #days in a year without data
threshold_days_without_rain=200 #days in a row without rain
threshold_constant_days=35 #days in a row with rain = 0,2mm
threshold_max_peak=40 #record of xmm in 10min
#Moran's I variables
properties=['rainfall_events','mean_rainfall_depth','yearly_rainfall'] #properties calculated based on the event durations defined
mits_integer = [60, 360, 1439] #MIT (minimum inter-event time) lengths used
n_neighbors = 5
p_value = 0.05
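# Illustrative sketch (not part of the original notebook): how the Moran's I
# settings above could be applied with esda/libpysal. The dataframe layout and
# the 'longitude'/'latitude' column names are assumptions.
def morans_i_sketch(df_gauges, property_name):
    # Build a k-nearest-neighbours spatial weights matrix from gauge coordinates
    coords = df_gauges[['longitude', 'latitude']].values
    w = lps.weights.KNN(coords, k=n_neighbors)
    w.transform = 'r'  # row-standardize the weights
    # Global Moran's I for the chosen property (e.g., 'yearly_rainfall')
    mi = esda.Moran(df_gauges[property_name].values, w)
    # Significant spatial autocorrelation when the pseudo p-value is below p_value
    return mi.I, mi.p_sim, mi.p_sim < p_value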
# Functions
# ------
# In[ ]:
#This function inserts a beginning and an ending date for each gauge code, for each year (1 January through 31 December), so that the full 365 days are covered
def insert_begin_end(df, year, code):
datatemp=str(year)+'-01-01 00:00:10' #temporary date - beginning
data_b = {'gauge_code': [code],
'city': ['x'],
'state': ['x'],
'datetime': [datatemp], #assigning beginning date to code
'rain_mm': [-1],
}
datatemp=str(year)+'-12-31 23:59:10' #temporary date - end
data_e = {'gauge_code': [code],
'city': ['x'],
'state': ['x'],
'datetime': [datatemp],
'rain_mm': [0],
}
df_b = pd.DataFrame(data_b)
df_e = pd.DataFrame(data_e)
df_b['datetime']= | pd.to_datetime(df_b['datetime']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = | tm.box_expected(expected, box2) | pandas.util.testing.box_expected |
# Prepare data script for longitudinal predictions.
import os
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
def preprocess(tadpoleD1D2File, out_dir, leaderboard=0, tadpoleLB1LB2File=''):
# Settings
#leaderboard = 0
# Input directory
#str_exp = os.path.dirname(os.path.realpath(__file__))
#os.chdir(str_exp)
# Input file
#tadpoleD1D2File = os.path.join(str_exp, 'Data', 'TADPOLE_D1_D2.csv')
Dtadpole = pd.read_csv(tadpoleD1D2File)
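    # DXCHANGE codes 4-6 are assumed here to mark conversion visits
    # (NL->MCI, MCI->AD, NL->AD), i.e. subjects who progress.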
idx_progress = np.logical_and(Dtadpole['DXCHANGE'] >= 4, Dtadpole['DXCHANGE'] <= 6)
SubC = np.unique(Dtadpole.loc[idx_progress, 'RID'])
SubC = | pd.Series(SubC) | pandas.Series |
import copy
import json
import sdg
import pandas as pd
import numpy as np
import collections.abc
from sdg.translations import TranslationHelper
from sdg.Loggable import Loggable
class Indicator(Loggable):
"""Data model for SDG indicators."""
def __init__(self, inid, name=None, data=None, meta=None, options=None, logging=None):
"""Constructor for the SDG indicator instances.
Parameters
----------
inid : string
The three-part dash-delimited ID (eg, 1-1-1).
name : string
The name of the indicator.
data : Dataframe
Dataframe of all data, with at least "Year" and "Value" columns.
meta : dict
Dict of fielded metadata.
options : IndicatorOptions
Output-specific options provided by the OutputBase class.
"""
Loggable.__init__(self, logging=logging)
self.inid = inid
self.name = name
self.data = data
self.meta = meta
self.options = sdg.IndicatorOptions() if options is None else options
self.set_headline()
self.set_edges()
self.translations = {}
self.serieses = None
self.data_matching_schema = {}
def has_name(self):
"""Check to see if the indicator has a name.
Returns
-------
boolean
True if the indicator has a name.
"""
return self.name is not None
def get_name(self):
"""Get the name of the indicator if known, or otherwise the id.
Returns
-------
string
The name (or id) of the indicator.
"""
return self.name if self.name is not None else self.inid
def set_name(self, name=None):
"""Set the name of the indicator."""
if name is not None:
self.name = name
def has_data(self):
"""Check to see if this indicator has data.
Returns
-------
boolean
True if the indicator has data.
"""
if self.data is None:
# If data has not been set yet, return False.
return False
# Otherwise return False if there are no rows in the dataframe.
return False if len(self.data) < 1 else True
def has_meta(self):
"""Check to see if this indicator has metadata.
Returns
-------
boolean
True if the indicator has metadata.
"""
return False if self.meta is None else True
def set_data(self, val):
"""Set the indicator data if a value is passed.
Parameters
----------
val : Dataframe or None
"""
# If empty or None, do nothing.
if val is None or not isinstance(val, pd.DataFrame) or val.empty:
return
self.data = val
self.set_headline()
self.set_edges()
def set_meta(self, val):
"""Set the indicator metadata if a value is passed.
Parameters
----------
val : Dict or None
"""
if val is not None and val:
if self.has_meta():
self.meta = self.deepUpdate(self.meta, val)
else:
self.meta = val
def deepUpdate(self, d, u):
"""Recursive utility method for merging nested dicts."""
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = self.deepUpdate(d.get(k, {}), v)
else:
d[k] = v
return d
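    # Sketch of the merge behaviour with made-up values:
    #   deepUpdate({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
    #   -> {'a': {'x': 1, 'y': 2}, 'b': 3}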
def set_headline(self):
"""Calculate and set the headline for this indicator."""
self.require_data()
non_disaggregation_columns = self.options.get_non_disaggregation_columns()
self.headline = sdg.data.filter_headline(self.data, non_disaggregation_columns)
def has_headline(self):
"""Report whether this indicator has a headline."""
return hasattr(self, 'headline') and not self.headline.empty
def set_edges(self):
"""Calculate and set the edges for this indicator."""
self.require_data()
non_disaggregation_columns = self.options.get_non_disaggregation_columns()
self.edges = sdg.edges.edge_detection(self.inid, self.data, non_disaggregation_columns)
def has_edges(self):
"""Report whether this indicator has edges."""
return hasattr(self, 'edges') and not self.edges.empty
def get_goal_id(self):
"""Get the goal number for this indicator.
Returns
-------
string
The number of the goal.
"""
return self.inid if self.is_standalone() else self.inid.split('-')[0]
def get_target_id(self):
"""Get the target id for this indicator.
Returns
-------
string
The target id, dot-delimited.
"""
return self.inid if self.is_standalone() else '.'.join(self.inid.split('-')[0:2])
def get_indicator_id(self):
"""Get the indicator id for this indicator (dot-delimited version).
Returns
-------
string
The indicator id, dot-delimited.
"""
return self.inid if self.is_standalone() else self.inid.replace('-', '.')
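    # Example (IDs assumed): for a non-standalone inid '1-2-1' these helpers
    # return goal '1', target '1.2' and indicator '1.2.1'; standalone
    # indicators simply return self.inid unchanged.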
def get_slug(self):
"""Get the dash-delimited id for this indicator (eg, for use in URLs).
Returns
-------
string
The indicator id, dash-delimited.
"""
return self.inid
def require_meta(self, minimum_metadata=None):
"""Ensure the metadata for this indicator has minimum necessary values.
Parameters
----------
minimum_metadata : Dict
Key/value pairs of minimum metadata for this indicator.
"""
if minimum_metadata is None:
minimum_metadata = {}
if self.meta is None:
self.meta = minimum_metadata
else:
for key in minimum_metadata:
if key not in self.meta:
self.meta[key] = minimum_metadata[key]
def require_data(self):
"""Ensure at least an empty dataset for this indicator."""
if self.data is None:
df = | pd.DataFrame({'Year':[], 'Value':[]}) | pandas.DataFrame |
import pandas as pd
import ast
import json
import pickle
import binascii
from parallelm.mlops.constants import Constants, HistogramType
from parallelm.mlops.stats_category import StatGraphType
from parallelm.mlops.mlops_exception import MLOpsException
from parallelm.mlops.constants import DataframeColNames
expected_jsons = [{"id":"","configurable":False,"mlStat":{"name":"","graphType":"BARGRAPH"},"values":[{"name":"stat","columns":["time","value"],"values":[["2018-02-08T18:27:28.273Z","{\"bar\": \"{\\\"Col1\\\": 10,\\\"Col2\\\": 15}\"}"]]}]},
{"id":"","configurable":False,"mlStat":{"name":"","id":"","graphType":"MULTILINEGRAPH"},"values":[{"name":"stat","columns":["time","value"],"values":[["2018-02-08T18:27:28.262Z","{\"l2\": 15, \"l3\": 22, \"l1\": 0}"]]}]},
{"id":"bebd75ee-0ce3-4ea8-90fa-ad90316b71b3","configurable":False,"mlStat":{"name":"","id":"", "graphType":"LINEGRAPH"},"values":[{"name": "stat","columns":["time","value"],"values":[["2018-02-08T18:27:27.183Z","{\"0\":1.0}"],["2018-02-08T18:27:27.187Z","{\"0\":2.0}"]]}]}]
class DataFrameHelper(object):
"""
Helper class to convert json to dataframes.
"""
@staticmethod
def _update_data_dict(data, key_values, time_stamp, column_name):
"""
        Append the key_values dict to the running {column: [values]} dict, recording time_stamp under column_name.
:return: Updated dictionary
"""
for k, v in key_values.items():
if k in data:
data[k].append(v)
else:
data[k] = [v]
if column_name in data:
data[column_name].append(time_stamp)
else:
data[column_name] = [time_stamp]
return data
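    # For example (made-up values): starting from data == {} and
    # key_values == {'l1': 0.1, 'l2': 0.2}, a single call yields
    # {'l1': [0.1], 'l2': [0.2], <column_name>: [<time_stamp>]}.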
@staticmethod
def _create_dict(graph_type, values, stat_name, columns, no_of_lines):
"""
        Build a {column: [values]} dictionary from the raw rows, consuming at most no_of_lines rows.
:return: Dictionary
:raises: MLOpsException
"""
data_dictionary = {}
data = {}
lines = 0
for value in values:
if lines == no_of_lines:
data_dictionary.update(data)
return data_dictionary
if len(value) == len(columns) and len(value) >= Constants.MIN_STAT_COLUMNS:
if graph_type == StatGraphType.LINEGRAPH:
if columns[1] in data:
data[columns[1]].append(list(ast.literal_eval(value[1]).values())[0])
data[columns[0]].append(value[0])
else:
data[columns[1]] = list(ast.literal_eval(value[1]).values())
data[columns[0]] = [value[0]]
elif graph_type == StatGraphType.MULTILINEGRAPH:
temp_values = ast.literal_eval(value[1])
data.update(DataFrameHelper._update_data_dict(data, temp_values, value[0], columns[0]))
elif graph_type == StatGraphType.BARGRAPH:
temp_values = ast.literal_eval(value[1])
for k, v in temp_values.items(): # To ignore legend of graph key, need to go one more level down
temp_bar_values = {}
if type(v) == list:
hist = []
bin_edges = []
keys = []
for item in v:
hist.append(list(item.values())[0])
key = list(item.keys())[0]
keys.append(key)
for bin_edge in key.split(HistogramType.HISTOGRAM_BIN_IDENTIFIER):
if bin_edge not in bin_edges:
bin_edges.append(bin_edge)
if all(HistogramType.HISTOGRAM_BIN_IDENTIFIER in edge for edge in keys):
temp_bar_values[HistogramType.HISTOGRAM_TYPE_COLUMN] = \
StatGraphType.HISTOGRAM_TYPE_CONTIGOUOUS
else:
temp_bar_values[HistogramType.HISTOGRAM_TYPE_COLUMN] = \
StatGraphType.HISTOGRAM_TYPE_CATEGORICAL
temp_bar_values[HistogramType.HISTOGRAM_COLUMN] = hist
temp_bar_values[HistogramType.BIN_EDGES_COLUMN] = bin_edges
temp_bar_values[HistogramType.STAT_NAME_COLUMN] = stat_name
else:
temp_bar_values = ast.literal_eval(v)
data.update(DataFrameHelper._update_data_dict(data, temp_bar_values, value[0], columns[0]))
elif graph_type == StatGraphType.OPAQUE:
vv = ast.literal_eval(value[1])
key, opq_data = vv.popitem()
opq_data = pickle.loads(binascii.unhexlify(opq_data.encode('utf-8')))
if len(data) == 0:
data[columns[0]] = [value[0]]
data[DataframeColNames.OPAQUE_DATA] = [opq_data]
else:
data[columns[0]].append(value[0])
data[DataframeColNames.OPAQUE_DATA].append(opq_data)
else:
raise MLOpsException("Invalid json, Number of columns and values don't match. Given columns: {}, "
"values: {}. \nExpected json formats:{}".format(columns, value, expected_jsons))
lines += 1
data_dictionary.update(data)
return data_dictionary
@staticmethod
def single_histogram_dict(json_dict, stat_name):
hist_values = []
bin_edges = []
all_bin_str = []
for bin_and_value in json_dict:
bin_str = None
value = None
for item in bin_and_value.items():
bin_str = item[0]
value = item[1]
break
hist_values.append(value)
all_bin_str.append(bin_str)
for bin_edge in bin_str.split(HistogramType.HISTOGRAM_BIN_IDENTIFIER):
if bin_edge not in bin_edges:
bin_edges.append(bin_edge)
if all(HistogramType.HISTOGRAM_BIN_IDENTIFIER in edge for edge in all_bin_str):
hist_type = StatGraphType.HISTOGRAM_TYPE_CONTIGOUOUS
else:
hist_type = StatGraphType.HISTOGRAM_TYPE_CATEGORICAL
hist_dict = {
HistogramType.STAT_NAME_COLUMN: stat_name,
HistogramType.HISTOGRAM_TYPE_COLUMN: hist_type,
HistogramType.HISTOGRAM_COLUMN: hist_values,
HistogramType.BIN_EDGES_COLUMN: bin_edges
}
return hist_dict
@staticmethod
def multi_histogram_df(multi_histogram_dict):
hist_list = []
for attribute in multi_histogram_dict:
hist_dict = DataFrameHelper.single_histogram_dict(multi_histogram_dict[attribute], attribute)
hist_list.append(hist_dict)
df = | pd.DataFrame(data=hist_list) | pandas.DataFrame |
"""
Convert the original database into a database with one file per stock id.
"""
import os
import pandas as pd
import numpy as np
import pickle
# Load the market quote (hangqing) data
file_path = 'C:/Users/Administrator/Desktop/program/data/hangqing/'
file_list = os.listdir(file_path)
columns_name = pd.read_csv(file_path+file_list[0]).columns
hangqing_record = []
temp_record = pd.DataFrame(columns=columns_name)
for i in range(len(file_list)):
now_path = file_path+file_list[i]
now_df = pd.read_table(now_path, sep=',')
temp_record = pd.concat((temp_record, now_df), axis=0)
if (i+1) % 50 == 0 or (i+1) == len(file_list):
del temp_record['Unnamed: 0']
del temp_record['Unnamed: 25']
hangqing_record.append(temp_record)
temp_record = | pd.DataFrame(columns=columns_name) | pandas.DataFrame |
'''
Created on Nov. 11, 2019
Mosaik interface for the Distribution State Estimation.
@file simulator_dse.py
@author <NAME>
@date 2019.11.11
@version 0.1
@company University of Alberta - Computing Science
'''
import mosaik_api
import numpy as np
import pandas as pd
import os
import sys
import csv
from ast import literal_eval
import scipy.io as spio
import math
from pathlib import Path
META = {
'models': {
'Estimator': {
'public': True,
'params': ['idt', 'ymat_file', 'devs_file', 'acc_period', 'max_iter', 'threshold', 'baseS', 'baseV', 'baseNode', 'basePF', 'se_period', 'se_result', 'pseudo_loads', 'verbose'],
'attrs': ['v', 't'],
},
},
}
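# Illustrative mosaik wiring only (names are placeholders, not part of this file):
#   dse = world.start('DSESim', eid_prefix='Estimator_', step_size=100)
#   estimator = dse.Estimator(idt=1, ymat_file='ymat.mat', devs_file='devs.csv', ...)
# where the keyword arguments follow META['models']['Estimator']['params'] above.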
class DSESim(mosaik_api.Simulator):
def __init__(self):
super().__init__(META)
self.entities = {}
self.next = {}
self.instances = {}
self.devParams = {}
self.data = {}
def init(self, sid, eid_prefix=None, step_size=1, verbose=0):
if eid_prefix is not None:
self.eid_prefix = eid_prefix
self.sid = sid
self.step_size = step_size
self.verbose = verbose
self.cktState = {}
self.MsgCount = 0
return self.meta
def create(self, num, model, idt, ymat_file, devs_file, acc_period, max_iter, threshold, baseS, baseV, baseNode, basePF, se_period, pseudo_loads, se_result):
if (self.verbose > 0): print('simulator_dse::create', num, model, idt)
eid = '%s%s' % (self.eid_prefix, idt)
self.entities[eid] = {}
self.entities[eid]['ymat_file'] = ymat_file
self.entities[eid]['devs_file'] = devs_file
self.entities[eid]['acc_period'] = acc_period
self.entities[eid]['max_iter'] = max_iter
self.entities[eid]['threshold'] = threshold
self.entities[eid]['type'] = model
self.entities[eid]['baseS'] = baseS
self.entities[eid]['baseV'] = baseV
self.entities[eid]['baseI'] = baseS/baseV
self.entities[eid]['baseY'] = baseS/np.power(baseV,2)
self.entities[eid]['baseNode'] = baseNode
self.entities[eid]['basePF'] = basePF
self.entities[eid]['se_period'] = se_period
self.entities[eid]['pseudo_loads'] = pseudo_loads
self.entities[eid]['se_result'] = se_result
self.entities[eid]['vecZ'] = {}
self.entities[eid]['nodes'] = 0
self.entities[eid]['df_devs'] = | pd.DataFrame({}) | pandas.DataFrame |
"""
Import as:
import core.test.test_statistics as cttsta
"""
import logging
from typing import List
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as casgen
import core.finance as cfinan
import core.signal_processing as csproc
import core.statistics as cstati
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestComputeMoments(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series, prefix="moments_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.compute_moments(series)
def test4(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test6(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.compute_moments(series)
def test7(self) -> None:
"""
Test series with `inf`.
"""
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[4] = np.inf
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestComputeFracZero(hut.TestCase):
def test1(self) -> None:
data = [0.466667, 0.2, 0.13333, 0.2, 0.33333]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.4,
0.2,
0.4,
0.0,
0.6,
0.4,
0.6,
0.2,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1), axis=1)
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test3(self) -> None:
# Equals 20 / 75 = num_zeros / num_points.
expected = 0.266666
actual = cstati.compute_frac_zero(self._get_df(seed=1), axis=None)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test4(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.466667
actual = cstati.compute_frac_zero(series)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test5(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.466667
actual = cstati.compute_frac_zero(series, axis=0)
np.testing.assert_almost_equal(actual, expected, decimal=3)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.compute_frac_zero(series)
@staticmethod
def _get_df(seed: int) -> pd.DataFrame:
nrows = 15
ncols = 5
num_nans = 15
num_infs = 5
num_zeros = 20
#
np.random.seed(seed=seed)
mat = np.random.randn(nrows, ncols)
mat.ravel()[np.random.choice(mat.size, num_nans, replace=False)] = np.nan
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = np.inf
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = -np.inf
mat.ravel()[np.random.choice(mat.size, num_zeros, replace=False)] = 0
#
index = pd.date_range(start="01-04-2018", periods=nrows, freq="30T")
df = pd.DataFrame(data=mat, index=index)
return df
class TestComputeFracNan(hut.TestCase):
def test1(self) -> None:
data = [0.4, 0.133333, 0.133333, 0.133333, 0.2]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_nan(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.2,
0.2,
0.2,
0.0,
0.4,
0.2,
0.6,
0.0,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_nan(self._get_df(seed=1), axis=1)
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test3(self) -> None:
# Equals 15 / 75 = num_nans / num_points.
expected = 0.2
actual = cstati.compute_frac_nan(self._get_df(seed=1), axis=None)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test4(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.4
actual = cstati.compute_frac_nan(series)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test5(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.4
actual = cstati.compute_frac_nan(series, axis=0)
np.testing.assert_almost_equal(actual, expected, decimal=3)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.compute_frac_nan(series)
@staticmethod
def _get_df(seed: int) -> pd.DataFrame:
nrows = 15
ncols = 5
num_nans = 15
num_infs = 5
num_zeros = 20
#
np.random.seed(seed=seed)
mat = np.random.randn(nrows, ncols)
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = np.inf
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = -np.inf
mat.ravel()[np.random.choice(mat.size, num_zeros, replace=False)] = 0
mat.ravel()[np.random.choice(mat.size, num_nans, replace=False)] = np.nan
#
index = pd.date_range(start="01-04-2018", periods=nrows, freq="30T")
df = pd.DataFrame(data=mat, index=index)
return df
class TestComputeNumFiniteSamples(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati.count_num_finite_samples(series)
class TestComputeNumUniqueValues(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati.count_num_unique_values(series)
class TestComputeDenominatorAndPackage(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati._compute_denominator_and_package(reduction=1, data=series)
class TestTTest1samp(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
series = pd.Series([])
cstati.ttest_1samp(series)
def test2(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.ttest_1samp(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.ttest_1samp(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test4(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.ttest_1samp(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestMultipleTests(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
series = pd.Series([])
cstati.multipletests(series)
# Test if error is raised with default arguments when input contains NaNs.
@pytest.mark.xfail()
def test2(self) -> None:
series_with_nans = self._get_series(seed=1)
series_with_nans[0:5] = np.nan
actual = cstati.multipletests(series_with_nans)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series_with_nans = self._get_series(seed=1)
series_with_nans[0:5] = np.nan
actual = cstati.multipletests(series_with_nans, nan_mode="drop")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = hut.get_random_df(
num_cols=1,
seed=seed,
**date_range,
)[0]
return series
class TestMultiTTest(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
df = pd.DataFrame(columns=["series_name"])
cstati.multi_ttest(df)
def test2(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, prefix="multi_ttest_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, popmean=1)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, nan_mode="fill_with_zero")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test6(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, method="sidak")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@pytest.mark.xfail()
def test7(self) -> None:
df = self._get_df_of_series(seed=1)
df.iloc[:, 0] = np.nan
actual = cstati.multi_ttest(df)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_df_of_series(seed: int) -> pd.DataFrame:
n_series = 7
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
# Generating a dataframe from different series.
df = pd.DataFrame(
[
arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed + i
)
for i in range(n_series)
],
index=["series_" + str(i) for i in range(n_series)],
).T
return df
class TestApplyNormalityTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_normality_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_normality_test(series, prefix="norm_test_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.apply_normality_test(series)
def test4(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.apply_normality_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.apply_normality_test(
series, nan_mode="ffill_and_drop_leading"
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test6(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.apply_normality_test(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestApplyAdfTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, regression="ctt")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, maxlag=5)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, autolag="t-stat")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, prefix="adf_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.apply_adf_test(series)
def test7(self) -> None:
series = self._get_series(seed=1)
series[3:5] = np.nan
actual = cstati.apply_adf_test(series, nan_mode="fill_with_zero")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test8(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.apply_adf_test(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestApplyKpssTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series, regression="ct")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series, nlags="auto")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series, nlags=5)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series, prefix="kpss_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.apply_kpss_test(series)
def test7(self) -> None:
series = self._get_series(seed=1)
series[3:5] = np.nan
actual = cstati.apply_kpss_test(series, nan_mode="fill_with_zero")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test8(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.apply_kpss_test(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestApplyLjungBoxTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, lags=3)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, model_df=3)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, period=5)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, prefix="lb_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test6(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, return_df=False)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test7(self) -> None:
series = | pd.Series([]) | pandas.Series |
import numpy as np
import pandas as pd
import seaborn as sns
import json
from basic.plot import short_name
from basic.read_data import file_settings, read_specify
# clean the dataframe ordered by the sampling-based sensitivity indices
def read_total_effects(fpath_save, product_uniform):
if product_uniform == 'beta':
dist_type = 'beta'
elif product_uniform == 'exact':
dist_type = 'exact'
else:
dist_type = 'uniform'
filename = f'adaptive-reduce-{dist_type}_552.npz'
fileread = np.load(f'{fpath_save}{filename}', allow_pickle=True)
return fileread
def df_read(df, result_type, type_num):
_, parameters = read_specify('parameter', 'reduced', 'uniform', num_vars=11)
df.rename(columns={'Unnamed: 0' : 'Parameters'}, inplace=True)
df['Parameters'] = parameters
df['Type'] = result_type
df['Type_num'] = type_num
return df
# End df_read()
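# Hypothetical use of df_read (file name and labels are placeholders):
#   df_pce = df_read(pd.read_csv('total_effects_pce.csv'), 'PCE', 1)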
fpath_save = '../output/test/'
# read total effects calculated by different PCE settings.
nsample = 156
fileread = read_total_effects(fpath_save, product_uniform='uniform')
df_temp = | pd.DataFrame.from_dict(fileread[fileread.files[-1]][()][f'nsample_{nsample}']) | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import torch
from scipy.interpolate import CubicSpline # for warping
from transforms3d.axangles import axangle2mat # for rotation
import pywt
from scipy import signal
import pandas as pd
class AddGaussianNoise(object):
def __init__(self, mean=0.0, variance=1.0, amplitude=1.0):
self.mean = mean
self.variance = variance
self.amplitude = amplitude
def __call__(self, img):
img = np.array(img)
h, w, c = img.shape
N = self.amplitude * np.random.normal(loc=self.mean, scale=self.variance, size=(h, w, 1))
N = np.repeat(N, c, axis=2)
img = N + img
        #img[img > 255] = 255  # clip values above 255 to avoid wrap-around
#img = Image.fromarray(img.astype('uint8')).convert('RGB')
return img
class dataReshape(object):
def __init__(self, len):
self.len = len
pass
def __call__(self, data):
if self.len == 3:
data = data.squeeze(2)
elif self.len == 4:
data = data.unsqueeze(2)
return data
## This example using cubic splines is not the best approach to generate random curves.
## You can use other approaches, e.g., Gaussian process regression, Bezier curve, etc.
def GenerateRandomCurves(X, sigma=0.2, knot=4):
# X (C, L)
# out (C, L) np.ndarry
from scipy.interpolate import CubicSpline
xx = (np.ones((X.shape[0], 1)) * (np.arange(0, X.shape[1], (X.shape[1] - 1) / (knot + 1)))).transpose()
yy = np.random.normal(loc=1.0, scale=sigma, size=(knot + 2, X.shape[0]))
x_range = np.arange(X.shape[1])
cs = []
for i in range(X.shape[0]):
cs.append(CubicSpline(xx[:, i], yy[:, i]))
return np.array([cs_i(x_range) for cs_i in cs])
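# Rough shape note (channels-first layout assumed): for a (12, 5000) input
# this returns a (12, 5000) array of smooth curves centred on 1.0, which
# MagWarp multiplies into the signal and DistortTimesteps integrates over.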
class GenerateRandomCurvesClass(object):
def __init__(self, sigma=0.2, knot=4):
self.sigma = sigma
self.knot = knot
def __call__(self, tensor):
res = GenerateRandomCurves(tensor, self.sigma, self.knot)
res = torch.from_numpy(res)
return res
def DistortTimesteps(X, sigma=0.2):
# X: (C, L)
# out: (C, L) np.ndarry
    tt = GenerateRandomCurves(X, sigma).transpose()  # Regard these samples around 1 as time intervals
tt_cum = np.cumsum(tt, axis=0) # Add intervals to make a cumulative graph
# Make the last value to have X.shape[0]
t_scale = [(X.shape[1] - 1) / tt_cum[-1, i] for i in range(X.shape[0])]
for i in range(X.shape[0]):
tt_cum[:,i] = tt_cum[:,i]*t_scale[i]
return tt_cum.transpose()
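# Each channel ends up with a warped time axis whose last sample is pinned
# to L-1, which is what lets TimeWarp below interpolate the signal back onto
# the original 0..L-1 grid.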
class DistortTimestepsClass(object):
def __init__(self, sigma=0.2):
self.sigma = sigma
def __call__(self, tensor):
x = DistortTimesteps(tensor, self.sigma)
x = torch.from_numpy(x)
return x
def RandSampleTimesteps(X, nSample=1000):
# X: (C, L)
# out: (C, L) np.ndarry
tt = np.zeros((nSample,X.shape[0]), dtype=int)
for i in range(X.shape[0]):
tt[1:-1,i] = np.sort(np.random.randint(1,X.shape[1]-1,nSample-2))
tt[-1,:] = X.shape[1]-1
return tt.transpose()
class RandSampleTimestepsClass(object):
def __init__(self, nSample=1000):
self.nSample = nSample
def __call__(self, tensor):
x = RandSampleTimesteps(tensor, self.nSample)
x = torch.from_numpy(x)
return x
def WTfilt_1d(sig):
coeffs = pywt.wavedec(data=sig, wavelet='db5', level=9)
cA9, cD9, cD8, cD7, cD6, cD5, cD4, cD3, cD2, cD1 = coeffs
    # The original formulation (len(cD1), kept commented out below) made the training loss NaN
#threshold = (np.median(np.abs(cD1)) / 0.6745) * (np.sqrt(2 * np.log(len(cD1))))
threshold = (np.median(np.abs(cD1)) / 0.6745) * (np.sqrt(2 * np.log(len(cD1[0]))))
    # Zero out the high-frequency detail coefficients cD1 and cD2
cD1.fill(0)
cD2.fill(0)
    # Soft-threshold the remaining mid/low-frequency coefficients
for i in range(1, len(coeffs) - 2):
coeffs[i] = pywt.threshold(coeffs[i], threshold)
rdata = pywt.waverec(coeffs=coeffs, wavelet='db5')
return rdata
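# Hedged usage sketch: a synthetic 1 x 5000 trace stands in for one ECG lead;
# the (1, n) layout is an assumption implied by len(cD1[0]) above, while the
# wavelet settings are exactly those used in WTfilt_1d.
if __name__ == "__main__":
    _noisy = np.sin(np.linspace(0, 20 * np.pi, 5000)).reshape(1, -1)
    _noisy = _noisy + 0.1 * np.random.randn(1, 5000)
    print(WTfilt_1d(_noisy).shape)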
class WTfilt_1d_Class(object):
def __init__(self):
pass
def __call__(self, tensor):
x = WTfilt_1d(tensor)
x = torch.from_numpy(x)
return x
class Jitter(object):
"""
Args:
sigma
"""
def __init__(self, sigma=0.05):
self.sigma = sigma
def __call__(self, tensors):
"""
Args:
tensor (Tensor): Tensor of size (C, L) to be scaled.
Returns:
Tensor: Scaled Tensor.
"""
myNoise = torch.normal(mean=torch.zeros(tensors.shape), std=self.sigma)
# print("This is Jitter")
# print(type(tensors + myNoise))
return (tensors + myNoise).float()
def __repr__(self):
return self.__class__.__name__ + '(sigma={0})'.format(self.sigma)
class Scaling(object):
"""
Args:
sigma
"""
def __init__(self, sigma=0.1):
self.sigma = sigma
def __call__(self, tensors):
"""
Args:
tensor (Tensor): Tensor of size (C, L) to be scaled.
Returns:
Tensor: Scaled Tensor.
"""
scalingFactor = torch.normal(mean=torch.ones((tensors.shape[0], 1)), std=self.sigma)
myNoise = torch.matmul(scalingFactor, torch.ones((1, tensors.shape[1])))
# print("This is Scaling")
# print(type(tensors * myNoise))
return (tensors * myNoise).float()
def __repr__(self):
return self.__class__.__name__ + '(sigma={0})'.format(self.sigma)
class MagWarp(object):
"""
Args:
sigma
"""
def __init__(self, sigma=0.2):
self.sigma = sigma
def __call__(self, tensors):
"""
Args:
tensor (Tensor): Tensor of size (C, L) to be scaled.
Returns:
Tensor: Scaled Tensor.
"""
# print("This is MagWarp")
# print(type(tensors * torch.from_numpy(GenerateRandomCurves(tensors, self.sigma))))
return tensors * torch.from_numpy(GenerateRandomCurves(tensors, self.sigma))
def __repr__(self):
return self.__class__.__name__ + '(sigma={0})'.format(self.sigma)
class TimeWarp(object):
"""
Args:
sigma
"""
def __init__(self, sigma=0.2):
self.sigma = sigma
def __call__(self, tensors):
"""
Args:
tensor (Tensor): Tensor of size (C, L) to be scaled.
Returns:
Tensor: Scaled Tensor.
"""
tt_new = DistortTimesteps(tensors, self.sigma)
X_new = np.zeros(tensors.shape)
x_range = np.arange(tensors.shape[1])
for i in range(tensors.shape[0]):
X_new[i, :] = np.interp(x_range, tt_new[i, :], tensors[i, :])
# print("This is TimeWarp")
# print(type(torch.from_numpy(X_new)))
return torch.from_numpy(X_new).float()
def __repr__(self):
return self.__class__.__name__ + '(sigma={0})'.format(self.sigma)
class Rotation(object):
"""
Args:
"""
def __init__(self):
pass
def __call__(self, tensors):
"""
Args:
tensor (Tensor): Tensor of size (C, L) to be scaled.
Returns:
Tensor: Scaled Tensor.
"""
#axis = torch.Tensor(tensors.shape[0]).uniform_(-1, 1)
#angle = torch.Tensor().uniform_(-np.pi, np.pi)
        axis = torch.Tensor(3).uniform_(-1, 1)  # axangle2mat unpacks a 3-component axis
angle = torch.Tensor(1).uniform_(-np.pi, np.pi)
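        # The 3x3 rotation below assumes the input has exactly 3 channels
        # (e.g. tri-axial accelerometer data) with a matching dtype; it does
        # not broadcast over a 12-lead ECG array.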
x = axangle2mat(axis, angle)
x = torch.from_numpy(x)
return torch.matmul(x, tensors).float()
# print("This is Rotation")
# print(type(torch.matmul(axangle2mat(axis, angle), tensors)))
#return torch.matmul(axangle2mat(axis, angle), tensors).float()
def __repr__(self):
return self.__class__.__name__
class Permutation(object):
"""
Args:
nPerm:
minSegLength:
"""
def __init__(self, nPerm=4, minSegLength=10):
self.nPerm = nPerm
self.minSegLength = minSegLength
def __call__(self, tensors):
"""
Args:
tensor (Tensor): Tensor of size (C, L) to be scaled.
Returns:
Tensor: Scaled Tensor.
"""
        # Note: X_new must be a float tensor, otherwise the permuted signal is not assembled correctly
X_new = torch.zeros(tensors.shape, dtype=torch.float)
idx = torch.randperm(self.nPerm)
bWhile = True
while bWhile == True:
segs = torch.zeros(self.nPerm + 1, dtype=torch.int64)
segs[1:-1] = torch.sort(torch.randint(self.minSegLength, tensors.shape[1] - self.minSegLength, (self.nPerm - 1,))).values
segs[-1] = tensors.shape[1]
if torch.min(segs[1:] - segs[0:-1]) > self.minSegLength:
bWhile = False
pp = 0
for ii in range(self.nPerm):
x_temp = tensors[:, segs[idx[ii]]:segs[idx[ii] + 1]]
X_new[:, pp:pp + x_temp.shape[1]] = x_temp
pp += x_temp.shape[1]
# print("This is Permutation")
# print(type(X_new))
return (X_new).float()
def __repr__(self):
return self.__class__.__name__
class RandSampling(object):
"""
Args:
nSample:
"""
def __init__(self, nSample=1000):
self.nSample = nSample
def __call__(self, tensors):
"""
Args:
tensor (Tensor): Tensor of size (C, L) to be scaled.
Returns:
Tensor: Scaled Tensor.
"""
tt = RandSampleTimesteps(tensors, self.nSample)
X_new = np.zeros(tensors.shape)
for i in range(tensors.shape[0]):
X_new[i, :] = np.interp(np.arange(tensors.shape[1]), tt[i, :], tensors[i, tt[i, :]])
# print("This is RandSampling")
# print(type(torch.from_numpy(X_new)))
return (torch.from_numpy(X_new).float())
def __repr__(self):
return self.__class__.__name__
class filter_and_detrend(object):
"""
Args:
"""
def __init__(self):
pass
def __call__(self, data):
"""
Args:
            data: 12-lead ECG data; for example, the shape of data is (12, 5000)
        Returns:
            Tensor: 12-lead ECG data after filtering and detrending
"""
filtered_data = | pd.DataFrame() | pandas.DataFrame |
r'''
myapps\tkinter_stuff\official-example-pandastable.py
import page_gui_menu_support
from Mytable import mytable
# implement pandastable
# ----
self.PD = Mytable(top)
self.PD.place(relx=0.0, rely=0.0, relheight=1.0, relwidth=1.0)
if __name__ == '__main__':
vp_start_gui()
'''
# pylint: disable=wildcard-import
# pylint: disable=unused-import
# pylint: disable=unused-wildcard-import
# pylint: disable=wrong-import-order, inconsistent-return-statements,
# pylint: disable=no-self-use
import os
# import logging
# from blinker import signal
import blinker
from tkinter import * # noqa: F403, F401
from tkinter.ttk import * # noqa: F403, F401
import pandas as pd
import numpy as np
from pandastable.core import Table
# from pandastable.data import TableModel
import logzero
from logzero import logger
from extract_rows import extract_rows
SIG_TABLE = blinker.signal('table')
SIG_PAD = blinker.signal('pad')
# LOGGER = logging.getLogger(__name__)
# LOGGER.addHandler(logging.NullHandler())
_ = os.environ.get("ALIGNER_DEBUG")
if _ is not None and _.lower() in ["1", "true"]:
level = 20
else:
level = 20
from logzero import setup_logger
logger = setup_logger(
name=__file__,
level=level,
)
logger.debug('os.environ.get("ALIGNER_DEBUG"): %s', _)
class MyTable(Table): # pylint: disable=too-many-ancestors
"""
Custom table class inherits from Table.
You can then override required methods
"""
def __init__(self, parent=None, **kwargs):
# Table.__init__(self, parent, **kwargs)
super().__init__(parent, **kwargs)
def handle_signal(sender, **kw):
self.slot_table(sender, **kw)
        self.handle_signal = handle_signal  # keep a strong reference: blinker connects receivers weakly, so the closure would otherwise be garbage-collected
SIG_TABLE.connect(handle_signal)
# in effect SIG_TABLE.connect(self.slot_table)
self.row_clicked = None
self.column_clicked = None
# setup for initial open1 open2 pad
# in open1/open2: do extract_rows and SIG_TABLE.send (imitate handle_left_click)
self.row_clicked = 0
self.column_clicked = 0
self.showstatusbar = True
# print(*args, **kwargs)
# print('** rows cols **: ', self.rows, self.cols)
# '''
# pandastable default rows == 30, cols == 5?
if self.rows == 20 and self.cols == 5:
_ = ' ' * 40
_ = ' ' * 60
_ = ' ' * 1
dataframe = pd.DataFrame({
# 'a': range(3),
# 'a': ['0', '1', 2],
'text1': ['0' + _, '1', '2'],
# 'b': range(3, 3 + 3),
# 'b': ['3', '4', 5],
'text2': ['3' + _, '4', '5'],
# 'c': range(6, 6 + 3),
# 'c': ['6', '7', 8],
# 'merit': [0.6, 0.7, 0.8],
'merit': ["", "", ""],
})
Table.__init__(self, parent, dataframe=dataframe, showstatusbar=True)
# activate pad: imitate left click row-0 of table
data = extract_rows(self.model.df, self.row_clicked)
# SIG_TABLE.send('table', data=data)
# SIG_TABLE.send(data=data)
SIG_PAD.send(data=data)
# self.dataframe = dataframe
# self.show()
# '''
# Canvas .geometry("988x447+626+56")
# mytable.columnwidths: {'text1': 300, 'text2': 300, 'merit': 80}
self.columnwidths = {'text1': 430, 'text2': 430, 'merit': 80}
self.show()
if parent is not None:
logger.debug("parent.winfo_width(): %s", parent.winfo_width())
logger.debug("parent.winfo_geometry(): %s", parent.winfo_geometry())
logger.debug("mytable.columnwidths: %s", self.columnwidths)
def handle_left_click(self, event):
r''' handle left click
left-click to select
myapps\tkinter_stuff\row_clicked_selected-pandastable.py
pandastable source code: handle_right_click sans popmenu
'''
super().handle_left_click(event)
rowclicked = self.get_row_clicked(event)
self.row_clicked = rowclicked
# logger.info("event: %s, type: %s", event, type(event))
logger.debug("RowClicked: %s", rowclicked)
rowsel = self.getSelectedRow()
logger.debug("RowSelected: %s", rowsel)
# left click to select
# table.currentrow = rowclicked
# table.setSelectedRow(rowclicked)
# from source code handle_right_click(self, event)
# (popmenu removed), substitute self with table
self.delete('tooltip')
self.rowheader.clearSelected()
if hasattr(self, 'rightmenu'):
self.rightmenu.destroy()
rowclicked = self.get_row_clicked(event)
colclicked = self.get_col_clicked(event)
if rowclicked is None or colclicked is None:
return None
# color the third col
# self.Table.setRowColors(rows=rows1, clr="#FF0000", cols=[2])
# self.Table.update_rowcolors()
# for elm in range(self.rows): # slow response for long table
# self.setRowColors(rows=elm, clr="#FF0000", cols=[2])
# logger.info("clicked, self.rows: %s", self.rows)
# df = self.model.df
# idx = df.index[rows]
# df = self.model.df
# logger.info(" df.index: %s, df.to_dict(): %s", df.index, df.to_dict())
# self.setRowColors(rows=list(range(self.rows)), clr="#FF0000", cols=[2])
# self.update_rowcolors()
# self.redraw()
# self.setColorbyValue() # popup
if 0 <= rowclicked < self.rows and 0 <= colclicked < self.cols:
self.clearSelected()
self.allrows = False
self.setSelectedRow(rowclicked)
self.setSelectedCol(colclicked)
self.drawSelectedRect(self.currentrow, self.currentcol)
self.drawSelectedRow()
# self.model.df.iloc[self.currentrow, self.currentcol] = 0 # clear
# self.redraw() # !OKOK
# ---
# print(self.model.df)
# print("currentrow: ", self.currentrow)
# print("currentcol: ", self.currentcol)
# populate to MyPad
# blinker?
data = extract_rows(self.model.df, self.row_clicked)
# SIG_TABLE.send('table', data=data)
# SIG_TABLE.send(data=data)
SIG_PAD.send(data=data)
# self.setRowColors(rows=list(range(self.rows)), clr="#FF0000", cols=[2])
# self.setRowColors(rows=[-2, -1], clr="#FF0000", cols=[2]) # does not work
# """
def slot_table(self, sender, **kw):
''' handle data for SIG_TABLE (from mypad and other sources) '''
# logger.debug('**************Enter slot_table, received - sender: %s, kw: \n%s', sender, kw)
# handle df from longtime_job: SIG_PAD.send('job', df=df_data)
# logger.debug(" received kw: %s", kw)
logger.debug(" received kw ")
df = kw.get("df")
if df is not None:
columns = ["text1", "text2", "merit"]
df = | pd.DataFrame(df, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
# from datetime import datetime
# from tzwhere import tzwhere
# from dateutil import tz
# import pytz
def haversine_np(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
All args must be of equal length.
"""
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6367 * c
return km
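# Hedged example (coordinates are approximate city centres): the function is
# vectorised, so equal-length arrays or Series work the same way as scalars.
if __name__ == "__main__":
    print(haversine_np(-87.63, 41.88, -73.97, 40.75))    # Chicago -> NYC, ~1140 km
    print(haversine_np(-122.42, 37.77, -118.24, 34.05))  # SF -> LA, ~560 km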
def generate_zone_att(data, save_file, data_output_path):
data['stops'] = data['stops'].fillna('NA')
data.loc[data['type'] == 'Station', 'zone_id'] = 'INIT'
data['zone_id'] = data['zone_id'].fillna(method='bfill')
data['zone_id'] = data['zone_id'].fillna(method='ffill')
# data = data.dropna(subset = ['zone_id'])
data['total_vol'] = data['depth_cm'] * data['height_cm'] * data['width_cm']
# data['num_pkg'] = data.groupby([''])
data['max_dhw'] = np.maximum(np.maximum(data['depth_cm'].values, data['height_cm'].values), data['width_cm'].values)
data['time_window_end_dt'] = pd.to_datetime(data['time_window_end'], format='%Y-%m-%d %H:%M:%S')
data['departure_date_time'] = data['date'] + ' ' + data['departure_time']
data['departure_date_time_dt'] = pd.to_datetime(data['departure_date_time'], format='%Y-%m-%d %H:%M:%S')
data['time_window_end_from_departure_sec'] = data['time_window_end_dt'] - data['departure_date_time_dt']
data['time_window_end_from_departure_sec'] = data['time_window_end_from_departure_sec'].dt.total_seconds()
data['time_window_end_from_departure_sec'] = data['time_window_end_from_departure_sec'].fillna(99999)
data_stops = data[['route_id', 'zone_id', 'stops', 'lat', 'lng']].drop_duplicates()
data_stops['total_num_stops_per_zone'] = data_stops.groupby(['route_id', 'zone_id'])['stops'].transform('count')
data_stops['lat_mean'] = data_stops.groupby(['route_id', 'zone_id'])['lat'].transform('mean')
data_stops['lng_mean'] = data_stops.groupby(['route_id', 'zone_id'])['lng'].transform('mean')
data_stops = data_stops[
['route_id', 'zone_id', 'lat_mean', 'lng_mean', 'total_num_stops_per_zone']].drop_duplicates()
data['n_pkg'] = data.groupby(['route_id', 'zone_id'])['pack_ID'].transform('count')
col_to_group = ['route_id', 'zone_id', 'station_code', 'departure_date_time', 'exe_cap_cm3', 'route_score', 'n_pkg']
data_zone = data.groupby(col_to_group, sort=False).agg({'planned_service_time': ['sum'],
'depth_cm': ['max', 'mean', 'sum'],
'height_cm': ['max', 'mean', 'sum'],
'width_cm': ['max', 'mean', 'sum'],
'total_vol': ['max', 'mean', 'sum'],
'max_dhw': ['max', 'mean', 'sum'],
'time_window_end_from_departure_sec': [
'min']}).reset_index()
data_zone.columns = data_zone.columns = ['_'.join(col).strip() for col in data_zone.columns.values]
for col in col_to_group:
data_zone = data_zone.rename(columns={col + '_': col})
data_zone = data_zone.merge(data_stops, on=['route_id', 'zone_id'])
################
## get num tra sigs
# station_location = pd.read_csv('../data/station_location.csv')
# station_list = list(station_location['station_code'])
# sig_station = []
# for station in station_list:
# nodes = pd.read_csv('../baichuan_ML_test/' + station + 'net/primary/node.csv')
# nodes = nodes.loc[:, ['node_id', 'osm_highway', 'x_coord', 'y_coord']]
# # cross = nodes.loc[nodes['osm_highway'] == 'crossing']
# signals = nodes.loc[nodes['osm_highway'] == 'traffic_signals'].copy()
# signals['station_code'] = station
# sig_station.append(signals)
# a = 1
#
# sig_station = pd.concat(sig_station)
# data_zone['key'] = 1
# sig_station['key'] = 1
# data_zone_sig = data_zone.merge(sig_station, on=['key', 'station_code'])
# data_zone_sig['dist'] = haversine_np(data_zone_sig['lng_mean'], data_zone_sig['lat_mean'], data_zone_sig['x_coord'],
# data_zone_sig['y_coord'])
# #
# nearby = 0.5 # 5km
# #
# data_zone_sig = data_zone_sig.loc[data_zone_sig['dist'] <= nearby]
# data_zone_sig_num = data_zone_sig.groupby(['route_id', 'zone_id'])['osm_highway'].count().reset_index()
# data_zone_sig_num = data_zone_sig_num.rename(columns={'osm_highway': 'num_tra_sig'})
# data_zone = data_zone.merge(data_zone_sig_num, on=['route_id', 'zone_id'], how='left')
# data_zone['num_tra_sig'] = data_zone['num_tra_sig'].fillna(0)
###############
#########################
### calculate local time
# tz_func = tzwhere.tzwhere()
#
# station_unique = data_zone.drop_duplicates(['station_code']).copy()
# station_unique['time_zone'] = station_unique[['lat_mean', 'lng_mean']].apply(lambda x: tz_func.tzNameAt(x[0], x[1]),
# axis=1)
#
# # from_zone = tz.gettz('UTC')
#
# data_zone['zone_seq'] = data_zone.groupby(['route_id'], sort=False).cumcount() + 1
#
# time_diff_list = [0] * len(station_unique)
# count = 0
#
# for idx, row in station_unique.iterrows():
# time_zone_pytz = pytz.timezone(row['time_zone'])
# time_diff = time_zone_pytz.utcoffset(datetime(2018, 7, 30))
# time_diff_list[count] = time_diff
# count += 1
#
# station_unique['time_diff'] = time_diff_list
#
# data_zone = data_zone.merge(station_unique[['station_code', 'time_zone', 'time_diff']], on=['station_code'])
#
# data_zone['departure_date_time_dt'] = pd.to_datetime(data_zone['departure_date_time'], format='%Y-%m-%d %H:%M:%S')
# data_zone['departure_date_time_local'] = data_zone['departure_date_time_dt'] + data_zone['time_diff']
# data_zone['day_of_week'] = data_zone['departure_date_time_local'].dt.dayofweek # Monday 0 Sunday 6
# data_zone['hour'] = data_zone['departure_date_time_local'].dt.hour
#
# # data_zone['hour'].hist()
#
# data_zone['departure_date_time_local'] = data_zone['departure_date_time_local'].astype("str")
# # data_zone = data_zone.fillna(0)
# data_zone = data_zone.drop(columns=['departure_date_time_dt', 'time_diff', 'time_zone'])
#############################
if save_file:
data_zone.to_csv(data_output_path + 'data/zone_data.csv',index = False)
return data_zone
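# Hypothetical call (paths and flags are placeholders):
#   zone_df = generate_zone_att(package_route_df, save_file=False,
#                               data_output_path='../output/')
# where package_route_df carries the per-package columns used above
# (route_id, zone_id, stops, lat, lng, depth_cm, height_cm, width_cm, ...).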
def generate_zone_att_apply(data, save_file, data_apply_output_path):
data['stops'] = data['stops'].fillna('NA')
data.loc[data['type'] == 'Station', 'zone_id'] = 'INIT'
data['zone_id'] = data['zone_id'].fillna(method='bfill')
data['zone_id'] = data['zone_id'].fillna(method='ffill')
# data = data.dropna(subset = ['zone_id'])
data['total_vol'] = data['depth_cm'] * data['height_cm'] * data['width_cm']
# data['num_pkg'] = data.groupby([''])
data['max_dhw'] = np.maximum(np.maximum(data['depth_cm'].values, data['height_cm'].values), data['width_cm'].values)
data['time_window_end_dt'] = | pd.to_datetime(data['time_window_end'], format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |
import matplotlib.pyplot as plt
import seaborn as sns
import altair as alt
import panel as pn
import pandas as pd
import numpy as np
plt.style.use('ggplot')
alt.data_transformers.disable_max_rows()
def bv_linePlot(data, engine, xlabel, ylabel1, ylabel2):
data = data.copy()
data.rename(columns={'plotY':ylabel1, 'plotX1':ylabel2}, inplace=True)
if engine == 'Static':
fig, axes = plt.subplots(figsize=(9,6))
axes.plot(data[ylabel1], marker='o', markersize=1.5)
axes.legend([ylabel1])
axes_r = axes.twinx()
axes_r.plot(data[ylabel2], marker='o', markersize=1.5, color='orange')
axes_r.legend([ylabel2], loc=1)
axes.set_xlabel(xlabel, fontsize = 15)
axes.set_ylabel(ylabel1, fontsize = 15)
axes_r.set_ylabel(ylabel2, fontsize = 15)
axes.grid(b=True, which='major', color='k', linewidth=0.25)
plt.close()
return pn.pane.Matplotlib(fig, tight=True)
elif engine == 'Interactive':
data=data.dropna()
# Selection Brush
brush = alt.selection(type='interval', encodings=['x'], name='isel')
# Base Plot
base = alt.Chart(data.reset_index())
base = base.encode(x = alt.X('{0}:T'.format(data.index.name), title=''),
tooltip = ylabel1)
base = base.properties(width = 580, height = 275)
# Upper Plot
upper1 = base.mark_line(color='#3d84ba')
upper1 = upper1.encode(x = alt.X('{0}:T'.format(data.index.name), scale=alt.Scale(domain=brush), title=''),
y = alt.Y('{0}:Q'.format(ylabel1), scale=alt.Scale(zero=False), axis=alt.Axis(format='~s')))
upper2 = base.mark_line(color='#f57542')
upper2 = upper2.encode(x = alt.X('{0}:T'.format(data.index.name), scale=alt.Scale(domain=brush), title=''),
y = alt.Y('{0}:Q'.format(ylabel2), scale=alt.Scale(zero=False), axis=alt.Axis(format='~s')))
# Lower Plot
lower = base.mark_area(line={'color':'darkgray'},
color=alt.Gradient(
gradient='linear',
stops=[alt.GradientStop(color='white', offset=0),
alt.GradientStop(color='darkgray', offset=1)],
x1=1, x2=1,
y1=1, y2=0
))
lower = lower.encode(y=alt.Y('{0}:Q'.format(ylabel1), title='', axis=None))
lower = lower.properties(height=20)
lower = lower.add_selection(brush)
lower.encoding.x.title = 'Interval Selection'
# Base Statistics1
base_stat1 = upper1.transform_filter(brush)
base_stat1 = base_stat1.transform_aggregate(Mean1='mean({0})'.format(ylabel1),
StdDev1='stdev({0})'.format(ylabel1),
Var1='variance({0})'.format(ylabel1))
label_stat1 = base_stat1.transform_calculate(stat_label1="'Mean = ' + format(datum.Mean1, '~s') + \
'; Standard Deviation = ' + format(datum.StdDev1, '~s') +\
'; Variance = ' + format(datum.Var1, '~s')")
label_stat1 = label_stat1.mark_text(align='left', baseline='bottom', color='#3d84ba')
label_stat1 = label_stat1.encode(x=alt.value(0.0), y=alt.value(12.0), text=alt.Text('stat_label1:N'))
# Base Statistics2
base_stat2 = upper2.transform_filter(brush)
base_stat2 = base_stat2.transform_aggregate(Mean2='mean({0})'.format(ylabel2),
StdDev2='stdev({0})'.format(ylabel2),
Var2='variance({0})'.format(ylabel2))
label_stat2 = base_stat2.transform_calculate(stat_label1="'Mean = ' + format(datum.Mean2, '~s') + \
'; Standard Deviation = ' + format(datum.StdDev2, '~s') +\
'; Variance = ' + format(datum.Var2, '~s')")
label_stat2 = label_stat2.mark_text(align='left', baseline='bottom', color='#f57542')
label_stat2 = label_stat2.encode(x=alt.value(0.0), y=alt.value(25.0), text=alt.Text('stat_label1:N'))
upper1 = upper1 + label_stat1
upper2 = upper2 + label_stat2
upper = (upper1+upper2).resolve_scale(y='independent')
## Y LABEL 1
# Values
_ymean_uu1 = data[ylabel1].max()
_ymean1 = data[ylabel1].mean()
# Inspired from :- https://stats.stackexchange.com/a/350278
_maxvar_in_slice1 = ((data[ylabel1].max()-data[ylabel1].min())/2)**2
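        # Popoviciu's inequality: a variable bounded in [min, max] has variance at most
        # ((max - min) / 2) ** 2, so this is the largest variance any brushed selection
        # can reach and is used below as the upper end of the stat-bar scales.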
_ystd_uu1 = np.sqrt(_maxvar_in_slice1)
_ystd1 = data[ylabel1].std()
_yvar_uu1 = _maxvar_in_slice1
_yvar1 = data[ylabel1].var()
# Stat Bar Base
stats_barbase1 = base_stat1.mark_bar(color='#3d84ba')
stats_barbase1 = stats_barbase1.properties(width = 188, height = 20)
# Mean Bar
mean_bar1 = stats_barbase1.encode(x=alt.X('Mean1:Q', title='',
scale=alt.Scale(domain=[-_ymean_uu1,_ymean_uu1]),
axis=alt.Axis(format='~s')), y=alt.value(10.5))
totmean_line1 = alt.Chart(pd.DataFrame({'x': [_ymean1]}))
totmean_line1 = totmean_line1.mark_rule(color='red', size=5)
totmean_line1 = totmean_line1.encode(x='x')
mean_bar1 += totmean_line1
# Standard Deviation Bar
std_bar1 = stats_barbase1.encode(x=alt.X('StdDev1:Q', title='',
scale=alt.Scale(domain=[-_ystd_uu1,_ystd_uu1]),
axis=alt.Axis(format='~s')), y=alt.value(10.5))
totstd_line1 = alt.Chart(pd.DataFrame({'x': [_ystd1]}))
totstd_line1 = totstd_line1.mark_rule(color='red', size=5)
totstd_line1 = totstd_line1.encode(x='x')
std_bar1 += totstd_line1
# Variance Bar
var_bar1 = stats_barbase1.encode(x=alt.X('Var1:Q', title='',
scale=alt.Scale(domain=[-_yvar_uu1,_yvar_uu1]),
axis=alt.Axis(format='~s')), y=alt.value(10.5))
totvar_line1 = alt.Chart(pd.DataFrame({'x': [_yvar1]}))
totvar_line1 = totvar_line1.mark_rule(color='red', size=5)
totvar_line1 = totvar_line1.encode(x='x')
var_bar1 += totvar_line1
## Y LABEL 2
# Values
_ymean_uu2 = data[ylabel2].max()
_ymean2 = data[ylabel2].mean()
# Inspired from :- https://stats.stackexchange.com/a/350278
_maxvar_in_slice2 = ((data[ylabel2].max()-data[ylabel2].min())/2)**2
_ystd_uu2 = np.sqrt(_maxvar_in_slice2)
_ystd2 = data[ylabel2].std()
_yvar_uu2 = _maxvar_in_slice2
_yvar2 = data[ylabel2].var()
# Stat Bar Base
stats_barbase2 = base_stat2.mark_bar(color='#f57542')
stats_barbase2 = stats_barbase2.properties(width = 188, height = 20)
# Mean Bar
mean_bar2 = stats_barbase2.encode(x=alt.X('Mean2:Q', title='Mean',
scale=alt.Scale(domain=[-_ymean_uu2,_ymean_uu2]),
axis=alt.Axis(format='~s')), y=alt.value(10.5))
        totmean_line2 = alt.Chart(pd.DataFrame({'x': [_ymean2]}))
import itertools
import traceback
import uuid
from functools import partial, reduce
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from pdb import set_trace as st
import numpy as np
import pandas as pd
import os
import tensorflow as tf
from nninst_graph import AttrMap, Graph, GraphAttrKey
import nninst_mode as mode
from dataset import cifar10
from dataset import mnist  # assumed import path (parallel to cifar10); mnist.test/mnist.train/mnist.normalize are used below
from dataset.mnist_transforms import *
from dataset.config import MNIST_PATH, CIFAR10_PATH
# from nninst.backend.tensorflow.dataset import imagenet, imagenet_raw
# from nninst.backend.tensorflow.dataset.imagenet_hierarchy import imagenet_class_tree
# from nninst.backend.tensorflow.dataset.imagenet_preprocessing import (
# alexnet_preprocess_image,
# )
from tf_graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from model import LeNet
from model.resnet18cifar10 import ResNet18Cifar10
from model.resnet10cifar10 import ResNet10Cifar10
# from nninst.backend.tensorflow.model import AlexNet, LeNet, ResNet50
from model.config import ModelConfig
# from nninst.backend.tensorflow.model.config import (
# ALEXNET,
# RESNET_50,
# VGG_16,
# ModelConfig,
# )
from trace.common import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
reconstruct_trace_from_tf_brute_force,
)
from trace.common import (
reconstruct_stat_from_tf,
reconstruct_trace_from_tf_v2,
)
# from nninst.dataset.envs import IMAGENET_RAW_DIR
from nninst_op import Conv2dOp
from nninst_path import (
get_trace_path_in_fc_layers,
get_trace_path_intersection_in_fc_layers,
)
from nninst_statistics import (
calc_trace_path_num,
calc_trace_size,
calc_trace_size_per_layer,
)
from nninst_trace import (
TraceKey,
compact_edge,
compact_trace,
merge_compact_trace,
merge_compact_trace_diff,
merge_compact_trace_intersect,
)
from nninst_utils import filter_value_not_null, merge_dict
from nninst_utils.fs import CsvIOAction, ImageIOAction, IOAction, abspath
from nninst_utils.numpy import arg_approx, arg_sorted_topk
from nninst_utils.ray import ray_iter
__all__ = [
"clean_overlap_ratio",
"overlap_ratio",
"get_overlay_summary",
"resnet_50_imagenet_overlap_ratio",
"alexnet_imagenet_overlap_ratio",
"resnet_50_imagenet_overlap_ratio_error",
"get_overlay_summary_one_side",
"resnet_50_imagenet_overlap_ratio_rand",
"alexnet_imagenet_overlap_ratio_top5",
"resnet_50_imagenet_overlap_ratio_top5_rand",
"resnet_50_imagenet_overlap_ratio_top5",
"alexnet_imagenet_overlap_ratio_error",
"alexnet_imagenet_overlap_ratio_rand",
"alexnet_imagenet_overlap_ratio_top5_rand",
"alexnet_imagenet_overlap_ratio_top5_diff",
]
def calc_all_overlap(
class_trace: AttrMap,
trace: AttrMap,
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
node_name: str = None,
compact: bool = False,
use_intersect_size: bool = False,
key: str = TraceKey.EDGE,
) -> Dict[str, float]:
if node_name is None:
if use_intersect_size:
overlap_ratio, intersect_size = overlap_fn(
class_trace, trace, key, return_size=True
)
return {key + "_size": intersect_size, key: overlap_ratio}
else:
return {
**{
key + "_size": calc_trace_size(trace, key, compact=compact)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
},
**{
key: overlap_fn(class_trace, trace, key)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
},
}
else:
all_overlap = {
key: overlap_fn(class_trace, trace, key, node_name)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
}
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]:
if node_name in trace.ops:
node_trace = trace.ops[node_name]
if key in node_trace:
if compact:
all_overlap[key + "_size"] = np.count_nonzero(
np.unpackbits(node_trace[key])
)
else:
all_overlap[key + "_size"] = TraceKey.to_array(
node_trace[key]
).size
return all_overlap
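# Usage sketch (mirrors the call sites below): for a whole-trace comparison the result maps
# TraceKey.EDGE / TraceKey.POINT / TraceKey.WEIGHT to overlap ratios plus "<key>_size" entries:
# overlap = calc_all_overlap(class_trace_fn(class_id).load(), trace, overlap_fn)
# edge_overlap = overlap[TraceKey.EDGE]
# edge_size = overlap[TraceKey.EDGE + "_size"]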
# Compute mnist overlap ratio between the traces of clean test images and class traces
def clean_overlap_ratio(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
num_gpus:float = 0.2,
images_per_class: int = 1,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath("result/lenet/model_dropout")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
# print(class_id, predicted_label)
# st()
if predicted_label != class_id:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
}
# st()
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
chunksize=1,
out_of_order=True,
            num_gpus=num_gpus,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
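# Usage sketch (argument values are illustrative; `lenet_mnist_class_trace` and `overlap_fn`
# are assumed to be provided elsewhere in the project):
# clean_overlap_ratio(
#     class_trace_fn=lenet_mnist_class_trace,
#     select_fn=lambda x: arg_approx(x, 0.5),
#     overlap_fn=overlap_fn,
#     path="result/lenet/clean_overlap.csv",
#     images_per_class=10,
# ).save()  # CsvIOAction is lazy: .save() runs the ray jobs and writes the CSV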
# Compute transformed (translation, rotation and scale)
# mnist overlap ratio between the traces of clean test images and class traces
def translation_overlap_ratio(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
images_per_class: int = 1,
transforms=None,
name = None,
num_gpus = 0.2,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath("result/lenet/model_augmentation")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
# Check the prediction on clean untransformed image, so don't need
# transform
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
# print(class_id, predicted_label)
# st()
if predicted_label != class_id:
return [{}] if per_node else {}
# Reconstruct regardless of the correctness of prediction
trace = reconstruct_trace_from_tf_brute_force(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir, transforms=transforms)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
)
# row = {
# "image_id": image_id,
# **map_prefix(
# calc_all_overlap(
# class_trace_fn(class_id).load(), trace, overlap_fn
# ),
# "original",
# ),
# }
# st()
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
# for image_id in range(0, images_per_class)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
chunksize=1,
out_of_order=True,
num_gpus=num_gpus,
)
traces = [trace for trace in traces if len(trace) != 0]
acc = len(traces) / (images_per_class * 10)
traces = pd.DataFrame(traces).mean()
traces.loc['accuracy'] = acc
traces = traces.to_frame()
traces.columns = [name]
return traces
return CsvIOAction(path, init_fn=get_overlap_ratio)
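# Note: unlike clean_overlap_ratio, this returns a single-column DataFrame (column = `name`)
# holding the mean overlap per trace key plus an 'accuracy' row, i.e. the fraction of images
# that were correctly predicted on the clean, untransformed input and successfully traced.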
# Compute the mean overlap ratio of attacked image
def attack_overlap_ratio(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
images_per_class: int = 1,
num_gpus: float = 0.2,
model_dir = "result/lenet/model_augmentation",
transforms = None,
transform_name = "noop",
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
nonlocal model_dir
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath(model_dir)
ckpt_dir = f"{model_dir}/ckpts"
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook,
create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=ckpt_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
# model_dir not ckpt_dir
model_dir=model_dir,
transforms = transforms,
transform_name = transform_name,
mode = "test",
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=ckpt_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir, transforms=transforms)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf_brute_force(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
"class_id": class_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
**map_prefix(
calc_all_overlap(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
overlap_fn,
),
"adversarial",
),
}
# row = calc_all_overlap(
# class_trace_fn(class_id).load(), adversarial_trace, overlap_fn
# )
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=num_gpus,
)
traces = [trace for trace in traces if len(trace) != 0]
# acc = len(traces) / (images_per_class * 10)
# traces = pd.DataFrame(traces).mean()
# traces.loc['clean_accuracy'] = acc
# traces = traces.to_frame()
# traces.columns = [attack_name]
# return traces
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
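# Usage sketch (the attack plumbing is assumed to come from the project's attack helpers):
# attack_overlap_ratio(
#     attack_name="FGSM",
#     attack_fn=fgsm_attack_fn,                               # assumed attack constructor
#     generate_adversarial_fn=generate_adversarial_example,   # assumed helper
#     class_trace_fn=lenet_mnist_class_trace,                 # assumed
#     select_fn=lambda x: arg_approx(x, 0.5),
#     overlap_fn=overlap_fn,
#     path="result/lenet/fgsm_overlap.csv",
#     images_per_class=100,
#     model_dir="result/lenet/model_augmentation",
# ).save()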
def lenet_mnist_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
mode: str ,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
data_dir = abspath(MNIST_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
create_model = lambda: LeNet(data_format="channels_first")
if mode == "test":
dataset = mnist.test
elif mode == "train":
dataset = mnist.train
else:
raise RuntimeError("Dataset invalid")
input = dataset(data_dir,
normed=False,
transforms=transforms,
)
# st()
# input = input.filter(lambda image, label: tf.equal(tf.convert_to_tensor(class_id, dtype=tf.int32), label))
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: dataset(data_dir,
normed=False,
transforms=transforms,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}_{transform_name}"
result_dir = f"{model_dir}/attack/{mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
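# Note: the returned IOAction caches the pickled example under
# {model_dir}/attack/{mode}/{attack_name}_{transform_name}/{class_id}/{image_id}.pkl,
# so .load() elsewhere in this module yields None until the example has been generated
# (or when generation failed for that image).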
def resnet18_cifar10_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
dataset_mode: str ,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_one_input_from_dataset(dataset):
input = (dataset
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
)
return input
def get_example() -> np.ndarray:
data_dir = abspath(CIFAR10_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
# create_model = lambda: LeNet(data_format="channels_first")
create_model = lambda: partial(
ResNet18Cifar10(),
training = False,
)
from dataset.cifar10_main import input_fn_for_adversarial_examples
# dataset = input_fn_for_adversarial_examples(
# is_training= False,
# data_dir=data_dir,
# num_parallel_batches=1,
# is_shuffle=False,
# transform_fn=None,
# )
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: get_one_input_from_dataset(
# dataset
# ),
# attack_fn=attack_fn,
# model_dir=ckpt_dir,
# **kwargs,
# )
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: (
input_fn_for_adversarial_examples(
is_training= False,
data_dir=data_dir,
num_parallel_batches=1,
is_shuffle=False,
transform_fn=None,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
),
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}_{transform_name}"
result_dir = f"{model_dir}/attack/{dataset_mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def resnet10_cifar10_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
dataset_mode: str ,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_one_input_from_dataset(dataset):
input = (dataset
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
)
return input
def get_example() -> np.ndarray:
data_dir = abspath(CIFAR10_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
# create_model = lambda: LeNet(data_format="channels_first")
create_model = lambda: partial(
ResNet10Cifar10(),
training = False,
)
from dataset.cifar10_main import input_fn_for_adversarial_examples
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: (
input_fn_for_adversarial_examples(
is_training= (dataset_mode=="train"),
data_dir=data_dir,
num_parallel_batches=1,
is_shuffle=False,
transform_fn=None,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
),
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}"
result_dir = f"{model_dir}/attack/{dataset_mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def adversarial_example_image(
example_io: IOAction[np.ndarray], cache: bool = True
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
example = example_io.load()
if example is None:
return None
return (np.squeeze(example, axis=0) * 255).astype(np.uint8)
path = example_io.path.replace(".pkl", ".png")
return ImageIOAction(path, init_fn=get_example, cache=cache)
def generate_examples(
example_fn: Callable[..., IOAction[np.ndarray]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
transform_name: str = "noop",
transforms = None,
cache: bool = True,
num_gpus=0.2,
**kwargs,
):
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_io = example_fn(
attack_name=attack_name,
class_id=class_id,
image_id=image_id,
cache=cache,
transforms = transforms,
transform_name = transform_name,
**kwargs,
)
example_io.save()
adversarial_example_image(example_io, cache=cache).save()
return class_id, image_id
except Exception:
return class_id, image_id, traceback.format_exc()
name = f"{attack_name}_{transform_name}"
print(f"begin {name}, num_gpu={num_gpus}")
if len(image_ids) > 99:
chunksize = 4
else:
chunksize = 1
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=chunksize,
out_of_order=True,
num_gpus=num_gpus,
# huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {name}")
def get_overlay_summary(
overlap_ratios: pd.DataFrame, trace_key: str, threshold=1
) -> Dict[str, int]:
condition_positive = len(overlap_ratios)
if condition_positive == 0:
return {}
original_key = f"original.{trace_key}"
false_positive = np.count_nonzero(overlap_ratios[original_key] < threshold)
adversarial_key = f"adversarial.{trace_key}"
true_positive = np.count_nonzero(overlap_ratios[adversarial_key] < threshold)
predicted_condition_positive = true_positive + false_positive
recall = (true_positive / condition_positive) if condition_positive != 0 else 0
precision = (
(true_positive / predicted_condition_positive)
if predicted_condition_positive != 0
else 0
)
f1 = (2 / ((1 / recall) + (1 / precision))) if recall != 0 and precision != 0 else 0
return dict(
threshold=threshold,
condition_positive=condition_positive,
# predicted_condition_positive=predicted_condition_positive,
original_is_higher=np.count_nonzero(
(overlap_ratios[original_key] - overlap_ratios[adversarial_key]) > 0
),
# adversarial_is_higher=np.count_nonzero(
# (overlap_ratios[adversarial_key] - overlap_ratios[original_key]) > 0),
true_positive=true_positive,
false_positive=false_positive,
recall=recall,
precision=precision,
f1=f1,
)
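# Interpretation sketch: an image is flagged as "adversarial" when its overlap with the trace
# of its predicted class falls below `threshold`; true_positive counts flagged adversarial
# rows, false_positive counts flagged clean rows, and precision/recall/F1 follow from those.
# Typical call (threshold value is illustrative):
# summary = get_overlay_summary(overlap_df, TraceKey.EDGE, threshold=0.6)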
def overlap_ratio(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath("/home/yxqiu/data/mnist/raw")
model_dir = abspath("tf/lenet/model_early")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label))
# .skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
# class_id = mnist_info.test().label(image_id)
#
# if class_id != trace.attrs[GraphAttrKey.PREDICT]:
# return [{}] if per_node else {}
if trace is None:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label))
# .skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
#
# if adversarial_example is None:
# return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
**map_prefix(
calc_all_overlap(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
overlap_fn,
),
"adversarial",
),
}
return row
else:
return {}
# traces = ray_iter(get_row, (image_id for image_id in range(300, 350)),
# traces = ray_iter(get_row, (image_id for image_id in range(131, 300)),
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 100)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
# chunksize=1, out_of_order=False, num_gpus=1)
# count = 0
# result = []
# for trace in traces:
# result.append(trace)
# print(count)
# count += 1
# traces = [trace for trace in result if len(trace) != 0]
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_class_trace_from_tf(
class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
model_dir=model_dir,
select_fn=select_fn,
per_channel=per_channel,
)
if trace is None:
return {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir, class_id, image_id, normed=False
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
adversarial_class_trace = class_trace_fn(adversarial_label).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"node_name": node_name,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
# traces = ray_iter(get_row, (image_id for image_id in range(300, 350)),
# traces = ray_iter(get_row, (image_id for image_id in range(131, 300)),
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
# for image_id in range(0, 50)
for class_id in range(1, 1001)
),
# for class_id in range(1, 2)),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
# chunksize=1, out_of_order=False, num_gpus=1)
# count = 0
# result = []
# for trace in traces:
# result.append(trace)
# print(count)
# count += 1
# traces = [trace for trace in result if len(trace) != 0]
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_top5(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
if trace is None:
return {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir, class_id, image_id, normed=False
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
if adversarial_label not in label_top5:
# if np.intersect1d(label_top5, adversarial_label_top5).size == 0:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in label_top5]
)
adversarial_class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in adversarial_label_top5]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
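# Note: in this top-5 variant an attack only counts as successful when the adversarial
# prediction falls outside the clean image's top-5 labels, and each side is compared against
# the merged class traces of its own top-5 predictions rather than a single class trace.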
def resnet_50_imagenet_overlap_ratio_error(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if class_id == trace.attrs[GraphAttrKey.PREDICT]:
return {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 3)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_rand(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
example = np.random.random_sample((1, 224, 224, 3)).astype(np.float32)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_top5_rand(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
example = np.random.random_sample((1, 224, 224, 3)).astype(np.float32)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = merge_compact_trace(
*[
class_trace_fn(label).load()
for label in trace.attrs[GraphAttrKey.PREDICT_TOP5]
]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
cache: bool = True,
**kwargs,
) -> IOAction[np.ndarray]:
return imagenet_example(
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
cache=cache,
**kwargs,
)
# deprecated
def alexnet_imagenet_example_trace_old(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
)[0]
return compact_trace(trace, graph)
name = "alexnet_imagenet"
path = f"store/analysis/example_trace/{name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_example_trace_of_target_class(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
trace_of_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
return compact_trace(trace_of_target_class, graph)
name = "alexnet_imagenet"
path = f"store/analysis/example_trace_of_target_class/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_adversarial_example_trace(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
)[0]
return compact_trace(adversarial_trace, graph)
name = "alexnet_imagenet"
path = f"store/analysis/adversarial_example_trace/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_adversarial_example_trace_of_original_class(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace_of_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
return compact_trace(adversarial_trace_of_original_class, graph)
name = "alexnet_imagenet"
path = f"store/analysis/adversarial_example_trace_of_original_class/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def generate_traces(
trace_fn: Callable[..., IOAction[AttrMap]],
attack_name: str,
class_ids: Iterable[int],
image_ids: Iterable[int],
**kwargs,
):
def generate_traces_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
trace_fn(
attack_name=attack_name, class_id=class_id, image_id=image_id, **kwargs
).save()
return class_id, image_id
except Exception:
return class_id, image_id, traceback.format_exc()
results = ray_iter(
generate_traces_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
print(f"finish class {class_id} image {image_id}")
def resnet_50_imagenet_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
cache: bool = True,
**kwargs,
) -> IOAction[np.ndarray]:
return imagenet_example(
model_config=RESNET_50,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
cache=cache,
**kwargs,
)
def vgg_16_imagenet_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
cache: bool = True,
**kwargs,
) -> IOAction[np.ndarray]:
return imagenet_example(
model_config=VGG_16,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
cache=cache,
**kwargs,
)
def imagenet_example(
model_config: ModelConfig,
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
cache: bool = True,
**kwargs,
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
return adversarial_example
name = f"{model_config.name}_imagenet"
path = f"store/example/{attack_name}/{name}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=cache, compress=True)
def alexnet_imagenet_example_stat(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = None,
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
return imagenet_example_stat(
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
def resnet_50_imagenet_example_stat(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = None,
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
return imagenet_example_stat(
model_config=RESNET_50,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
def imagenet_example_trace(
model_config: ModelConfig,
attack_name,
attack_fn,
generate_adversarial_fn,
trace_fn,
class_id: int,
image_id: int,
threshold: float,
per_channel: bool = False,
cache: bool = True,
train: bool = False,
**kwargs,
) -> IOAction[AttrMap]:
def get_example_trace() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: (imagenet_raw.train if train else imagenet_raw.test)(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
if attack_name == "original":
trace = reconstruct_trace_from_tf_v2(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
trace_fn=partial(
trace_fn, select_fn=lambda input: arg_approx(input, threshold)
),
model_dir=model_dir,
)[0]
trace = compact_trace(trace, graph, per_channel=per_channel)
return trace
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_trace_from_tf_v2(
model_fn=model_fn,
input_fn=adversarial_input_fn,
trace_fn=partial(
trace_fn, select_fn=lambda input: arg_approx(input, threshold)
),
model_dir=model_dir,
)[0]
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
return adversarial_trace
name = f"{model_config.name}_imagenet"
if train:
name = f"{name}_train"
if per_channel:
trace_name = "example_channel_trace"
else:
trace_name = "example_trace"
path = f"store/{trace_name}/approx_{threshold:.3f}/{attack_name}/{name}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example_trace, cache=cache, compress=True)
# alexnet_imagenet_example_trace = partial(
# imagenet_example_trace,
# model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
# )
#
# resnet_50_imagenet_example_trace = partial(
# imagenet_example_trace, model_config=RESNET_50
# )
#
# vgg_16_imagenet_example_trace = partial(imagenet_example_trace, model_config=VGG_16)
def imagenet_example_stat(
model_config: ModelConfig,
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = "avg",
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
def get_example_trace() -> Dict[str, np.ndarray]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
# input_fn = lambda: imagenet_raw.test(data_dir, class_id, image_id,
input_fn = lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
# if predicted_label != class_id:
# return None
if attack_name == "original":
trace = reconstruct_stat_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
model_dir=model_dir,
stat_name=stat_name,
)[0]
return trace
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_stat_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
model_dir=model_dir,
stat_name=stat_name,
)[0]
return adversarial_trace
name = f"{model_config.name}_imagenet"
trace_name = "example_stat"
path = (
f"store/{trace_name}/{stat_name}/{attack_name}/{name}/{class_id}/{image_id}.pkl"
)
return IOAction(path, init_fn=get_example_trace, cache=cache, compress=True)
def generate_example_traces(
example_trace_fn: Callable[..., IOAction[AttrMap]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
attack_fn,
generate_adversarial_fn,
threshold: float,
per_channel: bool = False,
cache: bool = True,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
train: bool = False,
**kwargs,
):
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_trace_io = example_trace_fn(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
threshold=threshold,
per_channel=per_channel,
cache=cache,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
train=train,
**kwargs,
)
example_trace_io.save()
return class_id, image_id
except Exception as e:
raise e
# return class_id, image_id, traceback.format_exc()
print(f"begin {attack_name}")
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {attack_name}")
def generate_example_stats(
example_trace_fn: Callable[..., IOAction[Dict[str, np.ndarray]]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
attack_fn,
generate_adversarial_fn,
stat_name: str = None,
cache: bool = True,
**kwargs,
):
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_trace_io = example_trace_fn(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
example_trace_io.save()
return class_id, image_id
except Exception as e:
raise e
# return class_id, image_id, traceback.format_exc()
print(f"begin {attack_name}")
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {attack_name}")
def alexnet_imagenet_overlap_ratio(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_class_trace_from_tf(
class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
model_dir=model_dir,
select_fn=select_fn,
per_channel=per_channel,
)
if trace is None:
return {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
adversarial_class_trace = class_trace_fn(adversarial_label).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
(
f"original.{TraceKey.WEIGHT}" in row
and row[f"original.{TraceKey.WEIGHT}"] is not None
)
or (
f"original.{TraceKey.EDGE}" in row
and row[f"original.{TraceKey.EDGE}"]
)
is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def get_predicted_value_contribution(
trace: AttrMap, graph: Graph, class_id: int, create_model, input_fn, model_dir
) -> float:
# print(calc_density_compact(trace, TraceKey.EDGE))
return get_predicted_value(
class_id=class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
prediction_hooks=[MaskWeightWithTraceHook(graph, trace)],
)
def alexnet_imagenet_overlap_ratio_top5_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
with tf.Session() as sess:
original_example = sess.run(
imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
normed=False,
)
.make_one_shot_iterator()
.get_next()[0]
)
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
("pvc_in_class_in_rest", example_trace_in_class_in_rest),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
for k, base_class_id in zip(range(1, topk_calc_range + 1), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace, input_fn),
f"original.top{k}",
),
}
for k, base_class_id in zip(
range(1, topk_calc_range + 1), adversarial_label_top5
):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"perturbation": np.linalg.norm(
adversarial_example - original_example
)
/ original_example.size,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
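# The in_class / in_rest partition computed inside get_overlap above is plain
# set algebra; this sketch uses Python sets as stand-ins for compact traces,
# assuming merge_compact_trace_intersect / merge_compact_trace_diff behave like
# set intersection / difference over trace elements.
def _partition_trace_example(trace, class_trace, rest_class_trace):
    in_class_in_rest = trace & class_trace & rest_class_trace
    in_class_not_in_rest = (trace & class_trace) - rest_class_trace
    not_in_class_in_rest = (trace & rest_class_trace) - class_trace
    not_in_class_not_in_rest = trace - class_trace - rest_class_trace
    # The four regions are disjoint and together cover the whole example trace.
    assert (in_class_in_rest | in_class_not_in_rest
            | not_in_class_in_rest | not_in_class_not_in_rest) == trace
    return (in_class_in_rest, in_class_not_in_rest,
            not_in_class_in_rest, not_in_class_not_in_rest)

# _partition_trace_example({1, 2, 3, 4, 5}, {1, 2, 3}, {3, 4})
# -> ({3}, {1, 2}, {4}, {5})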
def alexnet_imagenet_overlap_ratio_top5_diff_uint8(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = adversarial_example_image(
alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
)
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_example = (
np.expand_dims(adversarial_example, axis=0).astype(np.float32) / 255
)
with tf.Session() as sess:
original_example = sess.run(
imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
normed=False,
)
.make_one_shot_iterator()
.get_next()[0]
)
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
("pvc_in_class_in_rest", example_trace_in_class_in_rest),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
for k, base_class_id in zip(range(1, topk_calc_range + 1), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace, input_fn),
f"original.top{k}",
),
}
for k, base_class_id in zip(
range(1, topk_calc_range + 1), adversarial_label_top5
):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"perturbation": np.linalg.norm(
adversarial_example - original_example
)
/ original_example.size,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_logit_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
rest_class_ids = class_ids.copy()
if base_class_id in rest_class_ids:
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
# ("pvc_in_class_in_rest", example_trace_in_class_in_rest),
("pvc_in_class", example_trace_in_class),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
# if (class_id not in adversarial_label_top5) or (adversarial_label not in label_top5):
# return [{}] if per_node else {}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, label_top5, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, label_top5, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label_top5,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_ideal_metrics(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, rest_class_id: int, trace: AttrMap, input_fn
):
rest_class_trace = get_class_trace(rest_class_id)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
# ("pvc_in_class_in_rest", example_trace_in_class_in_rest),
("pvc_in_class", example_trace_in_class),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_label, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, class_id, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"original_class_rank_in_adversarial_example": get_rank(
class_id=class_id,
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
),
"target_class_rank_in_original_example": get_rank(
class_id=adversarial_label,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
),
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return | pd.DataFrame(traces) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_sub_offset_array(self, tz, box):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([ | Timestamp('2000-01-16 00:15:00', tz='US/Central') | pandas.Timestamp |
import cv2
import json
from tqdm import tqdm
import pandas as pd
from .bb_polygon import *
def draw_arrow(image, start_point, end_point, color):
start_point = tuple(start_point)
end_point = tuple(end_point)
image = cv2.line(image, start_point, end_point, color, 3)
image = cv2.circle(image, end_point, 8, color, -1)
return image
def draw_start_last_points(ori_im, start_point, last_point, color=(0, 255, 0)):
return draw_arrow(ori_im, start_point, last_point, color)
def draw_one_box(img, box, key=None, value=None, color=None, line_thickness=None):
tl = line_thickness or int(round(0.001 * max(img.shape[0:2]))) # line thickness
coord = [box[0], box[1], box[2], box[3]]
c1, c2 = (int(coord[0]), int(coord[1])), (int(coord[2]), int(coord[3]))
img = cv2.rectangle(img, c1, c2, color, thickness=tl*2)
if key is not None and value is not None:
header = f'{key} || {value}'
tf = max(tl - 2, 1) # font thickness
s_size = cv2.getTextSize(f'| {value}', 0, fontScale=float(tl) / 3, thickness=tf)[0]
t_size = cv2.getTextSize(f'{key} |', 0, fontScale=float(tl) / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0] + s_size[0] + 15, c1[1] - t_size[1] - 3
img = cv2.rectangle(img, c1, c2, color, -1) # filled
img = cv2.putText(img, header, (c1[0], c1[1] - 2), 0, float(tl) / 3, [0, 0, 0],
thickness=tf, lineType=cv2.FONT_HERSHEY_SIMPLEX)
return img
def draw_text(
img,
text,
uv_top_left=None,
color=(255, 255, 255),
fontScale=0.75,
thickness=1,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
outline_color=(0, 0, 0),
line_spacing=1.5,
):
"""
Draws multiline text with an outline.
"""
assert isinstance(text, str)
lines = text.splitlines()
if uv_top_left is None:
# set the text start position, at the bottom left of video
(w, h), _ = cv2.getTextSize(
text=lines[0],
fontFace=fontFace,
fontScale=fontScale,
thickness=thickness,
)
text_offset_x = 10
text_offset_y = img.shape[0] - h*(len(lines)+3)
uv_top_left = (text_offset_x, text_offset_y)
uv_top_left = np.array(uv_top_left, dtype=float)
assert uv_top_left.shape == (2,)
for line in lines:
(w, h), _ = cv2.getTextSize(
text=line,
fontFace=fontFace,
fontScale=fontScale,
thickness=thickness,
)
uv_bottom_left_i = uv_top_left + [0, h]
org = tuple(uv_bottom_left_i.astype(int))
if outline_color is not None:
cv2.putText(
img,
text=line,
org=org,
fontFace=fontFace,
fontScale=fontScale,
color=outline_color,
thickness=thickness * 3,
lineType=cv2.LINE_AA,
)
cv2.putText(
img,
text=line,
org=org,
fontFace=fontFace,
fontScale=fontScale,
color=color,
thickness=thickness,
lineType=cv2.LINE_AA,
)
uv_top_left += [0, h * line_spacing]
return img
def draw_anno(image, polygon=None, paths=None):
colors = [(0, 0, 255), # red 0
(0, 255, 0), # green 1
(255, 0, 0), # blue 2
(0, 255, 255), # cyan 3
(128, 0, 128), # purple 4
(0, 0, 0), # black 5
(255, 255, 255)] # white 6
if polygon:
polygon = np.array(polygon, np.int32)
polygon = polygon.reshape((-1, 1, 2))
image = cv2.polylines(image, [polygon], True, colors[0], 5)
if paths:
for path, points in paths.items():
points = np.array(points, np.int32)
image = draw_arrow(image, points[0], points[1], colors[5])
image = cv2.putText(image, path, (points[1][0], points[1][1]),
cv2.FONT_HERSHEY_PLAIN, fontScale=1.5, color=colors[5], thickness=3)
return image
def draw_frame_count(img, frame_id):
text = f"Frame:{frame_id}"
text_offset = (10, 25)
return draw_text(img, text, text_offset, color=(0,255,0))
def load_zone_anno(zone_path):
with open(zone_path, 'r') as f:
anno = json.load(f)
directions = {}
zone = anno['shapes'][0]['points']
for i in anno['shapes']:
if i['label'].startswith('direction'):
directions[i['label'][-2:]] = i['points']
return zone, directions
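# The zone file is assumed to be a labelme-style JSON shaped like the example
# below: the first shape is the counting-zone polygon, and every shape whose
# label starts with "direction" gives a start/end point for one movement.
_EXAMPLE_ZONE_ANNO = {
    "shapes": [
        {"label": "zone", "points": [[10, 10], [300, 10], [300, 200], [10, 200]]},
        {"label": "direction01", "points": [[20, 100], [280, 100]]},
        {"label": "direction02", "points": [[150, 190], [150, 20]]},
    ]
}
# load_zone_anno on such a file returns
# (zone_polygon, {"01": [[20, 100], [280, 100]], "02": [[150, 190], [150, 20]]}).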
def find_best_match_direction(obj_vector,paths):
"""
paths: dict {key: vector,...}
"""
directions = list(paths.keys())
best_score = 0
best_match = directions[0]
for direction_id in directions:
vector = paths[direction_id]
score = cosin_similarity(obj_vector, vector)
if score > best_score:
best_score = score
best_match = direction_id
return best_match
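# Sketch of the matching rule above with an inline cosine similarity, assuming
# cosin_similarity (imported from .bb_polygon) is the usual cos(theta) between
# two 2-D movement vectors; the direction vectors here are hypothetical.
def _match_direction_example():
    def cosine(u, v):
        u, v = np.asarray(u, dtype=float), np.asarray(v, dtype=float)
        return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-12))
    paths = {"01": (1.0, 0.0), "02": (0.0, 1.0)}  # hypothetical direction vectors
    obj_vector = (0.9, 0.1)  # object mostly moves along +x
    return max(paths, key=lambda d: cosine(obj_vector, paths[d]))  # -> "01"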
def save_tracking_to_csv(track_dict, filename):
num_classes = len(track_dict)
obj_dict = {
'track_id': [],
'frame_id': [],
'box': [],
'color': [],
'label': [],
'direction': [],
'fpoint': [],
'lpoint': [],
'fframe': [],
'lframe': []
}
for label_id in range(num_classes):
for track_id in track_dict[label_id].keys():
direction = track_dict[label_id][track_id]['direction']
boxes = track_dict[label_id][track_id]['boxes']
frames = track_dict[label_id][track_id]['frames']
color = track_dict[label_id][track_id]['color']
frame_first = frames[0]
frame_last = frames[-1]
box_first = boxes[0]
box_last = boxes[-1]
center_point_first = ((box_first[2]+box_first[0]) / 2, (box_first[3] + box_first[1])/2)
center_point_last = ((box_last[2]+box_last[0]) / 2, (box_last[3] + box_last[1])/2)
for i in range(len(track_dict[label_id][track_id]['boxes'])):
obj_dict['track_id'].append(track_id)
obj_dict['frame_id'].append(frames[i])
obj_dict['box'].append(boxes[i].tolist())
obj_dict['color'].append(color)
obj_dict['label'].append(label_id)
obj_dict['direction'].append(direction)
obj_dict['fpoint'].append(center_point_first)
obj_dict['lpoint'].append(center_point_last)
obj_dict['fframe'].append(frame_first)
obj_dict['lframe'].append(frame_last)
df = pd.DataFrame(obj_dict)
df.to_csv(filename, index=False)
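# Shape of track_dict assumed by save_tracking_to_csv and convert_frame_dict:
# label_id -> track_id -> per-track fields. A minimal hypothetical example with
# one class and a single two-frame track:
_EXAMPLE_TRACK_DICT = {
    0: {  # label_id 0 (e.g. "car")
        7: {  # track_id 7
            "direction": "01",
            "frames": [12, 13],
            "boxes": [np.array([10, 20, 50, 60]), np.array([14, 22, 54, 62])],
            "color": (0, 255, 0),
        }
    }
}
# save_tracking_to_csv(_EXAMPLE_TRACK_DICT, "tracks.csv") writes one row per
# (track, frame), with fpoint/lpoint taken from the first and last box.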
def convert_frame_dict(track_dict):
"""
return result dict:
{
frame_id: {
'boxes': [],
'colors': [],
'fpoints': [],
'lpoints': []
}
}
"""
result_dict = {}
num_classes = len(track_dict)
for label_id in range(num_classes):
for track_id in track_dict[label_id].keys():
direction = track_dict[label_id][track_id]['direction']
boxes = track_dict[label_id][track_id]['boxes']
frames = track_dict[label_id][track_id]['frames']
color = track_dict[label_id][track_id]['color']
for i in range(len(frames)):
frame_id = frames[i]
box = boxes[i]
if frame_id not in result_dict.keys():
result_dict[frame_id] = {
'boxes': [],
'colors': [],
'fpoints': [],
'lpoints': [],
'labels': [],
'directions': []
}
first_box = boxes[0]
last_box = boxes[-1]
center_point_first = ((first_box[2]+first_box[0]) / 2, (first_box[3] + first_box[1])/2)
center_point_last = ((last_box[2]+last_box[0]) / 2, (last_box[3] + last_box[1])/2)
result_dict[frame_id]['boxes'].append(box)
result_dict[frame_id]['fpoints'].append(center_point_first)
result_dict[frame_id]['lpoints'].append(center_point_last)
result_dict[frame_id]['directions'].append(direction)
result_dict[frame_id]['colors'].append(color)
result_dict[frame_id]['labels'].append(label_id)
return result_dict
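# With _EXAMPLE_TRACK_DICT above, convert_frame_dict yields one entry per frame
# id (12 and 13), each holding that frame's box plus the track-level first/last
# centre points (30.0, 40.0) and (34.0, 42.0).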
def visualize_one_frame(img, df):
# track_id frame_id box color label direction fpoint lpoint fframe lframe
anns = [
i for i in zip(
df.track_id,
df.box,
df.color,
df.label,
df.fpoint)
]
for (track_id, box, color, label, fpoint) in anns:
box = eval(box)
fpoint = np.array(eval(fpoint)).astype(int)
color = eval(color)
cpoint = np.array([(box[2]+box[0]) / 2, (box[3] + box[1])/2]).astype(int)
img = draw_start_last_points(img, fpoint, cpoint, color)
img = draw_one_box(
img,
box,
key=f'id: {track_id}',
value=f'cls: {label}',
color=color)
return img
def count_frame_directions(df, count_dict):
anns = [
i for i in zip(
df.frame_id,
df.label,
df.direction,
df.lframe)
]
for (frame_id, label, direction, lframe) in anns:
if lframe == frame_id:
count_dict[direction][label] += 1
count_text = []
for dir in count_dict.keys():
tmp_text = f"direction:{dir} || "
for cls_id in count_dict[dir].keys():
tmp_text += f"{cls_id}:{count_dict[dir][cls_id]} | "
count_text.append(tmp_text)
count_text = "\n".join(count_text)
return count_dict, count_text
def visualize_merged(videoloader, csv_path, directions, zones, num_classes, outvid):
df = | pd.read_csv(csv_path) | pandas.read_csv |
import pandas as pd
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
import scipy as sp
import numpy as np
FULL_DATASET = True
# Decide whether to run the old code with the full dataset or the new code
# with selected peptides
if (FULL_DATASET):
# Load excel file of party processed data
data_xls = pd.ExcelFile('./data/timeseries/merged_normalized.xlsx')
# Create empty data frame for result data
result_data = pd.DataFrame()
# Load all of the sheets into a dict keyed by sheet index
sheet_list = {}
index = 0
for sheet_name in data_xls.sheet_names:
# Parse the sheet and store it in the dict
sheet_list[index] = data_xls.parse(sheet_name)
index += 1
# Get rid of all rows except duplicates
duplicate_data = sheet_list[0][(sheet_list[0]['peptide'].isin(sheet_list[1]['peptide']))].dropna().reset_index(drop=True)
duplicate_data = duplicate_data[(duplicate_data['peptide'].isin(sheet_list[2]['peptide']))].dropna().reset_index(drop=True)
# Trim the duplicate data to just the first four rows (information about peptides)
result_data = duplicate_data.iloc[:,0:4]
# Create variables for the data in A, B, and C
data_A = sheet_list[0][(sheet_list[0]['peptide'].isin(duplicate_data['peptide']))].dropna().reset_index(drop=True)
data_B = sheet_list[1][(sheet_list[1]['peptide'].isin(duplicate_data['peptide']))].dropna().reset_index(drop=True)
data_C = sheet_list[2][(sheet_list[2]['peptide'].isin(duplicate_data['peptide']))].dropna().reset_index(drop=True)
# Add the data from sheets A, B, and C respectively
result_data = pd.concat([result_data, data_A.iloc[:,4:12]], axis=1, ignore_index=True)
result_data = pd.concat([result_data, data_B.iloc[:,4:12]], axis=1, ignore_index=True)
result_data = pd.concat([result_data, data_C.iloc[:,4:12]], axis=1, ignore_index=True)
print(result_data)
    # Get the data for the stats MultiComparison test
array_A = np.asarray(data_A.iloc[:,4:12])
array_B = np.asarray(data_B.iloc[:,4:12])
array_C = np.asarray(data_C.iloc[:,4:12])
# Stack the dataframes into one
df = pd.DataFrame()
df_A = | pd.DataFrame(array_A) | pandas.DataFrame |
import pandas as pd
import numpy as np
from tia.analysis.model.trd import TradeBlotter
from tia.analysis.util import per_level, per_series
__all__ = ['per_level', 'per_series', 'sma', 'ema', 'wilderma', 'ma', 'macd', 'rsi', 'true_range', 'dmi',
'cross_signal', 'Signal']
@per_series()
def sma(arg, n):
""" If n is 0 then return the ltd mean; else return the n day mean """
if n == 0:
return pd.expanding_mean(arg)
else:
return pd.rolling_mean(arg, n, min_periods=n)
@per_series()
def ema(arg, n):
if n == 0:
return pd.ewma(arg, span=len(arg), min_periods=1)
else:
return pd.ewma(arg, span=n, min_periods=n)
@per_series()
def wilderma(arg, n):
converted = arg.dropna()
values = converted.values
if len(values) < n:
return pd.Series(np.nan, index=arg.index)
else:
result = np.empty(len(values), dtype=float)
result[:n - 1] = np.nan
result[n - 1] = values[:n].mean()
i, sz = n, len(values)
pm = 1. / n
wm = 1. - pm
while i < sz:
result[i] = pm * values[i] + wm * result[i - 1]
i += 1
return pd.Series(result, index=converted.index).reindex(arg.index)
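# Editorial sketch (not part of the original module): Wilder's moving average is the recursion
#   wma[t] = (1/n) * x[t] + (1 - 1/n) * wma[t-1],
# seeded with the mean of the first n observations, e.g.
#   wilderma(pd.Series([1., 2., 3., 4., 5.]), 3)  ->  NaN, NaN, 2.0, 2.6667, 3.4444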
def ma(arg, n, matype='sma'):
if matype == 'sma':
return sma(arg, n)
elif matype == 'ema':
return ema(arg, n)
elif matype == 'wma':
return wilderma(arg, n)
else:
raise ValueError('unknown moving average type %s' % matype)
def _ensure_sorf(arg):
if not isinstance(arg, (pd.DataFrame, pd.Series)):
raise Exception('expected Series or DataFrame')
def _ensure_col(arg, **kwds):
for k, v in kwds.items():
if v not in arg:
raise Exception('failed to find column for argument %s=%s' % (k, v))
def true_range(arg, high_col='high', low_col='low', close_col='close', skipna=0):
"""
http://en.wikipedia.org/wiki/Average_true_range
The greatest of the following:
- Current High less the current Low
- Current High less the previous Close (absolute value)
    - Current Low less the previous Close (absolute value)
"""
_ensure_col(arg, high_col=high_col, low_col=low_col, close_col=close_col)
yclose = arg[close_col].shift(1)
low, high = arg[low_col], arg[high_col]
mx = pd.DataFrame({'a': high, 'b': yclose}).max(axis=1, skipna=skipna)
mn = pd.DataFrame({'a': low, 'b': yclose}).min(axis=1, skipna=skipna)
result = mx - mn
return pd.Series(result, index=arg.index, name='true_range')
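# Editorial example (illustrative values): with high=12, low=9 and a previous close of 13 the true
# range is max(12, 13) - min(9, 13) = 13 - 9 = 4; with a previous close of 10 it is 12 - 9 = 3.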
def dmi(arg, n, high_col='high', low_col='low', close_col='close'):
""" Return the dmi+, dmi-, Average directional index
( http://en.wikipedia.org/wiki/Average_Directional_Index )
    TODO - break up calculation
"""
converted = arg[[close_col, high_col, low_col]]
converted.columns = ['close', 'high', 'low']
up_mv = converted.high.diff()
dn_mv = -1 * converted.low.diff()
up_mv[~((up_mv > 0) & (up_mv > dn_mv))] = 0
dn_mv[~((dn_mv > 0) & (dn_mv > up_mv))] = 0
tr = true_range(converted, 'high', 'low', 'close')
atr = wilderma(tr, n)
di_pos = 100. * wilderma(up_mv, n) / atr
di_neg = 100. * wilderma(dn_mv, n) / atr
dx = 100. * np.abs(di_pos - di_neg) / (di_pos + di_neg)
adx = wilderma(dx, n)
data = [
('DI+', di_pos),
('DI-', di_neg),
('DX', dx),
('ADX', adx),
]
return pd.DataFrame.from_items(data)
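# Editorial note: DI+ and DI- are the Wilder-smoothed up/down movements expressed as a percentage
# of the average true range, DX measures their divergence, and ADX is DX smoothed once more; an
# ADX above roughly 25 is commonly read as a trending market.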
def aroon(arg, n, up_col='close', dn_col='close'):
"""
    TODO - verify that dropna does not drop too many entries (maybe require all columns to be NaN?). This function assumes
    that up_col and dn_col always have the same length (i.e. values are not missing in just one series)
arg: Series or DataFrame
n: lookback count
columns: list of up column name, down column name or single column name for both or none
"""
if isinstance(arg, pd.DataFrame):
tmp = arg[[up_col, dn_col]].dropna()
idx = tmp.index
upvals = tmp[up_col].values
dnvals = tmp[dn_col].values
else:
tmp = arg.dropna()
idx = tmp.index
upvals = tmp.values
dnvals = upvals
n = int(n)
up, dn = np.empty(len(upvals), 'd'), np.empty(len(upvals), 'd')
up[:n] = np.nan
dn[:n] = np.nan
for i in range(n, len(upvals)):
up[i] = 100. * (n - upvals[i - n:i + 1][::-1].argmax()) / n
dn[i] = 100. * (n - dnvals[i - n:i + 1][::-1].argmin()) / n
osc = up - dn
data = [
('UP', | pd.Series(up, index=idx) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/8 23:47
# @Author : strawsyz
# @File : csv_dataset.py
# @desc:
import pandas as pd
from matplotlib import pyplot as plt
"""read information data from csv file"""
class CsvFile:
def __init__(self, csv_path, encoding=None):
self.data = pd.read_csv(csv_path, encoding=encoding)
| pd.set_option('display.max_columns', None) | pandas.set_option |
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
import plotly.express as px
# train-test split by a percentage.
# input: dataframe, label column name, split ration, and random state
# returns: x_train, x_test, y_train, y_test
def split_df(user_df: pd.DataFrame, label_name: str, split_ratio=0.8, random_value=42):
x_train = user_df.sample(frac=split_ratio, random_state=random_value)
x_test = user_df.drop(x_train.index)
return x_train.drop(label_name, axis=1), x_test.drop(label_name, axis=1), pd.DataFrame(
x_train[label_name]), pd.DataFrame(x_test[label_name])
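# Editorial usage sketch (the toy frame below is illustrative, not part of this project):
#   toy = pd.DataFrame({'x': range(10), 'label': [1, -1] * 5})
#   x_tr, x_te, y_tr, y_te = split_df(toy, 'label')   # default 0.8 split -> 8 train / 2 test rows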
# import data and preprocess it
def preprocessing(file_name: str):
# data import
heart_df = pd.read_csv(file_name)
# converting target to 1 and -1
new_label = []
for x in heart_df['target']:
if x == 1:
new_label.append(1)
else:
new_label.append(-1)
heart_df['target'] = new_label
# heart_df = heart_df.rename(columns={'target': 'label'})
# hot encoding of relevant features
dummy_features_list = ['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal']
non_dummy_features_list = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'target']
new_heart_df = pd.DataFrame(heart_df[non_dummy_features_list])
for feature in dummy_features_list:
new_heart_df = new_heart_df.join(pd.get_dummies(heart_df[feature], prefix=feature))
    return new_heart_df  # return the frame with the one-hot encoded features built above
# Create as arrays of stump tree in a given size
def create_stump_forest(forest_size: int, random_state_local: int):
stump_forest = []
for i in range(0, forest_size, 1):
stump_forest.append(DecisionTreeClassifier(criterion='gini', max_depth=1, random_state=random_state_local))
return stump_forest
# update weight of each row and randomly generate a new weighted data frame
# input: x/y data, predictions list, current stump weight
# return: new weighted x and y data frames
def create_new_weighted_data(x: pd.DataFrame, y: pd.DataFrame, predictions: np.ndarray, stump_weight: list):
# initiate weights
sample_weight = 1/len(x)
new_weights = []
# calculate new weights based on correct and incorrect decisions
for i in range(0, len(predictions), 1):
if predictions[i] == 1:
new_weights.append(sample_weight*np.exp(-np.sum(stump_weight)))
else:
new_weights.append(sample_weight*np.exp(np.sum(stump_weight)))
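    # Editorial note: this loop roughly mirrors the AdaBoost reweighting (weight * exp(-alpha) when a
    # sample is classified correctly, weight * exp(+alpha) when it is not), except that the accumulated
    # stump weights are applied to a flat starting weight of 1/len(x) rather than to the previous weights.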
# normalize weights
sum_of_new_weights = sum(new_weights)
new_normalized_weights = new_weights/sum_of_new_weights
# create normalized distributions weights for random rows pulling
distribution_weights = []
accumulator = 0
for new_normalized_weight in new_normalized_weights:
accumulator += new_normalized_weight
distribution_weights.append(accumulator)
# based to rows weights values, randomly pick new data
new_x = pd.DataFrame(columns=x.columns)
new_y = pd.DataFrame(columns=y.columns)
array_of_distributions = np.asarray(distribution_weights) # transform list to array for np usage
for i in range(0, len(array_of_distributions), 1):
random_number = np.random.uniform(0, 1, 1)
index_of_row = (np.abs(array_of_distributions - random_number)).argmin()
if array_of_distributions[index_of_row] < random_number and index_of_row < len(x)-1:
index_of_row += 1
x_new_row = pd.DataFrame(x.iloc[index_of_row]).T
y_new_row = | pd.DataFrame(y.iloc[index_of_row]) | pandas.DataFrame |
from datetime import datetime
import warnings
import pytest
import pandas as pd
import pyodbc
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, conversion, create
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
@pytest.fixture(scope="module")
def sample():
dataframe = pd.DataFrame(
{
"_varchar": [None, "b", "c", "4", "e"],
"_tinyint": [None, 2, 3, 4, 5],
"_smallint": [256, 2, 6, 4, 5], # tinyint max is 255
"_int": [32768, 2, 3, 4, 5], # smallint max is 32,767
"_bigint": [2147483648, 2, 3, None, 5], # int max size is 2,147,483,647
"_float": [1.111111, 2, 3, 4, 5], # any decicmal places
"_time": [str(datetime.now().time())]
* 5, # string in format HH:MM:SS.ffffff
"_datetime": [datetime.now()] * 4 + [pd.NaT],
"_empty": [None] * 5,
}
)
return dataframe
def test_table_errors(sql):
table_name = "##test_table_column"
with pytest.raises(KeyError):
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, primary_key_column="Z")
def test_table_column(sql):
table_name = "##test_table_column"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "A")
assert all(schema["sql_type"] == "varchar")
assert all(schema["is_nullable"] == True)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "string")
assert all(schema["odbc_type"] == pyodbc.SQL_VARCHAR)
assert all(schema["odbc_size"] == 0)
assert all(schema["odbc_precision"] == 0)
def test_table_pk(sql):
table_name = "##test_table_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "FLOAT"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_composite_pk(sql):
table_name = "##test_table_composite_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(5)", "C": "FLOAT"}
primary_key_column = ["A", "B"]
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, 2, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, False, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_pk_input_error(sql):
with pytest.raises(ValueError):
table_name = "##test_table_pk_input_error"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "DECIMAL(5,2)"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
sql_primary_key=True,
)
def test_table_sqlpk(sql):
table_name = "##test_table_sqlpk"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, sql_primary_key=True)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 2
assert all(schema.index == ["_pk", "A"])
assert all(schema["sql_type"] == ["int identity", "varchar"])
assert all(schema["is_nullable"] == [False, True])
assert all(schema["ss_is_identity"] == [True, False])
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA], index=["_pk", "A"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True])
assert all(schema["pandas_type"] == ["Int32", "string"])
assert all(schema["odbc_type"] == [pyodbc.SQL_INTEGER, pyodbc.SQL_VARCHAR])
assert all(schema["odbc_size"] == [4, 0])
assert all(schema["odbc_precision"] == [0, 0])
def test_table_from_dataframe_simple(sql):
table_name = "##test_table_from_dataframe_simple"
dataframe = pd.DataFrame({"ColumnA": [1]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "ColumnA")
assert all(schema["sql_type"] == "tinyint")
assert all(schema["is_nullable"] == False)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "UInt8")
assert all(schema["odbc_type"] == pyodbc.SQL_TINYINT)
assert all(schema["odbc_size"] == 1)
assert all(schema["odbc_precision"] == 0)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result.equals(dataframe)
def test_table_from_dataframe_datestr(sql):
table_name = "##test_table_from_dataframe_datestr"
dataframe = pd.DataFrame({"ColumnA": ["06/22/2021"]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create_meta.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame({
'column_name': pd.Series(['ColumnA','_time_insert']),
'sql_type': pd.Series(['date','datetime2'], dtype='string'),
'is_nullable': pd.Series([False, True]),
'ss_is_identity': pd.Series([False, False]),
'pk_seq': pd.Series([None, None], dtype='Int64'),
'pk_name': pd.Series([None, None], dtype='string'),
'pandas_type': pd.Series(['datetime64[ns]', 'datetime64[ns]'], dtype='string'),
'odbc_type': pd.Series([pyodbc.SQL_TYPE_DATE, pyodbc.SQL_TYPE_TIMESTAMP], dtype='int64'),
'odbc_size': pd.Series([10, 27], dtype='int64'),
'odbc_precision': pd.Series([0, 7], dtype='int64'),
}).set_index(keys='column_name')
assert schema[expected.columns].equals(expected)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
def test_table_from_dataframe_errorpk(sql, sample):
with pytest.raises(ValueError):
table_name = "##test_table_from_dataframe_nopk"
sql.create.table_from_dataframe(table_name, sample, primary_key="ColumnName")
def test_table_from_dataframe_nopk(sql, sample):
table_name = "##test_table_from_dataframe_nopk"
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(
table_name, sample.copy(), primary_key=None
)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame(
{
"column_name": pd.Series(
[
"_varchar",
"_tinyint",
"_smallint",
"_int",
"_bigint",
"_float",
"_time",
"_datetime",
"_empty",
],
dtype="string",
),
"sql_type": pd.Series(
[
"varchar",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"time",
"datetime2",
"nvarchar",
],
dtype="string",
),
"is_nullable": pd.Series(
[True, True, False, False, True, False, False, True, True], dtype="bool"
),
"ss_is_identity": pd.Series([False] * 9, dtype="bool"),
"pk_seq": pd.Series([pd.NA] * 9, dtype="Int64"),
"pk_name": pd.Series([pd.NA] * 9, dtype="string"),
"pandas_type": pd.Series(
[
"string",
"UInt8",
"Int16",
"Int32",
"Int64",
"float64",
"timedelta64[ns]",
"datetime64[ns]",
"string",
],
dtype="string",
),
"odbc_type": pd.Series(
[
pyodbc.SQL_VARCHAR,
pyodbc.SQL_TINYINT,
pyodbc.SQL_SMALLINT,
pyodbc.SQL_INTEGER,
pyodbc.SQL_BIGINT,
pyodbc.SQL_FLOAT,
pyodbc.SQL_SS_TIME2,
pyodbc.SQL_TYPE_TIMESTAMP,
pyodbc.SQL_WVARCHAR,
],
dtype="int64",
),
"odbc_size": pd.Series([0, 1, 2, 4, 8, 8, 16, 27, 0], dtype="int64"),
"odbc_precision": pd.Series([0, 0, 0, 0, 0, 53, 7, 7, 0], dtype="int64"),
}
).set_index(keys="column_name")
assert schema[expected.columns].equals(expected.loc[schema.index])
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
def test_table_from_dataframe_sqlpk(sql, sample):
table_name = "##test_table_from_dataframe_sqlpk"
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(
table_name, sample.copy(), primary_key="sql"
)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame(
{
"column_name": pd.Series(
[
"_pk",
"_varchar",
"_tinyint",
"_smallint",
"_int",
"_bigint",
"_float",
"_time",
"_datetime",
"_empty",
],
dtype="string",
),
"sql_type": pd.Series(
[
"int identity",
"varchar",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"time",
"datetime2",
"nvarchar",
],
dtype="string",
),
"is_nullable": pd.Series(
[False, True, True, False, False, True, False, False, True, True],
dtype="bool",
),
"ss_is_identity": pd.Series([True] + [False] * 9, dtype="bool"),
"pk_seq": pd.Series([1] + [pd.NA] * 9, dtype="Int64"),
"pandas_type": pd.Series(
[
"Int32",
"string",
"UInt8",
"Int16",
"Int32",
"Int64",
"float64",
"timedelta64[ns]",
"datetime64[ns]",
"string",
],
dtype="string",
),
"odbc_type": pd.Series(
[
pyodbc.SQL_INTEGER,
pyodbc.SQL_VARCHAR,
pyodbc.SQL_TINYINT,
pyodbc.SQL_SMALLINT,
pyodbc.SQL_INTEGER,
pyodbc.SQL_BIGINT,
pyodbc.SQL_FLOAT,
pyodbc.SQL_SS_TIME2,
pyodbc.SQL_TYPE_TIMESTAMP,
pyodbc.SQL_WVARCHAR,
],
dtype="int64",
),
"odbc_size": pd.Series([4, 0, 1, 2, 4, 8, 8, 16, 27, 0], dtype="int64"),
"odbc_precision": | pd.Series([0, 0, 0, 0, 0, 0, 53, 7, 7, 0], dtype="int64") | pandas.Series |
## NOTES
## Data was not published on 2021-03-31 (Wednesday) -- not yet corrected but appears to be 3 staff on-campus cases
import sys
import re
import math
import json
import hashlib
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from pathlib import Path
from datetime import datetime, timedelta, date
from collections import defaultdict
DATA_FIELDS = {
9: "staff.on",
10: "staff.off",
11: "student.on",
12: "student.off",
14: "staff7.on",
15: "staff7.off",
16: "student7.on",
17: "student7.off",
19: "stafftotal.on",
20: "stafftotal.off",
21: "studenttotal.on",
22: "studenttotal.off",
}
TEXT_FIELDS = {
1: "Staff",
2: "Students",
4: "On campus *",
5: "Off campus **",
6: "On campus *",
7: "Off campus **",
8: "New cases in last counted 24 hour period ***",
13: "New cases in last counted 7 day period ***",
18: "Total cases since 28 Sept 2020 (start of Term 1)",
}
DEBUG = False
MONDAY = 0
DATE_LABEL = 'date'
DATASET_NAMES = ['staff.on', 'staff.off', 'student.on', 'student.off',
'staff7.on', 'staff7.off', 'student7.on', 'student7.off',
'stafftotal.on', 'stafftotal.off', 'studenttotal.on', 'studenttotal.off']
DATE_UPDATED = 'date.updated'
## These figures need smoothing over the weekend
SMOOTHED_NAMES = ['staff.on', 'staff.off', 'student.on', 'student.off']
def debug_log(*args):
if DEBUG:
print(*args, file=sys.stderr)
def cleanup_value(tag, file_date, field_index):
if tag.string == "New cases in last 24 hours ***" and field_index == 8:
return TEXT_FIELDS[8]
elif tag.string == "New cases in last 7 days ***" and field_index == 13:
return TEXT_FIELDS[13]
s = " ".join(tag.stripped_strings)
if ((file_date == date(2020,10,27) or file_date == date(2020,10,28))
and field_index in set([19, 20, 21, 22])):
## Totals published 2020-10-27 had a dagger symbol
## "The total now includes an additional 89 positive student cases that were confirmed
## by the UCL COVID-19 testing programme. These are mainly cases of symptomatic students
## in university accomodation who did not update Connect to Protect with their
## positive test result."
return s.replace("\u2020", "")
elif ((file_date == date(2020,11,5) or file_date == date(2020,11,6))
and field_index == 16):
## 7-day total was revised on 2020-11-05
## "This number has been updated following a review of historic cases on Connect to Protect"
return s.replace("\u2020", "")
elif file_date == date(2021,7,15) and field_index == 22:
## Was listed as 41, but this was clearly a typo
return "414"
else:
return s
def parse_html_date(groups, file_date):
year = groups[2]
if year is None:
if groups[1] in set(["October", "November", "December"]):
year = "2020"
else:
year = "2021"
html_date = datetime.strptime(groups[0] + " " + groups[1] + " " + year,
"%d %B %Y").date()
return html_date
DATE_RE = re.compile(r"\(last update \w+\s(\d+)\s(\w+)(?:\s(\d+))?\)")
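# Editorial example: DATE_RE.search("(last update Friday 6 November)").groups() returns
# ('6', 'November', None); parse_html_date then infers the missing year.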
def parse_file(fh, file_date = None):
soup = BeautifulSoup(fh, 'html.parser')
header = soup.select_one('.box > h2:nth-child(1)')
table = soup.select_one('#current-confirmed-cases-covid-19 > div.site-content.wrapper > div > div > div > article > div > table')
data = {}
if header.string == "Daily reported coronavirus cases (last update Tuesday\xa05\xa0January)":
## Handle data for start of term
all_tags = table.find_all(["td","th"])
data["staff.on"] = int(cleanup_value(all_tags[9], file_date, 9))
data["staff.off"] = int(cleanup_value(all_tags[10], file_date, 10))
data["student.on"] = int(cleanup_value(all_tags[11], file_date, 11))
data["student.off"] = int(cleanup_value(all_tags[12], file_date, 12))
data["stafftotal.on"] = int(cleanup_value(all_tags[14], file_date, 13))
data["stafftotal.off"] = int(cleanup_value(all_tags[15], file_date, 14))
data["studenttotal.on"] = int(cleanup_value(all_tags[16], file_date, 15))
data["studenttotal.off"] = int(cleanup_value(all_tags[17], file_date, 16))
data[DATE_UPDATED] = date(2021, 1, 5)
return table, data
match = DATE_RE.search(header.string)
assert(match)
html_date = parse_html_date(match.groups(), file_date)
data[DATE_UPDATED] = html_date
for i, tag in enumerate(table.find_all(["td","th"])):
if i in TEXT_FIELDS:
assert(cleanup_value(tag, file_date, i) == TEXT_FIELDS[i])
elif i in DATA_FIELDS:
data[DATA_FIELDS[i]] = int(cleanup_value(tag, file_date, i))
return table, data
def extract_df():
p = Path('../data')
duplicates = p / 'duplicates'
duplicates.mkdir(exist_ok=True)
original = p / 'original'
last_data = None
last_hash = None
## File date of the last file read
last_date = None
## Data to build into PANDAS dataframe
pd_data = []
tfh = open(p / 'original-tables.html', "w", newline='', encoding="utf-8")
tfh.write('<html><head><meta charset="UTF-8"></head><body>\n')
for file in sorted(original.glob("covid-*.html")):
debug_log("Loading from", file)
with file.open("rb") as fh:
data = fh.read()
if len(data) == 0:
continue
hash = hashlib.sha256()
hash.update(data)
file_hash = hash.hexdigest()
if file_hash == last_hash:
debug_log("File is a duplicate (hash)", file.name)
file.rename(duplicates / file.name)
continue
else:
last_hash = file_hash
with file.open("rb") as fh:
file_date = datetime.strptime(file.name, "covid-%Y-%m-%dT%H-%M-%S.html").date()
if file_date.weekday() == 0:
## Monday, data is correct as of Friday 5pm
data_date = file_date - timedelta(days = 3)
else:
## other days, data is correct as of previous day at 5pm
data_date = file_date - timedelta(days = 1)
table, data = parse_file(fh, file_date)
if data != last_data:
## Check if data has changed but file date has not
is_extra = (file_date == last_date)
if is_extra:
debug_log("Extra data at", file_date)
last_date = file_date
debug_log("New data at", file_date)
if is_extra:
tfh.write('<h2 style="color: red">Extra data published on ' + file_date.strftime("%Y-%m-%d (%A)") + "</h2>\n")
else:
tfh.write("<h2>Data published on " + file_date.strftime("%Y-%m-%d (%A)") + "</h2>\n")
tfh.write("<code>"+file.name+"</code>\n")
tfh.write(str(table))
if (data[DATE_UPDATED] != file_date):
debug_log("Date mismatch at " + str(data[DATE_UPDATED]) +
" (html) and " + str(file_date) + "(file name)")
pd_row = []
pd_row.append( | pd.to_datetime(data_date) | pandas.to_datetime |
import addfips
import os
import pandas as pd
import datetime
ageVariables = {
'DATE': 'date_stamp',
'AGE_RANGE': 'age_group',
'AR_TOTALCASES': 'cnt_confirmed',
'AR_TOTALPERCENT': 'pct_confirmed',
'AR_NEWCASES': 'cnt_confirmed_new',
'AR_NEWPERCENT': 'pct_confirmed_new',
'AR_TOTALDEATHS' : 'cnt_death',
'AR_NEWDEATHS': 'cnt_death_new'
}
countyVariables = {
'DATE': 'date_stamp',
'COUNTY': 'us_county_fips',
'TOTAL_CASES': 'cnt_total',
'NEW_CASES': 'cnt_total_new',
'TOTAL_CONFIRMED': 'cnt_confirmed',
'NEW_CONFIRMED': 'cnt_confirmed_new',
'TOTAL_PROBABLE': 'cnt_probable',
'NEW_PROBABLE': 'cnt_probable_new',
'POS_TESTS': 'cnt_tested_pos',
'NEG_TESTS': 'cnt_tested_neg',
'TOTAL_TESTS': 'cnt_tested',
'NEW_TESTS': 'cnt_tested_new',
'NEW_DEATHS': 'cnt_death_new',
'TOTAL_DEATHS': 'cnt_death',
'NEW_RECOVERED': 'cnt_recovered_new',
'TOTAL_RECOVERED': 'cnt_recovered',
'NEW_ACTIVE': 'cnt_active_new',
'TOTAL_ACTIVE': 'cnt_active',
'NEW_HOSPITALIZED': 'cnt_hospitalized_new',
'TOTAL_HOSPITALIZED': 'cnt_hospitalized',
}
dailyVariables = {
'DATE': 'date_stamp',
'TOTAL_CASES': 'cnt_total',
'NEW_CASES': 'cnt_total_new',
'TOTAL_CONFIRMED': 'cnt_confirmed',
'NEW_CONFIRMED': 'cnt_confirmed_new',
'TOTAL_PROBABLE': 'cnt_probable',
'NEW_PROBABLE': 'cnt_probable_new',
'POS_TESTS': 'cnt_tested_pos',
'NEG_TESTS': 'cnt_tested_neg',
'TOTAL_TESTS': 'cnt_tested',
'NEW_TESTS': 'cnt_tested_new',
'NEW_DEATHS': 'cnt_death_new',
'TOTAL_DEATHS': 'cnt_death',
'NEW_RECOVERED': 'cnt_recovered_new',
'TOTAL_RECOVERED': 'cnt_recovered',
'NEW_ACTIVE': 'cnt_active_new',
'TOTAL_ACTIVE': 'cnt_active',
'NEW_HOSP': 'cnt_hospitalized_new',
'TOTAL_HOSP': 'cnt_hospitalized',
}
raceEthSexVariables = {
'Date': 'date_stamp',
'Category': 'category_type',
'Cat_Detail': 'category_name',
'CAT_DETAIL': 'category_name',
'Cat_CaseCount': 'cnt_confirmed',
'Cat_Percent': 'pct_confirmed',
'CAT_DEATHCOUNT' : 'cnt_death',
'CAT_DEATHPERCENT': 'pct_death'
}
def cleanAgeData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(ageVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# Code age ranges
df['age_group'] = df['age_group'].map({ '0-10 years':'00', '11-20 years': '11', '21-30 years': '21', '31-40 years': '31', '41-50 years': '41', '51-60 years': '51', '61-70 years': '61', '71-80 years': '71', '81+ years': '81', 'Pending': '99' })
# multiply the percentages by 100
df['pct_confirmed'] = df['pct_confirmed'].apply(lambda x: round(x*100,4))
df['pct_confirmed_new'] = df['pct_confirmed_new'].apply(lambda x: round(x*100, 4))
#cast count variables to integers
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
# reorder so that the cnt and new are always next to each other in the same order
df = df[['date_stamp', 'age_group', 'cnt_confirmed', 'cnt_confirmed_new', 'pct_confirmed', 'pct_confirmed_new', 'cnt_death', 'cnt_death_new']]
# order the records by date
df = df.sort_values(by=['date_stamp','age_group'], ascending=True)
return df
def cleanCountyData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(countyVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# Copy original county value to keep the pending and out of state values
df['tn_covid_geo'] = df['us_county_fips']
# Change county name to fips code
af = addfips.AddFIPS()
fips = []
for key, value in df['us_county_fips'].items():
fips.append(af.get_county_fips(value, 'Tennessee'))
df['us_county_fips'] = fips
# Copy appropriate fips codes to covid geo
df.loc[(df['tn_covid_geo'] != 'Pending') & (df['tn_covid_geo'] != 'Out of State'), 'tn_covid_geo'] = df['us_county_fips']
df.loc[df['tn_covid_geo'] == 'Pending', 'tn_covid_geo'] = '47PEN'
df.loc[df['tn_covid_geo'] == 'Out of State', 'tn_covid_geo'] = '47OOS'
    # cast counts to nullable Int32 (allows missing values)
df['cnt_total'] = df['cnt_total'].astype(pd.Int32Dtype())
df['cnt_total_new'] = df['cnt_total_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
df['cnt_confirmed_new'] = df['cnt_confirmed_new'].astype(pd.Int32Dtype())
if 'cnt_probable' in df.columns:
df['cnt_probable'] = df['cnt_probable'].astype(pd.Int32Dtype())
df['cnt_probable_new'] = df['cnt_probable_new'].astype(pd.Int32Dtype())
df['cnt_tested_pos'] = df['cnt_tested_pos'].astype(pd.Int32Dtype())
df['cnt_tested_neg'] = df['cnt_tested_neg'].astype(pd.Int32Dtype())
df['cnt_tested'] = df['cnt_tested'].astype(pd.Int32Dtype())
df['cnt_tested_new'] = df['cnt_tested_new'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_recovered_new'] = df['cnt_recovered_new'].astype(pd.Int32Dtype())
df['cnt_recovered'] = df['cnt_recovered'].astype(pd.Int32Dtype())
df['cnt_active_new'] = df['cnt_active_new'].astype(pd.Int32Dtype())
df['cnt_active'] = df['cnt_active'].astype(pd.Int32Dtype())
df['cnt_hospitalized_new'] = df['cnt_hospitalized_new'].astype( | pd.Int32Dtype() | pandas.Int32Dtype |
__all__=["query_unit_data","query_all_data"]
import numpy as np
import pandas as pd
import sqlite3
import pathlib
# this is internal use.
def query_unit_data(unitcode,start_date,end_date=None,train_days=7):
#print(pathlib.Path(__file__).parents[0].joinpath('data','overlook','temp_db.db').__str__())
conn=sqlite3.connect(pathlib.Path(__file__).parents[1].joinpath('data','bldg1','temp_db.db').__str__())
strftime_format="%Y-%m-%d %H:%M:%S" #filter time in this format
start_date_utc=pd.Timestamp(start_date,tz="America/Indianapolis").tz_convert("UTC")
if end_date is None:
end_date_utc=(start_date_utc+pd.Timedelta(days=train_days))
else:
end_date_utc=pd.Timestamp(end_date,tz="America/Indianapolis").tz_convert("UTC")
query_weather=f"SELECT * from WEATHER where timestamp>= '{start_date_utc.strftime(strftime_format)}' and timestamp <'{end_date_utc.strftime(strftime_format)}'"
data_weather= pd.read_sql_query(query_weather, conn)
query_gem=f"SELECT * from GEM where timestamp>= '{start_date_utc.strftime(strftime_format)}' and timestamp <'{end_date_utc.strftime(strftime_format)}' and unitcode='{unitcode}' "
data_gem= pd.read_sql_query(query_gem, conn)
query_ecobee=f"SELECT * from ECOBEE where timestamp>= '{start_date_utc.strftime(strftime_format)}' and timestamp <'{end_date_utc.strftime(strftime_format)}' and unitcode='{unitcode}' "
data_ecobee= pd.read_sql_query(query_ecobee, conn)
conn.close()
# convert timestamp to INDY time
data_gem['timestamp']=pd.to_datetime(data_gem['timestamp'],utc=True).dt.tz_convert("America/Indianapolis")
data_weather['timestamp']=pd.to_datetime(data_weather['timestamp'],utc=True).dt.tz_convert("America/Indianapolis")
data_ecobee['timestamp']=pd.to_datetime(data_ecobee['timestamp'],utc=True).dt.tz_convert("America/Indianapolis")
# select columns
data_gem['hvac']=data_gem['heatpump']+data_gem['ahu']
data_gem=data_gem[['timestamp','unitcode','ahu','heatpump','hvac','net']]
# wattsecond to watt
dtime=pd.Timedelta(data_gem['timestamp'][1]-data_gem['timestamp'][0]).seconds
#print(dtime)
data_gem=data_gem.apply(lambda x: x/dtime if x.name in ['ahu', 'heatpump','hvac','net'] else x)
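    # Editorial example: with one-minute GEM samples dtime is 60, so an accumulated reading of
    # 60000 watt-seconds over the interval becomes an average draw of 1000 W.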
data_ecobee=data_ecobee[['timestamp','unitcode','operation','t_unit','rh_unit']]
data_ecobee['t_unit']=((data_ecobee['t_unit'].to_numpy())-32)/1.8 # F to C
data_ecobee['rh_unit']=((data_ecobee['rh_unit'].to_numpy()/100)) # % to -
vec=data_ecobee['operation'].to_numpy()
vec[vec=="heat"]="heat1"
vec[vec=="cool"]="cool1"
vec[vec=="aux"]="aux1"
vec[vec=="heat_aux"]="heat1_aux1"
data_ecobee['operation']=vec
#data_ecobee['t_rs_m']=((data_ecobee['t_rs_m'].to_numpy())-32)/1.8 # F to C
#data_ecobee['sp_heat']=((data_ecobee['sp_heat'].to_numpy())-32)/1.8 # F to C
#data_ecobee['sp_cool']=((data_ecobee['sp_cool'].to_numpy())-32)/1.8 # F to C
data_weather=data_weather[['timestamp','t_out','rh_out']]
data_weather['t_out']=((data_weather['t_out'].to_numpy())-32)/1.8 # F to C
data_weather['rh_out']=((data_weather['rh_out'].to_numpy()/100)) # % to -
# join tables
data_unit=pd.merge(pd.merge(data_gem,data_ecobee,on=['timestamp','unitcode'],how='left'),data_weather,on='timestamp',how='left')
data_unit=data_unit.rename(columns={"t_out":"T_out"})
data_unit=data_unit.rename(columns={"t_unit":"T_in"})
data_unit=data_unit.rename(columns={"rh_unit":"rh_in"})
#self.observed_model_input=data_unit[['timestamp','function','state','setpoint_cooling','setpoint_heating','t_out']].copy()
return data_unit
def query_all_data(start_date,train_days=7):
#print(pathlib.Path(__file__).parents[0].joinpath('data','overlook','temp_db.db').__str__())
conn=sqlite3.connect(pathlib.Path(__file__).parents[1].joinpath('data','bldg1','temp_db.db').__str__())
strftime_format="%Y-%m-%d %H:%M:%S" #filter time in this format
start_date_utc=pd.Timestamp(start_date,tz="America/Indianapolis").tz_convert("UTC")
end_date_utc=(start_date_utc+pd.Timedelta(days=train_days))
query_weather=f"SELECT * from WEATHER where timestamp>= '{start_date_utc.strftime(strftime_format)}' and timestamp <'{end_date_utc.strftime(strftime_format)}'"
data_weather= pd.read_sql_query(query_weather, conn)
query_gem=f"SELECT * from GEM where timestamp>= '{start_date_utc.strftime(strftime_format)}' and timestamp <'{end_date_utc.strftime(strftime_format)}'"
data_gem= pd.read_sql_query(query_gem, conn)
query_ecobee=f"SELECT * from ECOBEE where timestamp>= '{start_date_utc.strftime(strftime_format)}' and timestamp <'{end_date_utc.strftime(strftime_format)}'"
data_ecobee= pd.read_sql_query(query_ecobee, conn)
conn.close()
# convert timestamp to INDY time
data_gem['timestamp']=pd.to_datetime(data_gem['timestamp'],utc=True).dt.tz_convert("America/Indianapolis")
print(data_gem.head())
data_weather['timestamp']=pd.to_datetime(data_weather['timestamp'],utc=True).dt.tz_convert("America/Indianapolis")
data_ecobee['timestamp']= | pd.to_datetime(data_ecobee['timestamp'],utc=True) | pandas.to_datetime |
import os
from tqdm import tqdm
import albumentations as A
import cv2
import matplotlib.pyplot as plt
import time
from visualizer import visualize
from PIL import Image
import pandas as pd
DIR_IMG_SRC = "data\\img\\ori"
DIR_MASK_SRC = "data\\img\\mask"
MASK_FORMAT = ".png"
IMG_FORMAT = ".jpg"
N_IMG = len(os.listdir(DIR_IMG_SRC))
N_AUG_PER_IMG = 0
DATASET = | pd.read_csv("data\\label\\dataset.csv", sep=',', index_col=0) | pandas.read_csv |
"""Test attributing simple impact."""
import numpy as np
import pandas as pd
import pytest
from nbaspa.data.endpoints.pbp import EventTypes
from nbaspa.player_rating.tasks import SimplePlayerImpact
@pytest.mark.parametrize(
"evt",
[
EventTypes.REBOUND,
EventTypes.FREE_THROW,
EventTypes.VIOLATION,
EventTypes.FIELD_GOAL_MISSED
]
)
def test_basic_impact(evt):
"""Test attributing simple impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": evt,
"NBA_WIN_PROB_CHANGE": [0.1, 0.1],
"HOMEDESCRIPTION": ["DESCRIPTION", None],
"VISITORDESCRIPTION": [None, "DESCRIPTION"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": 0,
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.1, -0.1]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([0.0, 0.0]))
assert output["PLAYER3_IMPACT"].equals(pd.Series([0.0, 0.0]))
def test_foul_impact():
"""Test attributing foul impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": EventTypes.FOUL,
"NBA_WIN_PROB_CHANGE": [0.1, 0.1],
"HOMEDESCRIPTION": ["DESCRIPTION", None],
"VISITORDESCRIPTION": [None, "DESCRIPTION"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": [456, 123],
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.1, -0.1]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([-0.1, 0.1]))
assert output["PLAYER3_IMPACT"].equals(pd.Series([0.0, 0.0]))
def test_deadball_impact():
"""Test attributing deadball turnover impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": EventTypes.TURNOVER,
"NBA_WIN_PROB_CHANGE": 0.1,
"HOMEDESCRIPTION": ["DESCRIPTION", None],
"VISITORDESCRIPTION": [None, "DESCRIPTION"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": 0,
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.1, -0.1]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([0.0, 0.0]))
assert output["PLAYER3_IMPACT"].equals(pd.Series([0.0, 0.0]))
def test_steal_impact():
"""Test attributing steal impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": EventTypes.TURNOVER,
"NBA_WIN_PROB_CHANGE": [0.1, 0.1],
"HOMEDESCRIPTION": ["STL", None],
"VISITORDESCRIPTION": [None, "STL"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": [456, 123],
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([-0.1, 0.1]))
assert output["PLAYER2_IMPACT"].equals( | pd.Series([0.1, -0.1]) | pandas.Series |
###############################################################################################
#### Initialization
import pandas as pd
import numpy as np
df = pd.read_csv(filename, header=None, names=col_names, na_values={'col_name':['-1']}, \
parse_dates=[[0, 1, 2]], index_col='Date')
# if the first 3 columns are 'year','month','day', then the dataframe would have a single col named
# 'year_month_day' of datatype 'datatime64[ns]'
# Can use df.index = df['year_month_day'] to reassign this col as the index of df
## EDA == Exploratory Data Analysis
###############################################################################################
#### Basic data exploration
df.shape # shape of dataframe
df.head(7) # print the head part of dataset
df.tail(5) # print the tail part of dataset
df.info() # return data type of each column, and number of non-null values
df.count() # count items for each column
df.describe() # summary stat of numerical data
# df.mean(), df.median(), df.std(), df.quantile([0.25, 0.75]), df.min(), df.max()
df['one_col_name'].unique() # unique values in a column
df['one_col_name'].value_counts(dropna=False) # return frequency counts of a column
df['one_col_name'].value_counts(dropna=False).head() # note the result of prev line is a pandas Series
df.idxmax(axis=0) # Or use axis='index'
df.idxmin(axis=1) # Or use axis='columns'
# indexes of max/min vals for each column/row
###############################################################################################
#### Row & column index manipulation
df.columns # names of all the columns, usually class of Index
# can be assigned with a list of new names.
df.index # row indexes, can be class of Index or DatetimeIndex; can be assigned with a list of new indexes
df.index = df.index.map(str.lower) # use map to transform the index with a function
# pandas Index objects are immutable. Must reset the whole indexes of df at once
df = df.set_index(['col1', 'col2']) # change to multiple index (index being of class MultiIndex)
df = df.sort_index() # change multiple index to hierarchical index
# use tuple to slice multiple index
# use slice(None) to indicate ":" in the tuple
# more advanced manipulation of multiple indexes = stacking and unstacking
# please refer to datacamp course "Manipulating DataFrames with pandas"
df.reindex(ordered_index) # order rows by original index with the order in ordered_index
# ordered_index = somehow ordered list of original df indices
# if some item in ordered_index is not in orig_df_indices, there would be a row with that index but NA values
df.sort_index()
###############################################################################################
#### Data visualization for inspection
# use Bar plots for discrete data counts
# use Histograms for continuous data counts
df['one_col_name'].plot('hist')
import matplotlib.pyplot as plt
plt.show()
df.boxplot(column='one_numerical_col', by='one_categorical_col') # two columns are involved
df.boxplot(column='population', by='continent') # example of above
###############################################################################################
#### Data extraction & assignment (general)
## direct column access by column name
df["country"] # This is 1D labeled array (class: pandas.core.series.Series)
df[["country"]] # This is dataframe (class: pandas.core.frame.DataFrame)
## row/column access by (built-in) numerircal indexes
df[1:2] # single row as a dataframe...
# Note: row slicing cannot use a single number, which would be regarded as a col name
df.iloc[1] # row as pandas Series
df.iloc[[1, 2, 3]]
df.iloc[[1,2,3], [0, 1]]
df.iloc[:, [0,1]]
## row/column access by labels
df.loc["RU"] # row as Pandas Series
df.loc[["RU", "IN", "CH"]] # row as Pandas dataframe
df.loc[["RU", "IN", "CH"], ["country", "capital"]]
df.loc[:, ["country", "capital"]]
## filtering
df[df["area"] > 8]
df[np.logical_and(df["area"] > 8, df["area"] < 10)] # or use the next line
df[(df["area"] > 8 & df["area"] < 10)]
df[np.logical_or(df["area"] < 8, df["area"] > 10)] # or use the next line
df[(df["area"] < 8 | df["area"] > 10)]
## extract df values as ndarrays
data_array = df.values # extract the values as ndarray
col_array = df['col_name'].values # extract column values as ndarray
np.concatenate([arr1, arr2], axis=1)
## create new columns
df['new_col'] = df['existing_col'].str[0] # extract 1st char of 'existing_col' and save as 'new_col' in df
# note that 'str' here is an attribute name
df['str_split'] = df['existing_col'].str.split('_') # split string with '_' and save as 'str_split' col
df['new_col0'] = df['str_split'].str.get(0)
df['new_col1'] = df['str_split'].str.get(1)
df['new_col'] = df['col_name'].str.upper()
df['new_mask_col'] = df['col_name'].str.contains('given_substring') # Boolean data
for label, row in df.iterrows():
df.loc[label, "new_col"] = len(row["country"]) # added a new column "new_col" as function of existing data
df["new_col"] = df["country"].apply(len)
df['new_col'] = 0.0 # assign values with broadcasting
## create new copies of existing dataframes
df2 = df.copy()
sorted_df = df.sort_values('col_name') # sort rows (including index) by values in col 'col_name'
## modify existing entries
df.iloc[::3, -1] = np.nan # assign values with broadcasting
## delete row/column
del df['col_name']
df.drop(['col_name1', 'col_name2'], axis=1)
df.drop([1, 2]) # delete rows by numerical indexes
df.drop(index='row_ind') # delete rows by row index
## manage data types
df['treatment b'] = df['treatment b'].astype(str)
df['sex'] = df['sex'].astype('category')
df['treatment a'] = pd.to_numeric(df['treatment a'], errors='coerce') # force conversion
## manage duplicate rows
df = df.drop_duplicates() # drop duplicate rows
## manage missing data (NA/null/NaN)
df_dropped = df.dropna(how='any') # drop rows with NaN values
df['sex'] = df['sex'].fillna(obj_to_fill) # in 'sex' column, fill NaN with obj_to_fill (e.g. mean value)
checker_df = df.notnull() # boolean for each entry of the dataframe
checker_df_reverse = df.isnull() # boolean for each entry of the dataframe
checker_each_col = df.notnull().all() # aggregated for each column
checker_each_col_reverse = df.isnull().any() # aggregated for each column
checker_col = df.one_col_name.notnull() # boolean for the col "one_col_name"
###############################################################################################
#### tidy data
# tidy data principle: rows contain observations, columns form variables
# pd.melt(): solve the problem of columns (names) containing values, instead of variables
# ... by turning columns into rows
new_df = pd.melt(frame=df, id_vars=list_names_cols, value_vars=['treatment a', 'treatment b'], \
var_name='treatment', value_name='result')
# the columns in list_names_cols remain unchanged
# the 'treatment a' and 'treatment b' cols become values of a new col called 'treatment'
# the original table values are collected as values of a new col called 'result'
# pivot: opposite of melting
# ... by taking unique values from a column and create new columns
weather_tidy = weather.pivot(index='date', columns='element', values='value')
# the levels in 'element' column become new col names
# if the values are not specified or multiple, the new columns would become hierarchical index
# if there are duplicate entries for an index/column pair, use pivot_table with an aggregation function
weather_tidy = weather.pivot_table(index='date', columns='element', values='value', aggfunc=np.mean)
# more advanced manipulation of multiple indexes = stacking and unstacking
# please refer to datacamp course "Manipulating DataFrames with pandas"
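## quick stack/unstack illustration (uses weather_tidy from above)
long_again = weather_tidy.stack()     # innermost column level -> innermost index level
wide_again = long_again.unstack()     # innermost index level -> back to columns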
###############################################################################################
#### Data (table) joining/concatenation (like in SQL)
## concatenate dataframes
vertical_stacked = df1.append(df2) # indices are also stacked
vertical_stacked.reset_index(drop=True) # result would be the same as the following line
vertical_stacked = pd.concat([df1, df2], axis=0, ignore_index=True) # new indexes range from 0 to n_tot
hori_cat = pd.concat([df1, df2], axis=1, join='outer') # rows with the same index would be merged to single row. cols are stacked
hori_cat = pd.concat([df1, df2], axis=1, join='inner') # only return rows with index in both df1 and df2
df1.join(df2, how='inner/outer/left/right') # join by index
## concatenate lots of tables
import glob
csv_files = glob.glob('*.csv')
list_data = [pd.read_csv(filename) for filename in csv_files]
pd.concat(list_data)
## merge data (index is usually ignored)
| pd.merge(left=df_state_populations, right=df_state_codes, on=None, left_on='state', right_on='name') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 27 13:53:50 2016
@author: au194693
"""
import numpy as np
import scipy.io as sio
import pandas as pd
from my_settings import *
data = sio.loadmat(data_path + "behavoural_results.mat")["data_all"]
b_df = pd.DataFrame()
for j in range(len(data)):
baseline = data[j, 0].mean()
invol_trials = data[j, 3].squeeze()
    if len(invol_trials) == 90:
invol_trials = invol_trials[1:]
error = (np.std(data[j, 3]) * 2 + invol_trials.mean(),
-np.std(data[j, 3]) * 2 + invol_trials.mean())
for i in range(len(invol_trials)):
row = pd.DataFrame([{"subject": "p%s" % (j + 2),
"condition": "invol",
"binding": invol_trials[i] - baseline,
"trial_number": i + 1,
"trial_status":
error[1] <=
(invol_trials[i] - baseline) <= error[0],
"error": error,
"raw_trial": invol_trials[i],
"baseline": baseline}])
b_df = b_df.append(row, ignore_index=True)
# b_df = pd.DataFrame()
for j in range(len(data)):
baseline = data[j, 0].mean()
vol_trials = data[j, 2].squeeze()
# if len(vol_trials) is 90:
# vol_trials = vol_trials[1:]
error = (np.std(data[j, 3]) * 2 + vol_trials.mean(),
-np.std(data[j, 3]) * 2 + vol_trials.mean())
for i in range(len(vol_trials)):
row = pd.DataFrame([{"subject": "p%s" % (j + 2),
"condition": "vol",
"binding": vol_trials[i] - baseline,
"trial_number": i + 1,
"trial_status":
error[1] <=
(vol_trials[i] - baseline) <= error[0],
"error": error,
"raw_trial": vol_trials[i],
"baseline": baseline}])
b_df = b_df.append(row, ignore_index=True)
# Calculate mean correlation
b_df = | pd.read_csv(results_folder + "/behavioural_results.csv") | pandas.read_csv |
# GLMMOD @hamiHamtaro
# dependencies:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
import pandas as pd
import scipy as sp
import statistics
import mat73
class glm:
def __init__(self, ST, P, hd):
# remove nans and infinite values
idx_finite = np.where(np.isfinite(P[:,1]))[0]
idx_notnan = np.where(~np.isnan(P[:,1]))[0]
keep_idx = np.intersect1d(idx_finite, idx_notnan)
self.P = P[keep_idx,:]
self.x = P[keep_idx,1]
self.y = P[keep_idx,2]
self.t = P[keep_idx,0]
self.hd = hd[keep_idx,0];# make sure input is [0,2pi]
self.dt = P[1,0]-P[0,0]
self.ST = ST # spiketimes (not train)
def get_size(self):
'''get size of recording box'''
boxsz = np.nanmax([np.nanmax(self.x), np.nanmax(self.y)])
return boxsz
def pos_map(self, nbins=10):
'''design matrix for position variables'''
boxsz = self.get_size()
bins = np.arange(boxsz/nbins/2, boxsz-boxsz/nbins/2, round(boxsz/nbins))
posgrid = np.zeros((len(self.x), nbins**2))
for idx,val in enumerate(self.x):
xvec = np.abs(self.x[idx]-bins); yvec = np.abs(self.y[idx]-bins);
min_x = np.min(xvec)
min_y = np.min(yvec)
idx_x = np.where(xvec == min_x); idx_x = idx_x[0][0];
idx_y = np.where(yvec == min_y); idx_y = idx_y[0][0];
bin_idx = np.ravel_multi_index((idx_y,idx_x), dims=(nbins,nbins), order='C') # a11=0, a12=1, a13=2;
posgrid[idx, bin_idx] = 1
return posgrid, bins
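    # Editorial note: np.ravel_multi_index maps the 2-D bin (row, col) to a flat C-order index,
    # e.g. np.ravel_multi_index((1, 2), dims=(10, 10)) == 12 on this 10x10 grid.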
def eb_map(self, nbins=10, rp=[75,75]):
'''design matrix for egocentric variables'''
refx = rp[0]; refy = rp[1]
allo = (np.arctan2(refy-self.y, refx-self.x) + (np.pi/2)) % (2*np.pi) # add 90 deg (so that up is 0 deg)
ego = (allo - self.hd) % (2*np.pi)
ego = sp.ndimage.gaussian_filter1d(ego, 3) # smooth by 3 std.
egogrid = np.zeros((len(self.P),nbins))
bins = np.linspace(0, 2*np.pi,nbins+1)
bins[0] = -0.0001
bins[-1] = 2.*np.pi+0.0001
for i in np.arange(nbins):
whiches = (ego>bins[i])*(ego<=bins[i+1])
egogrid[whiches, i] = 1
# print(np.sum(egogrid,axis=0))
return egogrid,ego, bins
def hd_map(self, nbins=10):
'''design matrix for head direction'''
hd = sp.ndimage.gaussian_filter1d(self.hd, 3) # smooth by 3 std.
hdgrid = np.zeros((len(self.P),nbins));
bins = np.linspace(0, 2*np.pi,nbins+1)
bins[0] = -0.0001
bins[-1] = 2.*np.pi+0.0001
for i in np.arange(nbins):
whiches = (hd>bins[i])*(hd<=bins[i+1])
hdgrid[whiches, i] = 1
return hdgrid, bins
def conv_spktrain(self, Xx=np.linspace(-4,4,9),
sigma=2,c=0,defaultST=True,spikeIn=[1,2,3],dt=0.02):
'''get smoothed spiketrain from spiketimes (in Hz)
**kwargs:
        defaultST - True to bin self.ST (spiketimes) into a spiketrain,
                    False to use a pre-binned spiketrain passed via spikeIn
        spikeIn - binned spike counts; used only when defaultST==False
'''
if defaultST==True:
t = self.t; dt = self.dt; # time per frame
boolean_spk = np.logical_and(t[0] <= self.ST, self.ST <= t[-1])
spikes = self.ST[boolean_spk == True]
edgesT = np.linspace(t[0], t[-1], len(t)+1)
binnedSpikes, timeEdges = np.histogram(spikes, edgesT)
elif defaultST==False:
binnedSpikes = spikeIn
# remove any nans/infinite values in spiketrain
idx_inf = np.where(~np.isfinite(binnedSpikes))[0]
idx_nan = np.where(np.isnan(binnedSpikes))[0]
replace_idx = np.union1d(idx_inf, idx_nan)
binnedSpikes[replace_idx] = 0
# convolve w/ gaussian membership function
filt = np.exp((-(Xx-c)**2)/(2*(sigma**2)))
smooth_spike_count = np.convolve(binnedSpikes, filt, mode='same')
smooth_fr = smooth_spike_count/dt # rate (hz)
return smooth_fr, binnedSpikes, filt, dt
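    # Editorial note: the default kernel exp(-(Xx-c)**2 / (2*sigma**2)) with Xx = linspace(-4, 4, 9)
    # and sigma = 2 is a 9-tap Gaussian whose taps sum to ~4.9; it is not normalised, so smooth_fr
    # is scaled by that factor relative to the raw binned rate.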
def get_speed(self):
'''get speed of the animal (cm*s^-2)'''
t=self.P[:,0]
x=self.P[:,1]
y=self.P[:,2]
ntime = len(t)
v = np.zeros((ntime,1));
for idx in range(1,ntime-1):
v[idx,0] = np.sqrt((x[idx+1]-x[idx-1])**2 + (y[idx+1]-y[idx-1])**2)/(t[idx+1]-t[idx-1])
v[0,0] = v[1,0]; v[-1,0] = v[-2,0] # pad the array
return v
def speed_threshold(self,inputData):
v = self.get_speed()
maxspeed=50; minspeed=4
inbounds = np.logical_and((v<=maxspeed), (v>=minspeed))
inbounds = np.where(inbounds==True); inbounds = inbounds[0]
if np.ndim(inputData) == 1:
filtData = inputData[inbounds]
if np.ndim(inputData) == 2:
filtData = inputData[inbounds,:]
return filtData
def squish_statemat(self, spiketrain, stateIn, modelType='PE'):
""" Combine state matrices for multivariate models and compose
expression for calculating rate parameter. Spiketrain should be counts, and
not smoothed.
Parameters
----------
spiketrain : np array
speed-thresholded spiketrain (counts)
stateIn : np array or list of np arrays
for example [posgrid,ebgrid]
        modelType : str
model label, for example 'PE'
Returns
-------
df
dataframe with response variable and state matrix
expr
expression for the model of interest
"""
if modelType == 'PE':
posgrid = stateIn[0]; ebgrid = stateIn[1]
ntime,nbins_eb = np.shape(ebgrid)
_,nbins_p = np.shape(posgrid)
A = np.zeros((ntime, nbins_p+nbins_eb)) #P+EB
A[:,0:nbins_p] = posgrid; A[:,nbins_p:] = ebgrid
df = | pd.DataFrame(A) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test is not valid for numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
import requests
import pandas as pd
import numpy as np
import time
class FMP_CONNECTION(object):
def __init__(self,api_key:str):
self._api_key = api_key
def set_apikey(self,new_apikey):
self._api_key = new_apikey
def get_apikey(self) -> str:
return self._api_key
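# Merge two DataFrames on their index, keeping only the columns of second_df
# that are not already present in first_df.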
def _merge_dfs(self, first_df:pd.DataFrame, second_df:pd.DataFrame, how:str = 'left'):
cols_to_use = second_df.columns.difference(first_df.columns)
new_df = pd.merge(first_df, second_df[cols_to_use], left_index=True, right_index=True, how=how)
return new_df
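# Fetch a JSON payload from an FMP endpoint and return it as a DataFrame;
# historical endpoints nest their rows under the 'historical' key and are
# re-indexed by date in ascending order.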
def _get_df(self,url:str,is_historical:bool = False) -> pd.DataFrame:
response = requests.get(url)
if response.status_code == 200:
if response.json() == {}:
print('Requested instrument is empty when retrieving data')
return None
if is_historical == False:
response_df = pd.DataFrame.from_dict(response.json())
return response_df
else:
symbol = response.json()['symbol']
df = pd.DataFrame.from_dict(response.json()['historical'])
df.insert(0,'symbol',symbol)
df['date'] = pd.to_datetime(df['date'],infer_datetime_format=True)
df.sort_values(by='date',ascending=True,inplace=True)
df.set_index('date',inplace=True)
df.index = pd.to_datetime(df.index, infer_datetime_format=True)
return df
else:
raise ConnectionError('Could not connect to FMP Api, this was the response: \n',response.json())
def historical_price_by_interval(self,ticker:str,interval:str='1d') -> pd.DataFrame:
"""
Retrieve historical price data from various time granularities
Parameters
----------
ticker:str :
The ticker of the financial instrument to retrieve historical price data.
api_key:str :
your FMP API Key
interval: {1min,5min,15min,30min,1hour,4hour,1d,1w,1m,1q,1y} :
The granularity of how often the price historical data must be retrieved
(Default value = '1d')
Returns
-------
pd.DataFrame
"""
url = None
# Retrieve Historical info from 1 min to 4 hours
if interval in ['4hour','1hour','30min','15min','5min','1min']:
url = f'https://financialmodelingprep.com/api/v3/historical-chart/{interval}/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url)
historical_df.insert(0,'symbol',ticker)
if 'close' in historical_df.columns and 'date' in historical_df.columns:
historical_df.sort_values(by='date',ascending=True,inplace=True)
historical_df.set_index('date',inplace=True)
historical_df.index = pd.to_datetime(historical_df.index, infer_datetime_format=True)
historical_df['change'] = historical_df['close'].pct_change()
historical_df['realOpen'] = historical_df['close'].shift(1)
return historical_df
# Retrieve Daily Info
elif interval == '1d':
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url,True)
historical_df['change'] = historical_df['close'].pct_change()
historical_df['realOpen'] = historical_df['close'].shift(1)
return historical_df
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url,True)
historical_df['daily'] = pd.to_datetime(historical_df.index, infer_datetime_format=True)
# Retrieve Weekly, Monthly, Quarterly and Yearly Price Data
if interval == '1w':
historical_df['week'] = historical_df['daily'].dt.to_period('w').apply(lambda r: r.start_time)
df = historical_df.drop_duplicates(subset=['week'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1m':
historical_df['monthly'] = historical_df['daily'].astype('datetime64[M]')
df = historical_df.drop_duplicates(subset=['monthly'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1q':
historical_df['quarter'] = historical_df['daily'].dt.to_period('q')
df = historical_df.drop_duplicates(subset=['quarter'], keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1y':
historical_df['year'] = historical_df['daily'].dt.year
df = historical_df.drop_duplicates(subset=['year'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
else:
raise ValueError(f"unsupported interval '{interval}', check your spelling")
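# Example (assumed usage): with fmp = FMP_CONNECTION('<API_KEY>'), hourly bars
# come from fmp.historical_price_by_interval('AAPL', interval='1hour') and
# weekly bars from fmp.historical_price_by_interval('AAPL', interval='1w').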
def historical_closing_price(self,ticker:str,interval:str = '1d'):
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?serietype=line&apikey={self._api_key}'
df = self._get_df(url,True)
if df is None:
return None
df['date'] = pd.to_datetime(df.index, infer_datetime_format=True)
if interval == '1d':
return df
elif interval == '1w':
df['week'] = df['date'].dt.to_period('w').apply(lambda r: r.start_time)
df = df.drop_duplicates(subset=['week'], keep='first')
df = df.drop(columns=['week'])
elif interval == '1m':
df['monthly'] = df['date'].astype('datetime64[M]')
df = df.drop_duplicates(subset=['monthly'],keep='first')
df = df.drop(columns=['monthly'])
df['date'] = df['date'].astype('datetime64[M]')
elif interval == '1q':
df['quarter'] = df['date'].dt.to_period('q')
df = df.drop_duplicates(subset=['quarter'], keep='first')
df = df.drop(columns=['quarter'])
elif interval == '1y':
df['year'] = df['date'].dt.year
df = df.drop_duplicates(subset=['year'],keep='first')
df = df.drop(columns=['year'])
df = df.drop(columns=['date'])
return df
def get_closing_prices(self,tickers:[str], interval:str = '1d', from_date:str = None):
if isinstance(tickers,str):
df = self.historical_closing_price(tickers,interval)
closing_df = pd.pivot_table(data=df,index=df.index,columns='symbol',values='close',aggfunc='mean')
closing_df.index = pd.to_datetime(closing_df.index, infer_datetime_format=True)
from_d = from_date if from_date != None else closing_df.index.min()
return closing_df[from_d:]
else:
dfs = []
for ticker in tickers:
df = self.historical_closing_price(ticker,interval)
dfs.append(df)
x = pd.concat(dfs)
closing_df = pd.pivot_table(data=x, index=x.index, columns='symbol',values='close',aggfunc='mean')
closing_df.index = pd.to_datetime(closing_df.index, infer_datetime_format=True)
from_d = from_date if from_date != None else closing_df.index.min()
return closing_df[from_d:]
## CRYPTO CURRENCIES RELATED
def get_crypto_quote(self,ticker):
if isinstance(ticker,str):
url = f'https://financialmodelingprep.com/api/v3/quote/{ticker}?apikey={self.get_apikey()}'
df = self._get_df(url)
return df
elif isinstance(ticker,list):
dfs = []
for tick in ticker:
url = f'https://financialmodelingprep.com/api/v3/quote/{tick}?apikey={self.get_apikey()}'
df = self._get_df(url)
dfs.append(df)
cryptos = pd.concat(dfs)
cryptos.set_index('symbol',inplace=True)
return cryptos
def get_available_cryptos(self,min_marketcap=None):
url = f'https://financialmodelingprep.com/api/v3/symbol/available-cryptocurrencies?apikey={self.get_apikey()}'
df = self._get_df(url)
tickers = df['symbol'].unique()
quotes_info = []
for ticker in tickers:
quote = self.get_crypto_quote(ticker=ticker)
time.sleep(0.1)
quotes_info.append(quote)
quotes_df = pd.concat(quotes_info)
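# A hedged sketch of the remaining steps: apply the optional market-cap filter
# and return the combined quotes; the 'marketCap' column name is an assumption.
if min_marketcap is not None:
    quotes_df = quotes_df[quotes_df['marketCap'] >= min_marketcap]
return quotes_df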
import numpy as np
import imageio
import os
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
from brainio_base.stimuli import StimulusSet
class Stimulus:
def __init__(self, size_px=[448, 448], bit_depth=8,
stim_id=1000, save_dir='images', type_name='stimulus',
format_id='{0:04d}'):
self.save_dir = save_dir
self.stim_id = stim_id
self.format_id = format_id
self.type_name = type_name
self.white = np.uint8(2**bit_depth-1)
self.black = np.uint8(0)
self.gray = np.uint8(self.white/2+1)
self.size_px = size_px
self.objects = []
self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray
def add_object(self, stim_object):
self.objects.append(stim_object)
def build_stimulus(self):
for obj in self.objects:
self.stimulus[obj.mask] = obj.stimulus[obj.mask]
def clear_stimulus(self):
self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray
def show_stimulus(self):
my_dpi = 192
fig = plt.figure()
fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
ax = plt.axes([0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(self.stimulus, cmap='gray')
plt.show()
def save_stimulus(self):
file_name= self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
return file_name
class Grating:
def __init__(self, orientation=0, phase=0, sf=2, size_px=[448, 448], width=8,
contrast=1, bit_depth=8, pos=[0, 0], rad=5, sig=0,
stim_id=1000, format_id='{0:04d}', save_dir='images', type_name='grating'):
# save directory
self.save_dir = save_dir
self.stim_id = stim_id
self.format_id = format_id
# label for type of stimulus
self.type_name = type_name
# 1 channel colors, white, black, grey
self.white = np.uint8(2**bit_depth-1)
self.black = np.uint8(0)
self.gray = np.uint8(self.white/2+1)
# pixel dimensions of the image
self.size_px = np.array(size_px)
# position of image in field of view
self.pos = np.array(pos)
# pixel to visual field degree conversion
self.px_to_deg = self.size_px[1] / width
# size of stimulus in visual field in degrees
self.size = self.size_px / self.px_to_deg
# orientation in radians
self.orientation = orientation / 180 * np.pi
# phase of the grating
self.phase = phase / 180 * np.pi
# spatial frequency of the grating
self.sf = sf
# contrast of the grating
self.contrast = contrast
# make self.xv and self.yv store the degree positions of all pixels in the image
self.xv = np.zeros(size_px)
self.yv = np.zeros(size_px)
self.update_frame()
self.mask = np.ones(size_px, dtype=bool)
self.set_circ_mask(rad=rad)
self.tex = np.zeros(size_px)
self.stimulus = np.ones(size_px, dtype=np.uint8) * self.gray
self.envelope = np.ones(size_px)
if sig == 0:
self.update_tex()
else:
self.set_gaussian_envelope(sig)
def update_frame(self):
x = (np.arange(self.size_px[1]) - self.size_px[1]/2) / self.px_to_deg - self.pos[1]
y = (np.arange(self.size_px[0]) - self.size_px[0]/2) / self.px_to_deg - self.pos[0]
# all possible degree coordinates in matrices of points
self.xv, self.yv = np.meshgrid(x, y)
def update_tex(self):
# make the grating pattern
self.tex = (np.sin((self.xv * np.cos(self.orientation) + self.yv * np.sin(self.orientation)) *
self.sf * 2 * np.pi + self.phase) * self.contrast * self.envelope)
def update_stimulus(self):
self.stimulus[self.mask] = np.uint8(((self.tex[self.mask]+1)/2)*self.white)
self.stimulus[np.logical_not(self.mask)] = self.gray
def set_circ_mask(self, rad):
# apply operation to put a 1 for all points inclusively within the degree radius and a 0 outside it
self.mask = self.xv**2 + self.yv**2 <= rad ** 2
# same as circular mask but for an annulus
def set_annular_mask(self, inner_rad, outer_rad):
self.mask = (self.xv ** 2 + self.yv ** 2 <= outer_rad ** 2) * \
(self.xv ** 2 + self.yv ** 2 > inner_rad ** 2)
def set_gaussian_envelope(self, sig):
d = np.sqrt(self.xv**2 + self.yv**2)
self.envelope = np.exp(-d**2/(2 * sig**2))
self.update_tex()
def show_stimulus(self):
# pyplot stuff
self.update_stimulus()
my_dpi = 192
fig = plt.figure()
fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
ax = plt.axes([0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(self.stimulus, cmap='gray')
plt.show()
def save_stimulus(self):
# save to correct (previously specified) directory
self.update_stimulus()
file_name = self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
return file_name
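# Load a previously generated 'stimulus_set' CSV and wrap it in a
# brainio_base StimulusSet, attaching the local image paths keyed by image_id.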
def load_stim_info(stim_name, data_dir):
stim = pd.read_csv(os.path.join(data_dir, 'stimulus_set'), dtype={'image_id': str})
image_paths = dict((key, value) for (key, value) in zip(stim['image_id'].values,
[os.path.join(data_dir, image_name) for image_name
in stim['image_file_name'].values]))
stim_set = StimulusSet(stim[stim.columns[:-1]])
stim_set.image_paths = image_paths
stim_set.identifier = stim_name
return stim_set
def gen_blank_stim(degrees, size_px, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
stim = Stimulus(size_px=[size_px, size_px], type_name='blank_stim', save_dir=save_dir, stim_id=0)
stimuli = pd.DataFrame({'image_id': str(0), 'degrees': [degrees]})
image_names = (stim.save_stimulus())
stimuli['image_file_name'] = pd.Series(image_names)
stimuli['image_current_local_file_path'] = pd.Series(save_dir + os.sep + image_names)
stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)
def gen_grating_stim(degrees, size_px, stim_name, grat_params, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
width = degrees
nStim = grat_params.shape[0]
print('Generating stimulus: #', nStim)
stimuli = pd.DataFrame({'image_id': [str(n) for n in range(nStim)], 'degrees': [width] * nStim})
image_names = nStim * [None]
image_local_file_path = nStim * [None]
all_y = nStim * [None]
all_x = nStim * [None]
all_c = nStim * [None]
all_r = nStim * [None]
all_s = nStim * [None]
all_o = nStim * [None]
all_p = nStim * [None]
for i in np.arange(nStim):
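# Encode the parameter combination into a single numeric stimulus id by
# weighting each grating parameter with a different power of ten.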
stim_id = np.uint64(grat_params[i, 0] * 10e9 + grat_params[i, 1] * 10e7 + grat_params[i, 3] * 10e5 +
grat_params[i, 4] * 10e3 + grat_params[i, 5] * 10e1 + grat_params[i, 6])
grat = Grating(width=width, pos=[grat_params[i, 0], grat_params[i, 1]], contrast=grat_params[i, 2],
rad=grat_params[i, 3], sf=grat_params[i, 4], orientation=grat_params[i, 5],
phase=grat_params[i, 6], stim_id= stim_id, format_id='{0:012d}', save_dir=save_dir,
size_px=[size_px, size_px], type_name=stim_name)
image_names[i] = (grat.save_stimulus())
image_local_file_path[i] = save_dir + os.sep + image_names[i]
all_y[i] = grat_params[i, 0]
all_x[i] = grat_params[i, 1]
all_c[i] = grat_params[i, 2]
all_r[i] = grat_params[i, 3]
all_s[i] = grat_params[i, 4]
all_o[i] = grat_params[i, 5]
all_p[i] = grat_params[i, 6]
stimuli['position_y'] = pd.Series(all_y)
stimuli['position_x'] = pd.Series(all_x)
stimuli['contrast'] = pd.Series(all_c)
stimuli['radius'] = pd.Series(all_r)
stimuli['spatial_frequency'] = pd.Series(all_s)
stimuli['orientation'] = pd.Series(all_o)
stimuli['phase'] = pd.Series(all_p)
stimuli['image_file_name'] = pd.Series(image_names)
stimuli['image_current_local_file_path'] = pd.Series(image_local_file_path)
stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)
def gen_grating_stim_old(degrees, size_px, stim_name, grat_contrast, grat_pos, grat_rad, grat_sf, grat_orientation,
grat_phase, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
width = degrees
nStim = len(grat_pos) * len(grat_pos) * len(grat_contrast) * len(grat_rad) * len(grat_sf) * len(grat_orientation) \
* len(grat_phase)
print('Generating stimulus: #', nStim)
stimuli = pd.DataFrame({'image_id': [str(n) for n in range(nStim)], 'degrees': [width] * nStim})
image_names = nStim * [None]
image_local_file_path = nStim * [None]
all_y = nStim * [None]
all_x = nStim * [None]
all_c = nStim * [None]
all_r = nStim * [None]
all_s = nStim * [None]
all_o = nStim * [None]
all_p = nStim * [None]
i = 0
for y in np.arange(len(grat_pos)):
for x in np.arange(len(grat_pos)):
for c in np.arange(len(grat_contrast)):
for r in np.arange(len(grat_rad)):
for s in np.arange(len(grat_sf)):
for o in np.arange(len(grat_orientation)):
for p in np.arange(len(grat_phase)):
grat = Grating(width=width, pos=[grat_pos[y], grat_pos[x]],
contrast=grat_contrast[c], rad=grat_rad[r],
sf=grat_sf[s], orientation=grat_orientation[o],
phase=grat_phase[p],
stim_id=np.uint64(
y * 10e9 + x * 10e7 + r * 10e5 + s * 10e3 + o * 10e1 + p),
format_id='{0:012d}', save_dir=save_dir, size_px=[size_px, size_px],
type_name=stim_name)
image_names[i] = (grat.save_stimulus())
image_local_file_path[i] = save_dir + os.sep + image_names[i]
all_y[i] = grat_pos[y]
all_x[i] = grat_pos[x]
all_c[i] = grat_contrast[c]
all_r[i] = grat_rad[r]
all_s[i] = grat_sf[s]
all_o[i] = grat_orientation[o]
all_p[i] = grat_phase[p]
i += 1
stimuli['position_y'] = pd.Series(all_y)
stimuli['position_x'] = pd.Series(all_x)
import pandas as pd
import numpy as np
# 1 创建dataframe
d = {
'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])
}
df1 = pd.DataFrame(d)
print(df1)
# 2 Create a DataFrame from a dict of scalar-like values
df2 = pd.DataFrame({
'A': 1.,
'B': pd.Timestamp('20160101')
})
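# Inspect the result and the per-column dtypes of the mixed-type frame
print(df2)
print(df2.dtypes)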
import json
import os
from datetime import datetime
import joblib
import numpy as np
import pandas as pd
from xgboost import XGBRegressor, XGBClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
np.set_printoptions(precision=2)
"""
Generating the gradient boosting model with the pre-saved hyperparameters.
"""
def generate_model(simulation_folder, method='RF', mode='classification'):
"""
Function for generating model for given scenario and feature based model
:param simulation_folder: str, name of subfolder for given data set
:param method: str, model family to fit, one of {'RF', 'GB', 'XGB'}
:param mode: str, 'classification' or 'regression'
:return: none, model is saved down as side effect
"""
Start = datetime.now()
project_directory = os.path.dirname(os.getcwd())
path_to_data = os.path.join(project_directory, "Data", simulation_folder)
path_to_characteristics_data = os.path.join(path_to_data, "Characteristics")
path_to_model = os.path.join(project_directory, "Models", simulation_folder,
method, "Model")
path_to_hyperparameters = os.path.join(path_to_model, "hyperparameters.json")
X_train = np.load(os.path.join(path_to_characteristics_data, "X_train.npy"))
y_train = np.load(os.path.join(path_to_characteristics_data, "y_train.npy"))
with open(path_to_hyperparameters, 'r') as f:
param_data = json.load(f)
if method == 'RF':
if mode == 'classification':
model = RandomForestClassifier()
elif mode == 'regression':
model = RandomForestRegressor()
elif method == 'GB':
if mode == 'classification':
model = GradientBoostingClassifier()
elif mode == 'regression':
model = GradientBoostingRegressor()
elif method == 'XGB':
if mode == 'classification':
model = XGBClassifier()
elif mode == 'regression':
model = XGBRegressor()
model.set_params(**param_data)
model.fit(X_train, y_train)
joblib.dump(model, os.path.join(path_to_model, 'model.sav'))
End = datetime.now()
ExecutedTime = End - Start
df = pd.DataFrame({'ExecutedTime': [ExecutedTime]})
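# Hedged sketch: persist the timing next to the saved model; the file name
# 'model_generation_time.csv' is an assumption, not from the original script.
df.to_csv(os.path.join(path_to_model, 'model_generation_time.csv'), index=False)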
# Created by woochanghwang at 21/05/2020
# Modified by woochanghwang at 12/07/2020
# Modified at 18/07/2020
# Modified at 19/07/2021 for Method paper
'''
Make input files for Circos,
including the selected drug targets in the network and the key genes
# Modified by woochanghwang at 21/07/2020
- Hidden enrichment test (High level paths)
- Metabolism of RNA, Immune System, Cell Cycle, Other
# Modified by Woochang at 19/07/2021
- for Method paper
'''
import pandas as pd
import csv
import networkx as nx
import toolbox.data_handler as dh
import toolbox.visual_utilities as vu
def make_link_file(network_edge, circos_data_file_a, circos_link_file_a):
'''
- edge file : [from to]
- circos data file : [chr band start(number) to(number) name]
:return: circos link file [ band start to band start to ]
'''
# network_edge_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/src_visual/ULK1_sigGene_diNetwork.tsv"
# circos_data_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/circos/ulk1/data/ulk1_sigGene_cluster_text.txt"
# circos_link_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/circos/ulk1/data/ulk1_sigGene_cluster_link_v2.txt"
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
for line in circos_band_data:
gene = line[-1]
gene_pos = line[:-1]
circos_band_dict[gene] = gene_pos
circos_link = []
for edge in network_edge:
link_from = circos_band_dict.get(edge[0],'NA')
link_to = circos_band_dict.get(edge[1],'NA')
if link_from == 'NA' or link_to == 'NA' : continue
circos_link.append('\t'.join(link_from+link_to))
with open(circos_link_file_a,'w') as circos_link_f:
circos_link_f.write('\n'.join(circos_link))
def make_link_with_symbol_file(network_edge_addr, circos_data_file_a, circos_link_file_a):
'''
- edge file : [from to]
- circos data file : [chr band start(number) to(number) name]
:return: circos link file [ band start to band start to ]
'''
# network_edge_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/src_visual/ULK1_sigGene_diNetwork.tsv"
# circos_data_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/circos/ulk1/data/ulk1_sigGene_cluster_text.txt"
# circos_link_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/circos/ulk1/data/ulk1_sigGene_cluster_link_v2.txt"
with open(network_edge_addr) as network_edge_f:
network_edge = [x.strip().split('\t') for x in network_edge_f.readlines()]
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
for line in circos_band_data:
gene = line[-1]
# gene_pos = line[:-1]
gene_band = line[0]
circos_band_dict[gene] = gene_band
circos_link = []
# print(network_edge[:5])
for edge in network_edge:
link_from = circos_band_dict.get(edge[0],'NA')
link_to = circos_band_dict.get(edge[1],'NA')
if link_from == 'NA' or link_to == 'NA' : continue
circos_link.append([edge[0],link_from,edge[1],link_to])
print(circos_link[:5])
circos_link_df = pd.DataFrame(circos_link, columns=["Gene A","Gene A Group","Gene B","Gene B Group"])
circos_link_df.to_excel(circos_link_file_a,index=False)
# with open(circos_link_file_a,'w') as circos_link_f:
# circos_link_f.write('\n'.join(circos_link))
def make_link_file_for_specific_groups(network_edge, groups, circos_data_file_a, circos_link_file_a):
'''
- edge file : [from to]
- circos data file : [chr band start(number) to(number) name]
:return: circos link file [ band start to band start to ]
'''
# network_edge_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/src_visual/ULK1_sigGene_diNetwork.tsv"
# circos_data_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/circos/ulk1/data/ulk1_sigGene_cluster_text.txt"
# circos_link_file_a = "/Users/woochanghwang/PycharmProjects/LifeArc/General/circos/ulk1/data/ulk1_sigGene_cluster_link_v2.txt"
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
group_genes = []
for circos_genes in circos_band_data:
if circos_genes[0] in groups:
# print(circos_genes)
group_genes.append(circos_genes[-1])
# print(group_genes)
for line in circos_band_data:
gene = line[-1]
gene_pos = line[:-1]
circos_band_dict[gene] = gene_pos
circos_link = []
for edge in network_edge:
if len(set(edge)&set(group_genes)) >=1:
link_from = circos_band_dict.get(edge[0],'NA')
link_to = circos_band_dict.get(edge[1],'NA')
if link_from == 'NA' or link_to == 'NA' : continue
circos_link.append('\t'.join(link_from+link_to))
with open(circos_link_file_a,'w') as circos_link_f:
circos_link_f.write('\n'.join(circos_link))
def make_rwr_hist_file_for_heatmap (circos_data_file_a, rwr_result_file_addr, rwr_hist_file_a):
'''
Gene RWR for histogram
:return: [cluster, gene, fold change]
'''
with open(rwr_result_file_addr) as rwr_result_f:
gene_rwr_result = [x.strip().split('\t') for x in rwr_result_f.readlines()]
gene_rwr_list = [x[:2] for x in gene_rwr_result[1:]] # header = False
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
for line in circos_band_data:
gene = line[-1]
gene_pos = line[:-1]
circos_band_dict[gene] = gene_pos
circos_gene_fc_data = []
for gene_rwr in gene_rwr_list:
gene_pos = circos_band_dict[gene_rwr[0]]
rwr = str(gene_rwr[1])
a_gene = [gene_pos[0], gene_pos[1], gene_pos[1], rwr, 'id=rwr']
circos_gene_fc_data.append('\t'.join(a_gene))
with open(rwr_hist_file_a, 'w') as circos_gene_rwr_f:
circos_gene_rwr_f.write('\n'.join(circos_gene_fc_data))
def get_whole_node():
core_network_addr = "/Users/woochanghwang/PycharmProjects/LifeArc/ULK/data/string_interactions_GBM_ULK_e150_in_TCGA.tsv"
# core_network_addr = "/Users/woochanghwang/PycharmProjects/LifeArc/ULK/data/string_interactions_new_symbol.tsv"
core_network_df= pd.read_table(core_network_addr,sep='\t')
core_G = nx.from_pandas_edgelist(core_network_df,'node1','node2')
print(len(core_G.nodes))
key_gene_addr = "/Users/woochanghwang/PycharmProjects/LifeArc/ULK/data/GBM_OT_TCGA_ULK.txt"
key_gene_df = pd.read_table(key_gene_addr,sep='\t')
key_gene = key_gene_df['Gene']
print(len(set(key_gene)-set(core_G.nodes())))
from_ppi_genes = list(set(core_G.nodes()) - set(key_gene))
new_gene_list = []
for gene in from_ppi_genes:
new_gene_list.append(['STRING',gene])
for string_gene in new_gene_list:
key_gene_df = key_gene_df.append(pd.Series(string_gene,index=['Group','Gene']),ignore_index=True)
print(key_gene_df)
key_gene_df.to_csv("/Users/woochanghwang/PycharmProjects/LifeArc/ULK/data/GBM_OT_TCGA_STRING_ULK.txt",'\t',index=False,header=False)
def make_tcga_fc_file_for_heatmap(tcga_fc_addr, circos_data_file_a, circos_gene_fc_file_a):
tcga_logfc_result = dh.load_obj(tcga_fc_addr)
circos_data_df = pd.read_csv(circos_data_file_a,sep='\t',names=['Group','posA','posB','Gene'])
whole_gene = list(circos_data_df['Gene'])
print(whole_gene)
# df_final_result = pd.DataFrame(columns=['Gene', 'FoldChange(log2)'])
whole_fc_gene = []
for gene in whole_gene:
whole_fc_gene.append(tcga_logfc_result.get(gene, ''))
gene_fc_list = list(zip(whole_gene,whole_fc_gene))
# gene_fc_df = pd.DataFrame(gene_fc_list, columns=['Gene', 'FoldChange(log2)'])
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
for line in circos_band_data:
gene = line[-1]
gene_pos = line[:-1]
circos_band_dict[gene] = gene_pos
# for gene, gene_pos in circos_band_dict.items():
# gene_fc_df = gene_fc_df.replace(gene, '\t'.join(gene_pos))
circos_gene_fc_data = []
for gene_fc in gene_fc_list:
gene_pos = circos_band_dict[gene_fc[0]]
fc = str(gene_fc[1])
if fc == 'inf': fc = ''
a_gene = [gene_pos[0],gene_pos[1], gene_pos[1],fc,'id=fc']
circos_gene_fc_data.append('\t'.join(a_gene))
with open(circos_gene_fc_file_a,'w') as circos_gene_fc_f:
circos_gene_fc_f.write('\n'.join(circos_gene_fc_data))
def make_drug_score_file_for_heatmap(drug_score_file, circos_data_file_a, circos_data_final_file_a):
'''
Drug score per gene for the Circos heatmap track
:return: [cluster, gene, drug score]
'''
with open(drug_score_file) as drug_score_f:
drug_score_list = [x.strip().split('\t') for x in drug_score_f.readlines()]
drug_score_list = drug_score_list[1:] #header = False
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
for line in circos_band_data:
gene = line[-1]
gene_pos = line[:-1]
circos_band_dict[gene] = gene_pos
circos_gene_drug_data = []
for drug_score in drug_score_list:
gene_pos = circos_band_dict.get(drug_score[0],'NA')
if gene_pos == 'NA': continue
score = str(drug_score[1])
a_gene = [gene_pos[0], gene_pos[1], gene_pos[1], score, 'id=drug']
circos_gene_drug_data.append('\t'.join(a_gene))
with open(circos_data_final_file_a, 'w') as circos_gene_drug_f:
circos_gene_drug_f.write('\n'.join(circos_gene_drug_data))
def make_depmap_score_file_for_heatmap(depmap_score_file_a, circos_data_file_a, circos_depmap_score_file_a):
with open(depmap_score_file_a) as depmap_score_f:
drug_depmap_data = [x.strip().split('\t') for x in depmap_score_f.readlines()]
depmap_score_list = []
for drug in drug_depmap_data[1:]: #Header = False
a_depmap = [drug[0],drug[-1]]
depmap_score_list.append(a_depmap)
print(depmap_score_list[:10])
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
for line in circos_band_data:
gene = line[-1]
gene_pos = line[:-1]
circos_band_dict[gene] = gene_pos
circos_gene_depmap_data = []
for depmap_score in depmap_score_list:
gene_pos = circos_band_dict.get(depmap_score[0],'NA')
if gene_pos == 'NA': continue
score = str(depmap_score[1])
a_gene = [gene_pos[0], gene_pos[1], gene_pos[1], score, 'id=depmap']
circos_gene_depmap_data.append('\t'.join(a_gene))
with open(circos_depmap_score_file_a, 'w') as circos_gene_depmap_f:
circos_gene_depmap_f.write('\n'.join(circos_gene_depmap_data))
def make_final_target_file_for_heatmap(gene_list_file_ulk1,gene_list_file_ulk2, circos_data_file_a, circos_final_targets_file_a):
'''
Final target scores (top 20 per ULK list) for the Circos heatmap track
:return: [cluster, gene, final score]
'''
result_ulk1_df = pd.read_table(gene_list_file_ulk1, sep='\t')
result_ulk2_df = pd.read_table(gene_list_file_ulk2, sep='\t')
print(list(result_ulk1_df))
print(list(result_ulk2_df))
final_col_names=['Gene','Final score normalized']
ulk1_final_df = result_ulk1_df[final_col_names]
ulk2_final_df = result_ulk2_df[final_col_names]
ulk1_top20_df = ulk1_final_df.sort_values(by=['Final score normalized'],ascending=False).iloc[:20]
ulk2_top20_df = ulk2_final_df.sort_values(by=['Final score normalized'],ascending=False).iloc[:20]
ulk1_2_final_top20_df = pd.concat([ulk1_top20_df,ulk2_top20_df]).drop_duplicates(subset='Gene',keep='last').reset_index(drop=True)
# #################
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
for line in circos_band_data:
gene = line[-1]
gene_pos = line[:-1]
circos_band_dict[gene] = gene_pos
for gene,gene_pos in circos_band_dict.items():
ulk1_2_final_top20_df = ulk1_2_final_top20_df.replace(gene,'\t'.join(gene_pos))
ulk1_2_final_top20_df.insert(2,'id','id=fc')
print(ulk1_2_final_top20_df)
ulk1_2_final_top20_df[['cluster','pos_x','pos_y']]=ulk1_2_final_top20_df.Gene.str.split("\t",expand=True)
ulk1_2_final_top20_df_write = ulk1_2_final_top20_df[['cluster','pos_x','pos_y','Final score normalized','id']]
print(ulk1_2_final_top20_df_write)
ulk1_2_final_top20_df_write.to_csv(circos_final_targets_file_a, sep='\t', header=False,
index=False,quoting=csv.QUOTE_NONE,quotechar="", escapechar='\\')
def make_final_target_file_for_text(gene_list_file_ulk1,gene_list_file_ulk2, circos_data_file_a, circos_final_targets_file_a):
result_ulk1_df = pd.read_table(gene_list_file_ulk1, sep='\t')
result_ulk2_df = pd.read_table(gene_list_file_ulk2, sep='\t')
print(list(result_ulk1_df))
print(list(result_ulk2_df))
final_col_names=['Gene','Final score normalized']
ulk1_final_df = result_ulk1_df[final_col_names]
ulk2_final_df = result_ulk2_df[final_col_names]
ulk1_top20_df = ulk1_final_df.sort_values(by=['Final score normalized'],ascending=False).iloc[:20]
ulk2_top20_df = ulk2_final_df.sort_values(by=['Final score normalized'],ascending=False).iloc[:20]
ulk1_2_final_top20_df = pd.concat([ulk1_top20_df,ulk2_top20_df]).drop_duplicates(subset='Gene',keep='last').reset_index(drop=True)
# #################
circos_band_dict = dict()
with open(circos_data_file_a) as circos_band_f:
circos_band_data = [x.strip().split('\t') for x in circos_band_f.readlines()]
for line in circos_band_data:
gene = line[-1]
# gene_pos = line[:-1]
gene_pos = line # too keep gene name
circos_band_dict[gene] = gene_pos
for gene,gene_pos in circos_band_dict.items():
# print(gene,gene_pos)
ulk1_2_final_top20_df = ulk1_2_final_top20_df.replace(gene,'\t'.join(gene_pos))
print(ulk1_2_final_top20_df)
ulk1_2_final_top20_df[['cluster','pos_x','pos_y','Gene']]=ulk1_2_final_top20_df.Gene.str.split("\t",expand=True)
ulk1_2_final_top20_df_text= ulk1_2_final_top20_df[['cluster','pos_x','pos_y','Gene']]
print(ulk1_2_final_top20_df_text)
ulk1_2_final_top20_df_text.to_csv(circos_final_targets_file_a, sep='\t', header=False,
index=False,quoting=csv.QUOTE_NONE,quotechar="", escapechar='\\')
def make_color_file(virus, circos_data_file_a, circos_color_file_a):
circos_data_df = pd.read_csv(circos_data_file_a, sep='\t', names=['Group','posA','posB','Gene'])
groups = circos_data_df.Group.unique()
print(groups)
group_size_df = circos_data_df.groupby('Group').size()
print(group_size_df['hs1'])
circos_color_info = []
##################
# Add Chr info
##################
# grp_names = ['DIP','Virus_entry','Virus_replication','Virus/Immune','Metabolism','Anti-inflammatory','Immune_system','Ohter','DEP']
# grp_names = ['E','M','N','nsp1','nsp2','nsp4','nsp5','nsp6','nsp7','nsp8','nsp9','nsp10','nsp12','nsp13','nsp14','nsp15','orf3a','orf6','orf7a','orf8','orf9b','orf9c','orf10',
# 'VR_VE_MB','VE_MB','DEP','VR','AI_IR','Unknwown']
###v2
# grp_names = ['E', 'M', 'N', '1', '2', '4', '5', '6', '7', '8', '9', '10', '12',
# '13', '14', '15', '3a', '6', '7a', '8', '9b', '9c', '10',
# 'VRVEMB', 'DEP', 'VR', 'AIIR', 'Unknown']
###Localization
if virus == "SARS-CoV":
grp_names = ['E', 'M', 'N', '2', '6', '9', '10', '12',
'13', '15', '16', '3a','3b', '7a','7b', '8a','8b', '9b','S',
'Met.RNA','CellCycle','Immune','Met.Proteins', 'Other','DEP']
elif virus =="SARS-CoV-2":
grp_names = ['E', 'M', 'N', '1','4', '6', '9', '3', '7a','7b','8',
'Met.RNA','CellCycle','Immune','Met.Proteins', 'Other','DEP']
# grp_names = ['T','PPT','AG','DRUG','TF','TCGA','FRANK','MIDDLE']
chr_number = 0
for grp,name in zip(groups,grp_names):
chr_number += 1
# print(grp, name)
a_grp = ['chr','-',grp,name,'0',str(group_size_df[grp]),'chr'+str(chr_number)]
# print(a_grp)
circos_color_info.append('\t'.join(a_grp))
################
# Add Band Info
################
with open(circos_data_file_a) as circos_data_f:
circos_dota_for_color = [x.strip().split('\t') for x in circos_data_f.readlines()]
#band hs1 ULK1 ULK1 0 1 grey
for gene in circos_dota_for_color:
a_band = ['band',gene[0],gene[-1],gene[-1],gene[1],gene[2],'grey']
circos_color_info.append('\t'.join(a_band))
##############
# make file
##############
with open(circos_color_file_a,'w') as circos_color_f:
circos_color_f.write('\n'.join(circos_color_info))
def make_circos_data_file_for_covid(virus, circos_data_file_a, circos_nodes_file_a):
circos_covid_network_df = pd.read_csv(circos_nodes_file_a, sep='\t', names=['Group','Gene'])
circos_covid_network_df = circos_covid_network_df.drop_duplicates(subset='Gene')
circos_covid_network_df = circos_covid_network_df.reset_index(drop=True)
circos_group_list = list(circos_covid_network_df['Group'])
circos_posA_list = []
circos_posB_list = []
cur_group = circos_group_list[0]
posA=0
posB=1
for group in circos_group_list:
if group == cur_group:
circos_posA_list.append(posA)
circos_posB_list.append(posB)
posA +=1
posB += 1
else:
cur_group = group
posA=0
posB=1
circos_posA_list.append(posA)
circos_posB_list.append(posB)
posA +=1
posB +=1
circos_covid_network_df['posA'] = pd.Series(circos_posA_list)
circos_covid_network_df['posB'] = pd.Series(circos_posB_list)
circos_covid_network_df = circos_covid_network_df[['Group','posA','posB','Gene']]
# circos_covid_network_df = circos_covid_network_df.astype({'posA':int, 'posB':int})
if virus == "SARS-CoV":
###localization
circos_covid_network_df = circos_covid_network_df.replace({'Group':{'E':'hs1',
'M':'hs2',
'N':'hs3',
'S':'hs4',
'NSP2':'hs5',
'NSP6':'hs6',
'NSP9':'hs7',
'NSP10':'hs8',
'NSP12': 'hs9',
'NSP13': 'hs10',
'NSP15': 'hs11',
'NSP16': 'hs12',
'ORF3a':'hs13',
'ORF3b':'hs14',
'ORF7a':'hs15',
'ORF7b':'hs16',
'ORF8a':'hs17',
'ORF8b':'hs18',
'ORF9b':'hs19',
'Met.RNA': 'hs20',
'CellCycle': 'hs21',
'Immune': 'hs22',
'Met.Proteins': 'hs23',
'Other': 'hs24',
'DEP': 'hs25'
}})
elif virus == "SARS-CoV-2":
circos_covid_network_df = circos_covid_network_df.replace({'Group':{'E':'hs1',
'M':'hs2',
'N':'hs3',
'NSP1':'hs4',
'NSP4':'hs5',
'NSP6':'hs6',
'NSP9':'hs7',
'ORF3':'hs8',
'ORF7a':'hs9',
'ORF7b':'hs10',
'ORF8':'hs11',
'Met.RNA': 'hs12',
'CellCycle': 'hs13',
'Immune': 'hs14',
'Met.Proteins': 'hs15',
'Other': 'hs16',
'DEP': 'hs17'
}})
print(circos_covid_network_df)
circos_covid_network_df.to_csv(circos_data_file_a,sep='\t',index=False,header=False)
def get_drug_targets_in_network(network_time,candidate_drug_df):
# print(network_time)
candidate_drugs_in_networktime = candidate_drug_df[candidate_drug_df['Network_time']==network_time]
candidate_drugs_target_list = candidate_drugs_in_networktime['Target Proteins in Network'].to_list()
drug_targets = []
for targets in candidate_drugs_target_list:
drug_targets += targets.split(',')
drug_targets = list(set(drug_targets))
return drug_targets
import re
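# Natural-sort helpers: split a string into text and number chunks so that,
# e.g., 'hs2' sorts before 'hs10' when natural_keys is used as a sort key.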
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def get_sig_genes_by_centrality(centrality_results_6hr, centrality_bands):
threshold = 0.01
sig_genes_dict = dict()
for centrality in centrality_bands:
centrality_pvalue_col = "{}_pvalue".format(centrality)
sig_genes = centrality_results_6hr[centrality_results_6hr[centrality_pvalue_col]<=threshold]['Gene'].to_list()
sig_genes_dict[centrality] = sig_genes
# common_genes = []
# for key, value in sig_genes_dict.items():
# print(key, len(value),value[:5])
# if len(common_genes) == 0:
# common_genes = value
# else:
# common_genes = list(set(common_genes)&set(value))
#
# print("common:",len(common_genes))
# sig_genes_without_common = dict()
return sig_genes_dict
def sort_hidden_by_centrality(covid_network_hidden_keyGene, centrality_bands):
centrality_results_6hr = pd.read_csv("/Users/woochanghwang/PycharmProjects/LifeArc/COVID-19/result/Centrality/6hr/round2/COVID_6hr_gene_score_by_centrality_pvalue.csv")
centrality_results_24hr = pd.read_csv("/Users/woochanghwang/PycharmProjects/LifeArc/COVID-19/result/Centrality/24hr/round6/COVID_24hr_gene_score_by_centrality_pvalue.csv")
centrality_genes_6hr = get_sig_genes_by_centrality(centrality_results_6hr,centrality_bands)
centrality_genes_24hr = get_sig_genes_by_centrality(centrality_results_24hr, centrality_bands)
######################
# for venn
######################
centrality_genes_all_for_venn = {}
centrality_genes_24hr_hidden_for_venn = {}
centrality_genes_6hr_hidden_for_venn ={}
###################
centrality_genes_in_hidden_dict = dict()
common_genes = []
for centrality in centrality_bands:
sig_genes_6hr = centrality_genes_6hr[centrality]
sig_genes_24hr = centrality_genes_24hr[centrality]
centality_sig_genes= list(set(sig_genes_6hr).union(set(sig_genes_24hr)))
centality_sig_genes_in_hidden = list(set(centality_sig_genes)&set(covid_network_hidden_keyGene))
if len(common_genes) == 0:
common_genes = centality_sig_genes_in_hidden
else:
common_genes = list(set(common_genes)&set(centality_sig_genes_in_hidden))
centrality_genes_in_hidden_dict[centrality] = sorted(centality_sig_genes_in_hidden)
#############################################
centrality_genes_all_for_venn[centrality] = set(centality_sig_genes_in_hidden)
centrality_genes_6hr_hidden_for_venn[centrality] = set(sig_genes_6hr)&set(centality_sig_genes_in_hidden)
centrality_genes_24hr_hidden_for_venn[centrality] = set(sig_genes_24hr)&set(centality_sig_genes_in_hidden)
#########################################
for key, value in centrality_genes_in_hidden_dict.items():
print(key, len(value), value[:5])
print("common:", len(common_genes))
import toolbox.visual_utilities as vu
from venn import venn
import matplotlib.pyplot as plt
# vu.draw_venn_3group(centrality_genes_in_hidden_dict['RWR'],centrality_genes_in_hidden_dict['eigen'],centrality_genes_in_hidden_dict['degree'],group_labels=['RWR','eigen','degree'],
# save_addr= "/Users/woochanghwang/PycharmProjects/LifeArc/COVID-19/result/Sig.Genes/keygenes_RWR_eigen_degree.png")
###########################
venn(centrality_genes_all_for_venn)
plt.savefig("/Users/woochanghwang/PycharmProjects/LifeArc/COVID-19/result/Sig.Genes/keygenes_RWR_eigen_degree_bw.pdf")
plt.show()
venn(centrality_genes_24hr_hidden_for_venn)
plt.savefig(
"/Users/woochanghwang/PycharmProjects/LifeArc/COVID-19/result/Sig.Genes/24hr_keygenes_RWR_eigen_degree_bw.pdf")
plt.show()
venn(centrality_genes_6hr_hidden_for_venn)
plt.savefig(
"/Users/woochanghwang/PycharmProjects/LifeArc/COVID-19/result/Sig.Genes/6hr_keygenes_RWR_eigen_degree_bw.pdf")
plt.show()
# print(centrality_results_6hr)
def sort_hidden_by_localization(covid_network_hidden_keyGene, localization_bands):
key_gene_localization_df = pd.read_csv("/Users/woochanghwang/PycharmProjects/LifeArc/COVID-19/result/Circos/data/network_backbone_node_info_key_genes_moa_based_som_reverse_subcellular_conf4_v3.tsv",
sep='\t')
key_gene_localization_groupby = key_gene_localization_df.groupby('Protein')['Subcellular'].agg(list)
print(key_gene_localization_groupby)
subcellular = set(key_gene_localization_df['Subcellular'].to_list())
print(subcellular-set(localization_bands))
covid_network_hidden_keyGene.sort()
print(covid_network_hidden_keyGene[:5])
covid_network_hidden_keyGene_copy = covid_network_hidden_keyGene[:]
covid_network_hidden_keyGene_localizaition_nucleus_ER = []
covid_network_hidden_keyGene_localizaition_nucleus_no_ER = []
covid_network_hidden_keyGene_localizaition_nucleus_with_ER = []
covid_network_hidden_keyGene_localizaition_ER = []
covid_network_hidden_keyGene_localizaition_else = []
for localization in localization_bands[:1]:
for gene in covid_network_hidden_keyGene_copy:
if (localization in key_gene_localization_groupby[gene]) and (
localization_bands[1] not in key_gene_localization_groupby[gene]):
covid_network_hidden_keyGene_localizaition_nucleus_no_ER.append(gene)
elif (localization in key_gene_localization_groupby[gene]) and (
localization_bands[1] in key_gene_localization_groupby[gene]):
covid_network_hidden_keyGene_localizaition_nucleus_with_ER.append(gene)
covid_network_hidden_keyGene_copy = list(set(covid_network_hidden_keyGene_copy)-
set(covid_network_hidden_keyGene_localizaition_nucleus_no_ER)-
set(covid_network_hidden_keyGene_localizaition_nucleus_with_ER))
covid_network_hidden_keyGene_copy.sort()
for localization in localization_bands[1:2]:
for gene in covid_network_hidden_keyGene_copy:
if localization in key_gene_localization_groupby[gene]:
covid_network_hidden_keyGene_localizaition_ER.append(gene)
covid_network_hidden_keyGene_copy = list(set(covid_network_hidden_keyGene_copy)-set(covid_network_hidden_keyGene_localizaition_ER))
covid_network_hidden_keyGene_copy.sort()
covid_network_hidden_keyGene_localizaition_nucleus_ER += covid_network_hidden_keyGene_localizaition_nucleus_no_ER
covid_network_hidden_keyGene_localizaition_nucleus_ER += covid_network_hidden_keyGene_localizaition_nucleus_with_ER
covid_network_hidden_keyGene_localizaition_nucleus_ER += covid_network_hidden_keyGene_localizaition_ER
for localization in localization_bands[2:]:
for gene in covid_network_hidden_keyGene_copy:
if localization in key_gene_localization_groupby[gene]:
covid_network_hidden_keyGene_localizaition_else.append(gene)
covid_network_hidden_keyGene_copy = list(set(covid_network_hidden_keyGene_copy)-set(covid_network_hidden_keyGene_localizaition_else))
covid_network_hidden_keyGene_copy.sort()
covid_network_hidden_keyGene_localizaition_else += covid_network_hidden_keyGene_copy
covid_network_hidden_sorted = []
covid_network_hidden_sorted.append(covid_network_hidden_keyGene_localizaition_nucleus_ER)
covid_network_hidden_sorted.append(covid_network_hidden_keyGene_localizaition_else)
return covid_network_hidden_sorted
def sort_hidden_by_enrichedPaths(covid_network_hidden_keyGene):
hidden_enrichedPaths_dict = dh.load_obj("/Users/woochanghwang/PycharmProjects/LifeArc/COVID-19/data/hidden_to_keyPaths_dict")
hidden_sorted_dict = {}
hidden_met_rna = []
hidden_immune = []
hidden_cellcycle = []
hidden_others = []
covid_network_hidden_keyGene_copied = covid_network_hidden_keyGene[:]
for gene in covid_network_hidden_keyGene_copied:
if gene in hidden_enrichedPaths_dict["Met.RNA"]:
hidden_met_rna.append(gene)
elif gene in hidden_enrichedPaths_dict["CellCycle"]:
hidden_cellcycle.append(gene)
elif gene in hidden_enrichedPaths_dict["Immune"]:
hidden_immune.append(gene)
else:
hidden_others.append(gene)
hidden_sorted_dict["Met.RNA"] = sorted(hidden_met_rna,reverse=True)
hidden_sorted_dict["CellCycle"] = sorted(hidden_cellcycle,reverse=True)
hidden_sorted_dict["Immune"] = sorted(hidden_immune, reverse=True)
hidden_sorted_dict["Other"] = sorted(hidden_others,reverse=True)
covid_network_hidden_sorted = []
for label, genes in hidden_sorted_dict.items():
print(label, genes)
for gene in genes:
covid_network_hidden_sorted.append([label,gene])
return covid_network_hidden_sorted
def sort_hidden_by_centrality_rwr(gene_set, centrality_dict):
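    # Split the genes by the centrality class they were assigned to (eigenvector, degree,
    # betweenness, RWR, or none of them) and return them concatenated in that order,
    # each group sorted in reverse alphabetical order.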
sorted_gene_set = []
gene_set_eigen = list(set(gene_set)&set(centrality_dict['key_eigen']))
gene_set_degree = list(set(gene_set)&set(centrality_dict['key_degree']))
gene_set_bw = list(set(gene_set)&set(centrality_dict['key_bw']))
gene_set_rwr = list(set(gene_set)&set(centrality_dict['key_rwr']))
gene_set_other = list(set(gene_set)-set(gene_set_eigen)-set(gene_set_degree)-set(gene_set_bw)-set(gene_set_rwr))
sorted_gene_set = sorted(gene_set_eigen,reverse=True) \
+ sorted(gene_set_degree,reverse=True) \
+ sorted(gene_set_bw,reverse=True) \
+ sorted(gene_set_rwr,reverse=True) \
+ sorted(gene_set_other,reverse=True)
return sorted_gene_set
def sort_hidden_by_enrichedPaths_centrality(covid_network_hidden_keyGene, centrality_dict):
hidden_enrichedPaths_dict = dh.load_obj("../Data/hidden_to_keyPaths_dict")
hidden_sorted_dict = {}
hidden_met_rna = []
hidden_immune = []
hidden_cellcycle = []
hidden_met_proteins = []
hidden_others = []
covid_network_hidden_keyGene_copied = covid_network_hidden_keyGene[:]
for gene in covid_network_hidden_keyGene_copied:
if gene in hidden_enrichedPaths_dict["Met.RNA"]:
hidden_met_rna.append(gene)
elif gene in hidden_enrichedPaths_dict["CellCycle"]:
hidden_cellcycle.append(gene)
elif gene in hidden_enrichedPaths_dict["Immune"]:
hidden_immune.append(gene)
elif gene in hidden_enrichedPaths_dict["Met.Proteins"]:
hidden_met_proteins.append(gene)
else:
hidden_others.append(gene)
hidden_sorted_dict["Met.RNA"] = sort_hidden_by_centrality_rwr(hidden_met_rna, centrality_dict)
hidden_sorted_dict["CellCycle"] = sort_hidden_by_centrality_rwr(hidden_cellcycle, centrality_dict)
hidden_sorted_dict["Immune"] = sort_hidden_by_centrality_rwr(hidden_immune, centrality_dict)
hidden_sorted_dict["Met.Proteins"] = sort_hidden_by_centrality_rwr(hidden_met_proteins, centrality_dict)
hidden_sorted_dict["Other"] = sort_hidden_by_centrality_rwr(hidden_others, centrality_dict)
covid_network_hidden_sorted = []
for label, genes in hidden_sorted_dict.items():
print(label, genes)
for gene in genes:
covid_network_hidden_sorted.append([label,gene])
return covid_network_hidden_sorted
def make_backbone_key_genes_file(virus, key_gene_SARS, circos_node_file_a):
graph_SARS = dh.load_obj(f"../result/{virus}/network/{virus}_All_Structure_All_Shortest_Paths_Graph")
covid_network_all_nodes = list(set(graph_SARS.nodes()))
covid_network_all_nodes = list(set(covid_network_all_nodes))
network_anaysis_df = pd.read_csv(f"../result/{virus}/network_analysis/{virus}_A549_24h_centrality_RWR_result_pvalue.csv")
eigen_list = network_anaysis_df[network_anaysis_df['Eigen_pvalue']< 0.01]['Gene'].tolist()
degree_list = network_anaysis_df[network_anaysis_df['Degree_pvalue']< 0.01]['Gene'].tolist()
bw_list = network_anaysis_df[network_anaysis_df['Between_plvaue']< 0.01]['Gene'].tolist()
rwr_list = network_anaysis_df[network_anaysis_df['RWR_pvalue']< 0.01]['Gene'].tolist()
key_genes = list(set(key_gene_SARS))
key_genes_eigen = list(set(key_gene_SARS) & set(eigen_list))
key_genes_degree = list(set(key_gene_SARS) & set(degree_list) - set(key_genes_eigen))
key_genes_bw = list(set(key_gene_SARS) & set(bw_list) - set(key_genes_eigen)-set(key_genes_degree))
key_genes_rwr = list(set(key_gene_SARS) & set(rwr_list) - set(key_genes_eigen)-set(key_genes_degree)-set(key_genes_bw))
centrality_dict = {
"key_genes": key_genes,
"key_eigen" : key_genes_eigen,
"key_degree" : key_genes_degree,
"key_bw" : key_genes_bw,
"key_rwr" : key_genes_rwr
}
print("key_target",len(key_genes), len(key_genes_eigen), len(key_genes_degree), len(key_genes_bw), len(key_genes_rwr))
dip_df = pd.read_csv(f"../Data/DIP/{virus}_DIP_no_duple.csv")
dip_protein = pd.read_csv(f"../Data/DIP/{virus}_DIP_no_duple.csv")['gene_name'].tolist()
covid_network_dip = list(set(covid_network_all_nodes)&set(dip_protein))
covid_network_dip_keyGene = list(set(covid_network_dip)&set(key_genes))
covid_network_dip_keyGene_eigen = list(set(covid_network_dip_keyGene)&set(key_genes_eigen))
covid_network_dip_keyGene_degree = list(set(covid_network_dip_keyGene)&set(key_genes_degree))
covid_network_dip_keyGene_bw = list(set(covid_network_dip_keyGene) & set(key_genes_bw))
covid_network_dip_keyGene_rwr = list(set(covid_network_dip_keyGene) & set(key_genes_rwr))
# print(len(covid_network_dip_keyGene),len(covid_network_dip_keyGene_degree),len(covid_network_dip_keyGene_bw), len(covid_network_dip_keyGene_eigen))
dep_SARS_protein = pd.read_csv(f"../Data/DEP/{virus}_DEP.csv")['Gene_name'].tolist()
dep_protein = list(set(dep_SARS_protein)-set(dip_protein)) #
covid_network_dep = list(set(covid_network_all_nodes)&set(dep_protein))
covid_network_dep_keyGene = list(set(covid_network_dep)&set(key_genes) - set(covid_network_dip_keyGene))
covid_network_dep_keyGene_eigen = list(set(covid_network_dep_keyGene) & set(key_genes_eigen))
covid_network_dep_keyGene_degree = list(set(covid_network_dep_keyGene) & set(key_genes_degree))
covid_network_dep_keyGene_bw = list(set(covid_network_dep_keyGene) & set(key_genes_bw))
covid_network_dep_keyGene_rwr = list(set(covid_network_dep_keyGene) & set(key_genes_rwr))
covid_network_hidden = list(set(covid_network_all_nodes)-set(dep_protein)-set(dip_protein))
covid_network_hidden_keyGene = list(set(covid_network_hidden)&set(key_genes))
# print(len(covid_network_all_nodes))
# print(len(key_genes), len(key_genes))
print(len(covid_network_dip_keyGene))
print(len(covid_network_dep_keyGene))
print(len(covid_network_hidden_keyGene))
##########
## DIP to sub structure
#########
# moa_to_pathway_groupby_df = moa_to_pathway_df.groupby('MoA_Category')['Pathways'].agg(list)
# print(dip_df)
dip_preygene_df = dip_df.groupby('bait_name')['gene_name'].agg(list)
print(dip_preygene_df)
dip_bait_gene_dict = dict()
for key,value in dip_preygene_df.items():
# print(key)
# string_gene = dip_stringgene_df[key]
# bait_gene = list(set(value).union(set(string_gene)))
dip_bait_gene_dict[key] = value
dip_bait_gene_dict_key_sorted = sorted(list(dip_bait_gene_dict.keys()),key=natural_keys)
dip_bait_gene_dict_sorted = dict()
for key in dip_bait_gene_dict_key_sorted:
# print(key)
dip_bait_gene_dict_sorted[key] = dip_bait_gene_dict[key]
# print(dip_bait_gene_dict_sorted.keys())
covid_network_dip_node_with_structure = []
covid_network_dip_coreGene_backup = covid_network_dip_keyGene[:]
for structure,bait_gene in dip_bait_gene_dict_sorted.items():
key_genes_in_structrure = list(set(covid_network_dip_keyGene)&set(bait_gene))
#######################
# for sort
#######################
structure_degree_gene = list(set(covid_network_dip_keyGene_degree)&set(key_genes_in_structrure))
structure_bw_gene = list(set(covid_network_dip_keyGene_bw)&set(key_genes_in_structrure))
structure_eigen_gene = list(set(covid_network_dip_keyGene_eigen)&set(key_genes_in_structrure))
structure_rwr_gene = list(set(covid_network_dip_keyGene_rwr)&set(key_genes_in_structrure))
structure_degree_gene.sort()
structure_bw_gene.sort()
structure_eigen_gene.sort()
structure_rwr_gene.sort()
for gene in structure_eigen_gene:
covid_network_dip_node_with_structure.append([structure,gene])
for gene in structure_degree_gene:
covid_network_dip_node_with_structure.append([structure, gene])
for gene in structure_bw_gene:
covid_network_dip_node_with_structure.append([structure, gene])
for gene in structure_rwr_gene:
covid_network_dip_node_with_structure.append([structure, gene])
########################
# covid_network_dip_keyGene = list(set(covid_network_dip_keyGene) - set(key_genes_in_structrure))
# print(dip_bait_gene_dict.keys())
# print(covid_network_dip_node_with_structure)
# print(len(covid_network_dip_node_with_structure))
# covid_network_dep_node = [['DEP',x] for x in covid_network_dep_keyGene]
################
# sort dep genes
################
covid_network_dep_keyGene_degree.sort()
covid_network_dep_keyGene_bw.sort()
covid_network_dep_keyGene_eigen.sort()
covid_network_dep_keyGene_rwr.sort()
covid_network_dep_keyGene_sorted = covid_network_dep_keyGene_eigen+covid_network_dep_keyGene_degree+covid_network_dep_keyGene_bw + covid_network_dep_keyGene_rwr
covid_network_dep_node = [['DEP', x] for x in covid_network_dep_keyGene_sorted]
###################
## HIDDEN
##############
# HIDDEN enriched pathways
##############
covid_network_hidden_sorted = sort_hidden_by_enrichedPaths_centrality(covid_network_hidden_keyGene, centrality_dict)
covid_network_background_node = []
covid_network_background_node += covid_network_dip_node_with_structure
covid_network_background_node += covid_network_hidden_sorted
covid_network_background_node += covid_network_dep_node
############################################
covid_network_background_df = | pd.DataFrame(covid_network_background_node, columns=['Mode','Gene']) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
import torch
import src.config as proj_config
class EventDataset(Dataset):
@staticmethod
def get_embeddings_vec(events, feature_name):
embeddings_vec = events[feature_name].values
embeddings = [torch.tensor(vec, dtype=torch.float64) for vec in embeddings_vec]
embeddings = torch.stack(embeddings, dim=0)
return embeddings
@staticmethod
def check_diff_in_days(day1, day2, diff):
dt1 = pd.to_datetime(day1, format='%Y/%m/%d')
dt2 = | pd.to_datetime(day2, format='%Y/%m/%d') | pandas.to_datetime |
import pandas as pd
import numpy as np
import warnings
from datetime import datetime
from typing import Optional, Union
class Genes:
"""Internal function that generate a pool of genes (search space) according to
the given parameters.
Parameters
----------
search_space : dict
Where keys are parameter names (strings)
and values are int, float or str.
Represents search space
over parameters of the provided estimator.
pop_size : int
Size of the initial population.
"""
def __init__(self, search_space: dict, pop_size: int):
        current_time: str
        if pop_size < 10:
            current_time = datetime.now().strftime("%H:%M:%S")
            warnings.warn(f'[{current_time}] Low Population Warning: A small initial population size may cause premature convergence. Please consider initializing a larger population size.')
def _is_all_str(dictionary):
for v in dictionary.values():
if all(isinstance(item, str) for item in v):
return True
return False
self.all_str = _is_all_str(search_space)
self.search_space = search_space
self.pop_size = pop_size
def _make_genes_numeric(self, lower: Optional[Union[int, float, str]], upper: Optional[Union[int, float, str]], prior: str, pop_size: int) -> pd.DataFrame:
"""Generate numerical values for solution candidates according to given statistical distribution.
Parameters
----------
lower : integer
Defines the lower boundaries of the distributions.
upper : integer
Defines the upper boundaries of the distributions.
pop_size : integer
A parameter to control the size of the population.
prior : string
A parameter that defines the sampling distribution.
"""
allele: np.array
mu: float
sigma: float
genes: Union[float, list]
# Check if upper is bigger than lower
if lower >= upper:
raise Exception('Upper must be larger than lower boundary.')
# Check if sampling distribution available
if prior not in ['normal', 'uniform', 'log-normal']:
raise Exception('Prior for search space must be "normal", "uniform", "log-normal".')
# Obtains mu and sigma from the interval
allele = np.arange(lower, upper)
mu = allele.mean()
sigma = allele.std()
# Sampling from normal distribution
if prior == 'normal':
genes = np.random.normal(loc=mu, scale=sigma, size=pop_size)
# Sampling from uniform distribution
elif prior == 'uniform':
genes = np.random.uniform(low=lower, high=upper, size=pop_size)
# Sampling from log-normal distribution
elif prior == 'log-normal':
genes = np.random.lognormal(mu=mu, sigma=sigma, size=pop_size)
genes = (genes - genes.min()) / (genes.max() - genes.min()) * (upper - lower) + lower
if isinstance(lower, int) and isinstance(upper, int):
genes = [int(round(i)) for i in genes]
return pd.DataFrame(genes, dtype='O')
def _make_genes_categorical(self, categories: tuple, pop_size: int) -> pd.DataFrame:
"""Randomly generate categorical values for solution candidates.
Parameters
----------
categories : tuple
Contains all possible categories for particular genes.
pop_size : int
A parameter to control the size of the population.
"""
categorical_genes: Union[np.array, pd.DataFrame]
categorical_genes = np.random.choice(a=categories, size=pop_size)
categorical_genes = | pd.DataFrame(categorical_genes) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Data Manipulation - III
# ## Aggregation and grouping
# ### Aggregating information across rows or columns
#
# To aggregate information (e.g. summing, taking means, etc.) across rows or columns we can use some specific methods already available on *DataFrames* and *Series*, such as `sum`, `mean`, `cumsum` and `aggregate` (or, equivalently, `agg`):
# In[1]:
import pandas as pd
import numpy as np
dados_covid_PB = pd.read_csv('https://superset.plataformatarget.com.br/superset/explore_json/?form_data=%7B%22slice_id%22%3A1550%7D&csv=true',
sep=',', index_col=0)
# In[2]:
dados_covid_PB.agg(lambda vetor: np.sum(vetor))[['casosNovos','obitosNovos']].astype('int')
# We can check this aggregated result against the accumulated case and death counts
# In[3]:
dados_covid_PB.head()
# This can also be obtained using the `sum` method of *DataFrames* and *Series*:
# In[4]:
dados_covid_PB[['casosNovos','obitosNovos']].sum()
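# As a minimal extra sketch (assuming `dados_covid_PB` is still in memory), `agg` also accepts
# a list of functions, returning one row per aggregation function for each selected column:
dados_covid_PB[['casosNovos', 'obitosNovos']].agg(['sum', 'mean', 'max'])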
# We can recreate the `'obitosAcumulados'` column with the `cumsum` method (cumulative sum):
# In[5]:
dados_covid_PB.obitosNovos.sort_index().cumsum()
# ### Selecting distinct entries
#
# To select distinct entries we use the `drop_duplicates` method. Here, as an example, we will use the official COVID database for Brazil:
# In[6]:
# may take a while to read...
covid_BR = pd.read_excel('../database/HIST_PAINEL_COVIDBR_18jul2020.xlsx')
# In[7]:
covid_BR.tail(3)
# In[8]:
# table summary
covid_BR.info()
# In[9]:
# all unique states
covid_BR.estado.drop_duplicates().array
# In[10]:
# sort alphabetically
covid_BR.estado.drop_duplicates().dropna().sort_values().array
# ### Grouping data by column values and aggregating the results
#
# Let's choose a column to group by. We will consider the `covid_BR` *DataFrame* and select the states *PB*, *PE*, *RJ* and *SP* to perform analyses, grouping the results by state.
# In[11]:
covid_BR.query('estado in ["PB", "PE", "RJ", "SP"]')
# Inspecting the dataset, we see that state-level rows have a `NaN` value for `codmun`, and whenever `codmun` has a value other than `NaN` the row refers only to the municipality with that code.
#
# Since we are interested in state-level values, we will keep only the rows where `codmun` is `NaN`.
# In[12]:
covid_estados = covid_BR.query('estado in ["PB", "PE", "RJ", "SP"]')
covid_apenas_estados = covid_estados.loc[covid_estados['codmun'].isna()]
# Let's now select only the columns of interest. To do so, let's look at the column names:
# In[13]:
covid_apenas_estados.columns
# In[14]:
covid_apenas_estados = covid_apenas_estados[['estado', 'data', 'casosNovos', 'obitosNovos']]
# The date looks like the natural *index*, since the current *index* does not represent anything. Note that we will have repeated *index* values, because the same dates appear in different states.
# In[15]:
covid_apenas_estados
# In[16]:
covid_apenas_estados = covid_apenas_estados.set_index('data')
# In[17]:
covid_apenas_estados
# ### Grouping with the *groupby* method
#
# We can choose one (or more) columns, including the index, to group the data by. When we group the data, we get a `DataFrameGroupBy` object back. To see the results, we must aggregate the values:
# In[18]:
covid_estados_agrupado = covid_apenas_estados.groupby('estado')
# In[19]:
covid_estados_agrupado.sum().rename({'casosNovos':'Casos Totais', 'obitosNovos':'Obitos Totais'},axis=1)
# We can group by more than one column. Let's create two groups: *grupo_1* formed by PB and PE, and *grupo_2* formed by RJ and SP. Next, we group by group and by date:
# In[20]:
covid_estados_grupos = covid_apenas_estados.copy()
col_grupos = covid_estados_grupos.estado.map(lambda estado: 'grupo_1' if estado in ['PB','PE']
else 'grupo_2')
covid_estados_grupos['grupo'] = col_grupos
# In[21]:
covid_estados_grupos
# Now let's group and aggregate:
# In[22]:
covid_grupo_agrupado = covid_estados_grupos.groupby(['grupo','data'])
# In[23]:
covid_grupo_agrupado.sum()
# ### Merging *DataFrames*
#
# Let's now look at some ways to join two or more *DataFrames* that share an *index* or columns to form a new *DataFrame*.
#
# #### Merging *DataFrames* through concatenation
#
# Concatenating is nothing more than "gluing" two or more *DataFrames* together. We can concatenate by rows or by columns.
#
# The function that performs the concatenation is `concat`. Its two most used arguments are the list of *DataFrames* to be concatenated and `axis`, where `axis=0` indicates row-wise concatenation (one *DataFrame* "below" the other) and `axis=1` indicates column-wise concatenation (one *DataFrame* next to the other).
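# As a quick sketch of `concat` (two small throwaway DataFrames, not the COVID data):
# `axis=0` stacks one DataFrame below the other, `axis=1` places them side by side, aligned by index.
df_a_example = pd.DataFrame({'x': [1, 2]}, index=['a', 'b'])
df_b_example = pd.DataFrame({'x': [3, 4]}, index=['c', 'd'])
pd.concat([df_a_example, df_b_example], axis=0)  # row-wise: one "below" the other
pd.concat([df_a_example, df_b_example], axis=1)  # column-wise: side by side (NaN where the indexes do not match)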
# Recall the `df_dict_series` *DataFrame*:
# In[24]:
df_dict_series = pd.read_csv('../database/df_dict_series.csv')
# Let's create a new one, with new people:
# In[25]:
serie_Idade_nova = pd.Series({'Augusto':13, 'André': 17, 'Alexandre': 45}, name="Idade")
serie_Peso_novo = pd.Series({'Augusto':95, 'André': 65, 'Alexandre': 83}, name="Peso")
serie_Altura_nova = pd.Series({'Augusto':192, 'André': 175, 'Alexandre': 177}, name="Altura")
serie_sobrenome = pd.Series({'Augusto':'Castro', 'André':'Castro', 'Alexandre':'Castro'}, name='Sobrenome')
dicionario_novo = {'Sobrenome':serie_sobrenome, 'Peso': serie_Peso_novo,
'Idade': serie_Idade_nova, 'Altura': serie_Altura_nova}
df_novo = | pd.DataFrame(dicionario_novo) | pandas.DataFrame |
"""
This script creates a boolean mask based on rules
1. is it boreal forest zone
2. In 2000, was there sufficient forest
"""
#==============================================================================
__title__ = "FRI calculator for the other datasets"
__author__ = "<NAME>"
__version__ = "v1.0(21.08.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date, date2num
from scipy import stats
# import rasterio
import xarray as xr
from dask.diagnostics import ProgressBar
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
import glob
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# import regionmask as rm
# import itertools
# Import debugging packages
import ipdb
# from rasterio.warp import transform
from shapely.geometry import Polygon
# import geopandas as gpd
# from rasterio import features
# from affine import Affine
# +++++ Import my packages +++++
import myfunctions.corefunctions as cf
# import MyModules.PlotFunctions as pf
# import MyModules.NetCDFFunctions as ncf
#==============================================================================
def main():
# ========== Setup the paths ==========
TCF = 10
# TCF = 50
dpath, chunksize = syspath()
data = datasets(dpath, chunksize, TCF=TCF)
# ========== select and analysis scale ==========
mwbox = [1]#, 2, 5]#, 10] #in decimal degrees
maskds = "esacci"
maskforce = False # Added to allow skiping of the mask
for dsn in data:
print(dsn)
# ========== Set up the filename and global attributes =========
if dsn.startswith("HANSEN"):
ppath = dpath + "/BurntArea/HANSEN/FRI/"
force = True
else:
ppath = dpath + "/BurntArea/%s/FRI/" % dsn
force = False
cf.pymkdir(ppath)
# ========== Get the dataset =========
mask = landseamaks(data, dsn, dpath, maskforce )
		# ========== Calculate the annual burn frequency ==========
ds_ann = ANNcalculator(data, dsn, mask, force, ppath, dpath, chunksize, TCF)
# force = True
# breakpoint()
# ========== work out the FRI ==========
FRIcal(ds_ann, mask, dsn, force, ppath, dpath, mwbox, data, chunksize, TCF)
# force = False
print(dsn, " Complete at:", pd.Timestamp.now())
ipdb.set_trace()
#==============================================================================
def FRIcal(ds_ann, mask, dsn, force, ppath, dpath, mwbox, data, chunksize, TCF):
""""""
""" Function to caluclate the FRI at different resolutions """
# ========== Add a loading string for forest cover in Hansen Datasets ==========
if dsn.startswith("HANSEN") and (TCF > 0.):
tcfs = "_%dperTC" % np.round(TCF)
else:
tcfs = ""
# ========== Setup a working path ==========
tpath = ppath+"tmp/"
# ========== work out the ration ==========
pix = abs(np.unique(np.diff(ds_ann.latitude.values))[0])
# ds_ann = ds_ann.chunk({"latitude":chunksize, "longitude":-1})
print(f"Loading annual data into ram at: { | pd.Timestamp.now() | pandas.Timestamp.now |
__author__ = '<NAME>'
import numpy as np
import pandas as pd
import random
import platform
import os
if platform.system() == 'Linux':
import xlsxwriter # support saving xlsx file in unix-domain systems
def makedir(save):
"""
    Create the required directory if it does not exist.
    Args:
        save: save function being decorated; it receives the population and file name
              and is executed only after the 'datasets' directory has been created
"""
def wrapper(*args):
directory = os.path.join(os.getcwd(), 'datasets')
if not os.path.exists(directory):
os.makedirs(directory)
save(*args)
return wrapper
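# A small usage sketch (the decorated function below is hypothetical, shown only for illustration):
#
#     @makedir
#     def save_population(population, file_name):
#         population.to_csv(os.path.join(os.getcwd(), 'datasets', file_name))
#
#     save_population(population_df, 'initial_population.csv')  # 'datasets/' is created first if missing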
class Configuration_Executer(object):
'''
    This class provides functional operations on the given configuration dataset,
    which will produce reasonable output after the transformations.
'''
def __init__(self, population_size, chromosome_size, equal_chromosomes, initialization_method, representation, saving_method):
try:
self.population_size = int(population_size)
except ValueError as v:
print("Wrong type of population_size input, check the DEFAULTS for more info")
raise
try:
self.chromosome_size = int(chromosome_size)
        except ValueError as v:
            print("Wrong type of chromosome_size input, check the DEFAULTS for more info")
            raise
try:
self.equal_chromosomes = bool(equal_chromosomes)
except ValueError as v:
print("Wrong type of equal_chromosomes input, check the DEFAULTS for more info")
raise
self.initialization_method = initialization_method
self.representation = representation
self.saving_method = saving_method
def count_chromosomes(self, chromosome_layer : str) -> int:
try:
count = int(input('How many chromosomes in {0} layer, full chromosome length is {1} : '.format(chromosome_layer, self.chromosome_size)))
if count > self.chromosome_size:
count = self.chromosome_size
if count < 0:
                raise Exception('Number of chromosomes cannot be a negative value')
return count
except ValueError as v:
raise Exception('Number of chromosomes in each layer must be an integer value!')
def random_initialization(self):
population = | pd.DataFrame() | pandas.DataFrame |
import math
import load_data
import pickle
import pandas as pd
import numpy as np
import datetime
from collections import deque
import scipy.stats as st
import ast
import astpretty
import re
def main():
# Used first in Organization.ipynb
print('\nCell Output')
get_cell_output()
print('\nCell Stats')
get_cell_stats()
print('\nCell Order')
get_cell_order()
print('\nCell Types')
get_cell_types()
print('\nComments')
get_comments()
# Used first in Packages.ipynb
print('\nGet Imports')
get_nb_imports()
print('\nGet Code')
get_nb_code()
print('\nGetting nb_imports_code_df')
nb_imports_code_df = load_data.load_nb_imports(code = True)
print('\nnb_imports_code_df loaded')
cell_types_df = load_data.load_cell_types()
print('\ncell_types loaded')
cell_stats_df = load_data.load_cell_stats()
print('\ncell_stats loaded')
cell_info_code_df = cell_types_df.merge(
cell_stats_df, on = 'file'
).merge(
nb_imports_code_df.rename(columns={'code':'code_list'}), on = 'file'
)
print('\ndfs combined')
#Used first in APIs.ipynb
print('\nGet Objects')
get_all_objects(cell_info_code_df)
print('\nGet Lines Per Code Cell')
get_lines_per_code_cell(cell_info_code_df)
print('\nGet Function Definitions')
get_function_defs(cell_info_code_df)
print('\nGet Function Uses')
get_function_use(cell_info_code_df)
print('\nSeparate User-defined functions from not user-defined')
add_user_funcs()
# Used first in Struggles.ipynb
    print('\nGet Errors')
get_errors()
print('\nGet Statuses')
get_statuses()
# Used first in Visualizations.ipynb
print('\nGet Visualization Uses')
get_vis_uses(nb_imports_code_df)
print('\nAdd Visualization Uses to Notebooks')
get_vis_uses_nb(nb_imports_code_df)
# Used first in Models.ipynb
print('\nGet Framework Uses')
get_framework_uses(nb_imports_code_df)
print('\nGet Magic')
get_magic()
def get_magic():
df_chunks = pd.read_csv(
'data_final/cells_final.csv',
header = 0,
usecols = ['file','cell_id','code'],
chunksize = 10000
)
def aggregate_special_lines(list_of_lines_of_code):
return [
l
for l in load_data.flatten([l.split('\n') for l in list_of_lines_of_code if str(l) != 'nan'])
if l.startswith('%') or '!' in l or
l.startswith('?') or l.endswith('?')
]
special_dfs = []
i = 0
start = datetime.datetime.now()
i = len(special_dfs)
for chunk in df_chunks:
df = chunk.groupby('file')['code'].aggregate(
aggregate_special_lines
).reset_index()
special_dfs.append(df)
if i%1000 == 0:
print(i, datetime.datetime.now() - start)
i+=1
end = datetime.datetime.now()
print('Chunks done in', end - start)
start = datetime.datetime.now()
special_df = pd.concat(
special_dfs,
sort = False
).reset_index(drop = True).groupby('file')['code'].aggregate(
load_data.flatten
).reset_index()
end = datetime.datetime.now()
print('Combined in', end - start)
start = datetime.datetime.now()
f = open('analysis_data/special_functions.df', 'wb')
pickle.dump(special_df, f)
f.close()
end = datetime.datetime.now()
print('Saved in', end - start)
def get_nb_code():
start = datetime.datetime.now()
df_chunks = pd.read_csv(
'data_final/cells_final.csv',
header = 0, usecols = ['file','code','cell_type'],
chunksize=10000
)
# 25 minutes
start = datetime.datetime.now()
i = 0
code_dfs = []
for chunk in df_chunks:
code_dfs.append(
chunk[chunk.cell_type == 'code'].groupby('file')['code'].aggregate(lambda x: list(x)).reset_index()
)
if i%1000 == 0:
print(i, datetime.datetime.now() - start)
i += 1
end = datetime.datetime.now()
print('Chunks', end - start)
code_df = pd.concat(code_dfs, sort = False).reset_index(drop=True)
start = datetime.datetime.now()
code_df = code_df.groupby('file')['code'].aggregate(load_data.flatten).reset_index()
end = datetime.datetime.now()
print('Combined', end - start)
print('now saving')
start = datetime.datetime.now()
try:
f = open('analysis_data/nb_code.df', 'wb')
pickle.dump(code_df, f)
f.close()
print('saved to pickle')
except:
try:
f = open('analysis_data/nb_code.df', 'wb')
pickle.dump(code_df, f)
f.close()
print('saved to pickle')
except:
try:
f = open('analysis_data/nb_code.df', 'wb')
pickle.dump(code_df, f)
f.close()
print('saved to pickle')
except:
code_df.to_csv('analysis_data/nb_code.csv', index = False)
print('saved to csv')
end = datetime.datetime.now()
print(end - start)
def get_all_objects(cell_info_code_df):
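    # Parse each notebook's concatenated code cells with `ast` and, for every assignment target
    # (plain names, tuples or lists of names), record the variable name together with the AST type
    # of the assigned value; notebooks whose code cannot be parsed are counted as unprocessed.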
# 1.5 hours
start = datetime.datetime.now()
all_objects = []
unprocessed = 0
target_types = [ast.Name,ast.Tuple,
ast.Attribute, ast.Subscript,
ast.List
]
for i, row in cell_info_code_df.iterrows():
o = {
'file': row['file'],
'objects': []
}
try:
all_code = '\n'.join([
c for c in '\n'.join([l for l in row.code_list if type(l) == str]).split('\n')
if (c != '' and not c.strip().startswith('%') and
not c.strip().startswith('?') and not c.strip().startswith('!')
)
])
tree = ast.parse(all_code)
except Exception as e:
all_objects.append(o)
unprocessed += 1
if i%200000 == 0:
print(i, datetime.datetime.now() - start, unprocessed, 'unprocessed')
continue
for t in tree.body:
if type(t) == ast.Assign:
value_type = type(t.value)
for target in t.targets:
if type(target) in [ast.Tuple, ast.List]:
for node in ast.walk(target):
if type(node) == ast.Name:
o['objects'].append((node.id, value_type))
else:
if type(target) == ast.Name:
for node in ast.walk(target):
if type(node) == ast.Name:
o['objects'].append((node.id, value_type))
all_objects.append(o)
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Found objects', end - start)
all_objects_df = pd.DataFrame(all_objects)
# 14 seconds
start = datetime.datetime.now()
f = open('analysis_data/all_objects.df', 'wb')
pickle.dump(all_objects_df, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_lines_per_code_cell(cell_info_code_df):
# 12.5 minutes
start = datetime.datetime.now()
lines_per_code_cell = [
row['lines_of_code'] / row['code']
for i, row in cell_info_code_df.iterrows()
if row['code'] != 0
]
end = datetime.datetime.now()
print('Calculated', end - start)
# 0.2 seconds
start = datetime.datetime.now()
f = open('analysis_data/lines_per_code_cell.list', 'wb')
pickle.dump(lines_per_code_cell, f)
f.close()
end = datetime.datetime.now()
print('Saved',end - start)
def get_function_use(cell_info_code_df):
'''
Get all function calls from a python file
The MIT License (MIT)
Copyright (c) 2016 <NAME> <<EMAIL>>
'''
class FuncCallVisitor(ast.NodeVisitor):
def __init__(self):
self._name = deque()
@property
def name(self):
return '.'.join(self._name)
@name.deleter
def name(self):
self._name.clear()
def visit_Name(self, node):
self._name.appendleft(node.id)
def visit_Attribute(self, node):
try:
self._name.appendleft(node.attr)
self._name.appendleft(node.value.id)
except AttributeError:
self.generic_visit(node)
def get_func_calls(tree):
func_calls = []
for node in ast.walk(tree):
if isinstance(node, ast.Call):
callvisitor = FuncCallVisitor()
callvisitor.visit(node.func)
func_calls.append((callvisitor.name, [type(a) for a in node.args]))
return func_calls
# 1 hour 45 minutes
start = datetime.datetime.now()
function_use = {
'functions': [],
'parameters': [],
'file': []
}
unprocessed = 0
for i, row in cell_info_code_df.iterrows():
nb_funcs = []
nb_params = []
try:
all_code = '\n'.join([c for c in '\n'.join([l for l in row.code_list if str(l) != 'nan']).split('\n') if (c != '' and
str(c) != 'nan' and not c.strip().startswith('%') and not c.strip().startswith('?') and
not c.strip().startswith('!'))])
tree = ast.parse(all_code)
except:
unprocessed += 1
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
continue
for t in tree.body:
try:
for f in get_func_calls(t):
if f[0] not in nb_funcs:
nb_funcs.append(f[0])
nb_params.append(len(f[1]))
except:
unprocessed += 1
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
continue
function_use['functions'].append(nb_funcs)
function_use['parameters'].append(nb_params)
function_use['file'].append(row['file'])
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Gone through for function uses', end - start)
function_use_df = pd.DataFrame(function_use)
# 48 seconds
start = datetime.datetime.now()
f = open('analysis_data/nb_function_use.df', 'wb')
pickle.dump(function_use_df, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_function_defs(cell_info_code_df):
start = datetime.datetime.now()
unprocessed = 0
function_defs = {
'function': [],
'parameters':[],
'file': []
}
for i, row in cell_info_code_df.iterrows():
try:
all_code = '\n'.join([c for c in '\n'.join([l for l in row.code_list if str(l) != 'nan']).split('\n') if (c != '' and
str(c) != 'nan' and not c.strip().startswith('%') and not c.strip().startswith('?') and
not c.strip().startswith('!'))])
tree = ast.parse(all_code)
except:
unprocessed += 1
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
continue
for t in tree.body:
if type(t) == ast.FunctionDef:
name = t.name
num_args = 0
for a in ast.walk(t.args):
if type(a) == ast.arg:
num_args += 1
function_defs['function'].append(name)
function_defs['parameters'].append(num_args)
function_defs['file'].append(row.file)
elif type(t) == ast.ClassDef:
name = t.name
num_args = 0
for b in t.body:
if type(b) == ast.FunctionDef and b.name == '__init__':
for a in ast.walk(b.args):
if type(a) == ast.arg and a.arg != 'self':
num_args += 1
elif type(b) == ast.FunctionDef:
name_b = name+"."+b.name
num_args_b = 0
for a in ast.walk(b.args):
if type(a) == ast.arg and a.arg != 'self':
num_args_b += 1
function_defs['function'].append(name_b)
function_defs['parameters'].append(num_args_b)
function_defs['file'].append(row.file)
function_defs['function'].append(name)
function_defs['parameters'].append(num_args)
function_defs['file'].append(row.file)
if i%200000 == 0:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print('Through cell_info_code for functions', end - start)
start = datetime.datetime.now()
function_defs_df = pd.DataFrame(function_defs)
f = open('analysis_data/function_defs.df', 'wb')
pickle.dump(function_defs_df, f)
f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def add_user_funcs():
notebooks = load_data.load_notebooks()
cell_stats_df = load_data.load_cell_stats()
cell_types_df = load_data.load_cell_types()
function_defs_df = load_data.load_function_defs()
function_use_df = load_data.load_function_use()
print('grouping...')
start = datetime.datetime.now()
function_defs_nb_df = function_defs_df.groupby('file')['function'].aggregate(lambda x: list(x)).reset_index().rename(columns={'function':'function_defs'})
end = datetime.datetime.now()
print('...grouped', end - start)
print('merging...')
start = datetime.datetime.now()
functions_df = function_use_df.merge(function_defs_nb_df, on = 'file', how = 'left')
functions_df.function_defs.loc[functions_df.function_defs.isna()] = [[]]*sum(functions_df.function_defs.isna())
end = datetime.datetime.now()
print('...merged', end - start)
start = datetime.datetime.now()
all_def = []
all_not = []
for i, row in functions_df.iterrows():
def_uses = [f for f in row.functions if f in row.function_defs]
not_uses = [f for f in row.functions if f not in row.function_defs]
all_def.append(def_uses)
all_not.append(not_uses)
if i%100000 == 0 or i == 100 or i == 1000:
print(i, datetime.datetime.now() - start)
end = datetime.datetime.now()
print(end - start)
function_use_df['user_def'] = all_def
function_use_df['not_user_def'] = all_not
t = datetime.datetime.now()
print('Added to df', t - end)
f = open('analysis_data/nb_function_use.df', 'wb')
pickle.dump(function_use_df, f)
f.close()
print('Saved', datetime.datetime.now() - t)
print('DONE')
def get_errors():
df_chunks = pd.read_csv(
'data_final/cells_final.csv',
header = 0, usecols = ['file','num_error','error_names','cell_id'],
chunksize=10000
)
# 25 minutes
start = datetime.datetime.now()
error_dfs = []
i = 0
for chunk in df_chunks:
try:
load_data.string_to_list(chunk, 'error_names')
error_dfs.append(
chunk.groupby('file')['num_error'].aggregate(['sum','count']).reset_index().merge(
chunk.groupby('file')['error_names'].aggregate(load_data.flatten).reset_index(),
on = 'file'
)
)
except Exception:
print(i, type(chunk))
if i%1000 == 0:
print(i, datetime.datetime.now() - start)
i+=1
end = datetime.datetime.now()
print('Chunks', end - start)
error_df = pd.concat(error_dfs, sort = False).reset_index(drop=True)
start = datetime.datetime.now()
error_df = error_df.groupby('file')['count'].sum().reset_index().merge(
error_df.groupby('file')['error_names'].aggregate(load_data.flatten).reset_index(),
on = 'file'
)
end = datetime.datetime.now()
print('Combined', end - start)
# 5 seconds
start = datetime.datetime.now()
f = open('analysis_data/error.df', 'wb')
pickle.dump(error_df, f)
    f.close()
end = datetime.datetime.now()
print('Saved', end - start)
def get_vis_uses_nb(nb_imports_code_df):
notebooks = | pd.read_csv('data_final/notebooks_final.csv') | pandas.read_csv |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
from copy import deepcopy
from functools import reduce
from typing import TypeVar, List, Generic, Type
import numpy as np
import pandas as pd
from hadar.optimizer.domain.input import Study
from hadar.optimizer.domain.output import Result
__all__ = ["ResultAnalyzer", "NetworkFluentAPISelector"]
T = TypeVar("T")
class Index(Generic[T]):
"""
Generic Index to use to select and rank data.
"""
def __init__(self, column, index=None):
"""
Initiate instance.
:param column: column name link to this index
:param index: list of index or element to filter from data. None by default to say keep all data.
"""
self.column = column
if index is None:
self.all = True
elif isinstance(index, list):
self.index = tuple(index)
self.all = len(index) == 0
elif not isinstance(index, tuple):
self.index = tuple([index])
self.all = False
else:
self.index = index
self.all = False
def filter(self, df: pd.DataFrame) -> pd.Series:
"""
Filter dataframe. Filter columns with columns attributes with index values.
:param df: dataframe to filter
:return: Series of boolean to set row to keep
"""
if self.all:
return df[self.column].notnull()
return df[self.column].isin(self.index)
def is_alone(self) -> bool:
"""
Ask if index filter element is alone.
:return: if index filter only one value return True else False
"""
return not self.all and len(self.index) <= 1
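# A small illustrative sketch (hypothetical frame, not part of the library API): an index keeps
# only the rows whose column value was selected, and keeps everything when index is None.
#
#     df = pd.DataFrame({"node": ["a", "b", "c"], "cost": [1, 2, 3]})
#     df[Index(column="node", index=["a", "c"]).filter(df)]   # rows for nodes 'a' and 'c'
#     df[Index(column="node", index=None).filter(df)]         # all rows ("all" mode)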
class ProdIndex(Index[str]):
"""Index implementation to filter productions"""
def __init__(self, index):
Index.__init__(self, column="name", index=index)
class ConsIndex(Index[str]):
""" Index implementation to filter consumptions"""
def __init__(self, index):
Index.__init__(self, column="name", index=index)
class StorIndex(Index[str]):
""" Index implementation to filter storage"""
def __init__(self, index):
Index.__init__(self, column="name", index=index)
class LinkIndex(Index[str]):
"""Index implementation to filter destination node"""
def __init__(self, index):
Index.__init__(self, column="dest", index=index)
class SrcConverter(Index[str]):
"""Index implementation to filter source converter"""
def __init__(self, index):
Index.__init__(self, column="name", index=index)
class DestConverter(Index[str]):
"""Index implementation to filter destination converter"""
def __init__(self, index):
Index.__init__(self, column="name", index=index)
class NodeIndex(Index[str]):
"""Index implementation to filter node"""
def __init__(self, index):
Index.__init__(self, column="node", index=index)
class NetworkIndex(Index[str]):
"""Index implementation fo filter network"""
def __init__(self, index):
Index.__init__(self, column="network", index=index)
class IntIndex(Index[int]):
"""Index implementation to handle int index with slice"""
def __init__(self, column: str, index):
"""
        Create instance.

        :param column: column name linked to this index
        :param index: a single int, a list of ints, or a slice of int positions to filter.
"""
if isinstance(index, slice):
start = 0 if index.start is None else index.start
            stop = -1 if index.stop is None else index.stop
step = 1 if index.step is None else index.step
index = tuple(range(start, stop, step))
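            # e.g. slice(0, 10, 2) is expanded to the explicit steps (0, 2, 4, 6, 8),
            # so slices and plain lists of ints behave the same further downstream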
Index.__init__(self, column=column, index=index)
class TimeIndex(IntIndex):
"""Index implementation to filter by time step"""
def __init__(self, index):
IntIndex.__init__(self, column="t", index=index)
class ScnIndex(IntIndex):
"""index implementation to filter by scenario"""
def __init__(self, index):
IntIndex.__init__(self, column="scn", index=index)
class ResultAnalyzer:
"""
Single object to encapsulate all postprocessing aggregation.
"""
def __init__(self, study: Study, result: Result):
"""
Create an instance.
:param study: study to use
:param result: result of study used
"""
self.result = result
self.study = study
self.consumption = ResultAnalyzer._build_consumption(self.study, self.result)
self.production = ResultAnalyzer._build_production(self.study, self.result)
self.storage = ResultAnalyzer._build_storage(self.study, self.result)
self.link = ResultAnalyzer._build_link(self.study, self.result)
self.src_converter = ResultAnalyzer._build_src_converter(
self.study, self.result
)
self.dest_converter = ResultAnalyzer._build_dest_converter(
self.study, self.result
)
@staticmethod
def _build_consumption(study: Study, result: Result):
"""
        Flatten all data to build the global consumption dataframe
columns: | cost | name | node | network | asked | given | t | scn |
"""
h = study.horizon
scn = study.nb_scn
elements = sum(
[
sum([len(n.consumptions) for n in net.nodes.values()])
for net in study.networks.values()
]
)
size = scn * h * elements
cons = {
"cost": np.empty(size, dtype=float),
"asked": np.empty(size, dtype=float),
"given": np.empty(size, dtype=float),
"name": np.empty(size, dtype=str),
"node": np.empty(size, dtype=str),
"network": np.empty(size, dtype=str),
"t": np.empty(size, dtype=float),
"scn": np.empty(size, dtype=float),
}
cons = | pd.DataFrame(data=cons) | pandas.DataFrame |
import streamlit as st
import json
import ast
import os
import datetime
from new_card_transformation import transform_card
import pandas as pd
import numpy as np
from functions import obtain_unique_values, load_models, predict_dummy_binary, predict_dummy_multiclass, predict_dummy_numeric
from streamlit_player import st_player
import nltk
# Download nltk english stopwords
nltk.download('stopwords')
# Show Magic's logo
st.sidebar.image('./static/Magic-The-Gathering-Logo.png', use_column_width=True)
# Title to display on the App
st.markdown("""
# Magic The Gathering
### Artificial Intelligence Models
Welcome to our **Magic: The Gathering card prediction models** with **Artificial Intelligence**!
*Our vision is to bring AI to Magic and help evolve this amazing game!*
""")
col4, col5 = st.columns(2)
# If they check this button, we show a much more detailed description
if col4.checkbox('<<< HOW DOES IT WORK?'):
st.markdown("""
    This site serves as a demonstration of many Machine Learning models trained on card information from the Scryfall Magic The Gathering API.
**The process goes something like this**: First we get all Magic cards from the Scryfall API, we then transform those cards into data prepared for machine learning,
    converting each card into hundreds of different data points. That's when we proceed to *show* that data to different machine learning models alongside the target
    we want to predict. The model will start learning patterns hidden within the data and improve as we feed it more data, until it is able to provide
accurate predictions.
As of today, we have 3 types of models and use 3 different algorithms:
* **BINARY** models, **MULTICLASS** models and **NUMERIC** models, to define the type of prediction we want to do (a YES or NO question vs a specific category prediction)
* **Logistic/Linear Regression**, **Gradient Boosting** and **Deep Learning** (Embeddings and bidirectional LSTM network)
If you want to learn more about the process, [**here's an article**](https://towardsdatascience.com/artificial-intelligence-in-magic-the-gathering-4367e88aee11)
**How do you start testing the models?**
    * **<<<** Look at the sidebar on the left. That's where you pick a card or create a card yourself!
* Select the data source
* FROM JSON: you provide the JSON file with your card (you can also download the template).
* USE FORM: you complete a form with your own card data, starting from a template.
* FROM DB: you load a REAL card from the app's database. You can also modify the card!
* **vvv** Look down below: Select the model you want to use
* Run the model with the **EXECUTE MODEL** button
""")
# Link to the YouTube channel
link = '[Watch all the videos in the YouTube channel](https://www.youtube.com/channel/UC3__atAqSUrIMNLg_-6aJBA)'
col5.markdown(link, unsafe_allow_html=True)
# Embed a video tutorial
st_player("https://youtu.be/30_tT_R7qtQ") # youtube video tutorial
# st.video("./static/Media1.mp4", format="video/mp4")
# Load the Card DB
card_db = pd.read_csv("./datasets/WEBAPP_datasets_vow_20211220_FULL.csv")
# Obtain the unique values from "keywords" column
keyword_unique_list = obtain_unique_values(card_db, 'keywords')
# Obtain the unique values from "set_type" column
settype_unique_list = list(card_db['set_type'].unique())
# Model selection
with open('./models/model_marketplace.json') as json_file:
model_dict = json.load(json_file)
# Title for model section
st.write("### Use the models")
# Allow the user to select a model
model_selected = st.selectbox('Select your model', list(model_dict.keys()))
# Get all the data of the model selected
model = model_dict[model_selected]['MODEL']
# Print some description of the selected model to the user
col1, col2, col3 = st.columns(3)
col1.write(f"""**Model Family**: {model_dict[model_selected]['MODEL FAMILY']}""")
col2.write(f"""**Model Question**: {model_dict[model_selected]['MODEL QUESTION']}""")
col3.write(f"""**Model Description**: {model_dict[model_selected]['MODEL DESCRIPTION']}""")
# Source Selection
source_selection = st.sidebar.radio('Select a new card!', ['FROM JSON', 'USE FORM', 'FROM DB'])
# FROM JSON
if source_selection == 'FROM JSON':
# Load a new JSON with a card
json_file = st.sidebar.file_uploader('Upload a card in JSON format')
# Load the sample JSON to allow user to download a sample file
with open("./dummy/sample.json", encoding="utf-8") as jsonFile:
sample_json = json.load(jsonFile)
# Allow the user to download the sample in JSON format
st.sidebar.download_button('Download JSON sample', json.dumps(sample_json, ensure_ascii=False), file_name="sample.json")
try:
new_card = json.load(json_file)
st.sidebar.write(new_card)
except:
pass
# FROM FORM
else:
if source_selection == 'USE FORM':
name = st.sidebar.text_input('Cardname', value="Normal Creature")
lang = st.sidebar.selectbox('Language', sorted(card_db['lang'].unique()))
released_at = st.sidebar.date_input('Released at', value=datetime.datetime(2021, 9, 24))
mana_cost = st.sidebar.text_input('Mana Cost', value="{3}{W}")
cmc = st.sidebar.number_input('CMC', value=4)
type_line = st.sidebar.text_input('Card Type', value="Creature — Warrior")
oracle_text = st.sidebar.text_area('Oracle Text', value="Vigilance\nWhenever cardname attacks, put a +1/+1 counter on it")
oracle_text_1 = st.sidebar.text_area('DFC: Oracle Text Face', value="None")
oracle_text_2 = st.sidebar.text_area('DFC: Oracle Text Back', value="None")
power = st.sidebar.select_slider('Power', [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,"None"], value=5)
toughness = st.sidebar.select_slider('Toughness', [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,"None"], value=3)
colors = st.sidebar.multiselect('Colors', ['W', 'U', 'B', 'R', 'G'], default=["W"])
color_identity = st.sidebar.multiselect('Color Identity', ['W', 'U', 'B', 'R', 'G'], default=["W"])
keywords = st.sidebar.multiselect('Keywords', keyword_unique_list, default=["Vigilance"])
legality_standard = st.sidebar.select_slider('Standard Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_alchemy = st.sidebar.select_slider('Alchemy Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_future = st.sidebar.select_slider('Future Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_historic = st.sidebar.select_slider('Historic Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_gladiator = st.sidebar.select_slider('Gladiator Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_pioneer = st.sidebar.select_slider('Pioneer Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_modern = st.sidebar.select_slider('Modern Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_legacy = st.sidebar.select_slider('Legacy Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_pauper = st.sidebar.select_slider('Pauper Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="not_legal")
legality_vintage = st.sidebar.select_slider('Vintage Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_penny = st.sidebar.select_slider('Penny Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_commander = st.sidebar.select_slider('Commander Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_brawl = st.sidebar.select_slider('Brawl Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_histbrawl = st.sidebar.select_slider('Historic Brawl Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_paupercomm = st.sidebar.select_slider('Pauper Commander Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="restricted")
legality_duel = st.sidebar.select_slider('Duel Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="legal")
legality_oldschool = st.sidebar.select_slider('Oldschool Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="not_legal")
legality_premodern = st.sidebar.select_slider('Premodern Legality', ["legal","restricted","not_legal", "banned", "suspended"], value="not_legal")
games = st.sidebar.multiselect('Games', ["arena", "paper", "mtgo"], default=["arena", "paper", "mtgo"])
set = st.sidebar.text_input('Set', value="kld")
set_name = st.sidebar.text_input('Set Name', value="Kaladesh")
set_type = st.sidebar.select_slider('Set Type', settype_unique_list, value="expansion")
digital = st.sidebar.select_slider('Digital', [True,False], value=False)
rarity = st.sidebar.select_slider('Rarity', ['common','uncommon','rare','mythic'], value='uncommon')
flavor_text = st.sidebar.text_area('Flavor Text', value="")
artist = st.sidebar.text_input('Artist Name', value="<NAME>")
edhrec_rank = st.sidebar.number_input('EDHREC Rank', value=21000)
price_usd = st.sidebar.number_input('USD Price',step=1.,format="%.2f", value=0.07)
price_usdfoil = st.sidebar.number_input('USD Foil Price',step=1.,format="%.2f", value=0.13)
price_usdetched = st.sidebar.number_input('USD Etched Foil Price',step=1.,format="%.2f", value=0.13)
price_eur = st.sidebar.number_input('EUR Price',step=1.,format="%.2f", value=0.23)
price_eurfoil = st.sidebar.number_input('EUR Foil Price',step=1.,format="%.2f", value=0.30)
price_tix = st.sidebar.number_input('TIX Price',step=1.,format="%.2f", value=0.01)
loyalty = st.sidebar.select_slider('Planeswalker Loyalty', [0,1,2,3,4,5,6,7,"None"], value="None")
prints = st.sidebar.number_input('Prints', value=1)
image_uris = st.sidebar.text_input('Image uris', value="None")
image_uris_1 = st.sidebar.text_input('Image uris 1', value="None")
image_uris_2 = st.sidebar.text_input('Image uris 2', value="None")
card_faces = st.sidebar.text_input('Card Faces', value=None)
new_card = {"name": name,
"lang": lang,
"released_at": str(released_at),
"mana_cost": mana_cost,
"cmc": cmc,
"type_line": type_line,
"oracle_text": oracle_text,
"power": str(power),
"toughness": str(toughness),
"colors": colors,
"color_identity": color_identity,
"keywords": keywords,
"legalities": {"standard": legality_standard,
"alchemy": legality_alchemy,
"future": legality_future,
"historic": legality_historic,
"gladiator": legality_gladiator,
"pioneer": legality_pioneer,
"modern": legality_modern,
"legacy": legality_legacy,
"pauper": legality_pauper,
"vintage": legality_vintage,
"penny": legality_penny,
"commander": legality_commander,
"brawl": legality_brawl,
"historicbrawl": legality_histbrawl,
"paupercommander": legality_paupercomm,
"duel": legality_duel,
"oldschool": legality_oldschool,
"premodern": legality_premodern},
"games": games,
"set": set,
"set_name": set_name,
"set_type": set_type,
"digital": digital,
"rarity": rarity,
"flavor_text": flavor_text,
"artist": artist,
"edhrec_rank": edhrec_rank,
"prices": {"usd": price_usd,
"usd_foil": price_usdfoil,
"usd_etched": price_usdetched,
"eur": price_eur,
"eur_foil": price_eurfoil,
"tix": price_tix},
"loyalty": loyalty,
"prints": prints,
"image_uris": image_uris,
"card_faces": card_faces,
"oracle_text_1": oracle_text_1,
"oracle_text_2": oracle_text_2,
"image_uris_1": image_uris_1,
"image_uris_2": image_uris_2}
# Allow the user to download the card in JSON format
st.sidebar.download_button('Download JSON', json.dumps(new_card, ensure_ascii=False), file_name="new_card.json")
try:
st.sidebar.write(new_card)
except:
pass
# FROM DB
else:
if source_selection == 'FROM DB':
st.sidebar.write("Images are courtesy of the Scryfall API")
# Allow the user to select the set
selected_set = st.sidebar.selectbox('Select a set', sorted(card_db['set_name'].unique()))
# Allow the user to select a card
# Get the data from the selected card
selected_card = st.sidebar.selectbox('Select your card', card_db[card_db['set_name']==selected_set]['name'].unique())
selected_card_df = card_db[(card_db['set_name']==selected_set) & (card_db['name']==selected_card)]
# Show the Card Picture
try:
st.sidebar.image(ast.literal_eval(selected_card_df['image_uris'].values[0])['large'], width=200)
except:
st.sidebar.image([ast.literal_eval(selected_card_df['image_uris_1'].values[0])['large'],
ast.literal_eval(selected_card_df['image_uris_2'].values[0])['large']], width=200)
name = st.sidebar.text_input('Cardname', value=selected_card_df['name'].values[0])
lang = st.sidebar.text_input('Language', value=selected_card_df['lang'].values[0])
released_at = st.sidebar.date_input('Released at', value=datetime.datetime.strptime(selected_card_df['released_at'].values[0], '%Y-%m-%d'))
mana_cost = st.sidebar.text_input('Mana Cost', value=selected_card_df['mana_cost'].values[0])
cmc = st.sidebar.number_input('CMC', value=int(selected_card_df['cmc'].values[0]))
type_line = st.sidebar.text_input('Card Type', value=selected_card_df['type_line'].values[0])
oracle_text = st.sidebar.text_area('Oracle Text', value=selected_card_df['oracle_text'].values[0])
oracle_text_1 = st.sidebar.text_area('DFC: Oracle Text Face', value=selected_card_df['oracle_text_1'].values[0])
oracle_text_2 = st.sidebar.text_area('DFC: Oracle Text Back', value=selected_card_df['oracle_text_2'].values[0])
power = st.sidebar.select_slider('Power', ['0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15',"None"], value=selected_card_df['power'].values[0])
toughness = st.sidebar.select_slider('Toughness', ['0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15',"None"], value=selected_card_df['toughness'].values[0])
colors = st.sidebar.multiselect('Colors', ['W', 'U', 'B', 'R', 'G'], default=ast.literal_eval(selected_card_df['colors'].values[0]))
color_identity = st.sidebar.multiselect('Color Identity', ['W', 'U', 'B', 'R', 'G'], default=ast.literal_eval(selected_card_df['color_identity'].values[0]))
keywords = st.sidebar.multiselect('Keywords', keyword_unique_list, default=ast.literal_eval(selected_card_df['keywords'].values[0]))
# Parse the legalities dict once and reuse it for every format slider
card_legalities = ast.literal_eval(selected_card_df['legalities'].values[0])
legality_options = ["legal", "restricted", "not_legal", "banned", "suspended"]
legality_standard = st.sidebar.select_slider('Standard Legality', legality_options, value=card_legalities['standard'])
legality_alchemy = st.sidebar.select_slider('Alchemy Legality', legality_options, value=card_legalities['alchemy'])
legality_future = st.sidebar.select_slider('Future Legality', legality_options, value=card_legalities['future'])
legality_historic = st.sidebar.select_slider('Historic Legality', legality_options, value=card_legalities['historic'])
legality_gladiator = st.sidebar.select_slider('Gladiator Legality', legality_options, value=card_legalities['gladiator'])
legality_pioneer = st.sidebar.select_slider('Pioneer Legality', legality_options, value=card_legalities['pioneer'])
legality_modern = st.sidebar.select_slider('Modern Legality', legality_options, value=card_legalities['modern'])
legality_legacy = st.sidebar.select_slider('Legacy Legality', legality_options, value=card_legalities['legacy'])
legality_pauper = st.sidebar.select_slider('Pauper Legality', legality_options, value=card_legalities['pauper'])
legality_vintage = st.sidebar.select_slider('Vintage Legality', legality_options, value=card_legalities['vintage'])
legality_penny = st.sidebar.select_slider('Penny Legality', legality_options, value=card_legalities['penny'])
legality_commander = st.sidebar.select_slider('Commander Legality', legality_options, value=card_legalities['commander'])
legality_brawl = st.sidebar.select_slider('Brawl Legality', legality_options, value=card_legalities['brawl'])
legality_histbrawl = st.sidebar.select_slider('Historic Brawl Legality', legality_options, value=card_legalities['historicbrawl'])
legality_paupercomm = st.sidebar.select_slider('Pauper Commander Legality', legality_options, value=card_legalities['paupercommander'])
legality_duel = st.sidebar.select_slider('Duel Legality', legality_options, value=card_legalities['duel'])
legality_oldschool = st.sidebar.select_slider('Oldschool Legality', legality_options, value=card_legalities['oldschool'])
legality_premodern = st.sidebar.select_slider('Premodern Legality', legality_options, value=card_legalities['premodern'])
games = st.sidebar.multiselect('Games', ["arena", "paper", "mtgo"], default=ast.literal_eval(selected_card_df['games'].values[0]))
set = st.sidebar.text_input('Set', value=selected_card_df['set'].values[0])
set_name = st.sidebar.text_input('Set Name', value=selected_card_df['set_name'].values[0])
set_type = st.sidebar.select_slider('Set Type', settype_unique_list, value=selected_card_df['set_type'].values[0])
digital = st.sidebar.select_slider('Digital', [True,False], value=selected_card_df['digital'].values[0])
rarity = st.sidebar.select_slider('Rarity', ['common','uncommon','rare','mythic'], value=selected_card_df['rarity'].values[0])
flavor_text = st.sidebar.text_area('Flavor Text', value=selected_card_df['flavor_text'].values[0])
artist = st.sidebar.text_input('Artist Name', value=selected_card_df['artist'].values[0])
edhrec_rank = st.sidebar.number_input('EDHREC Rank', value=int(selected_card_df['edhrec_rank'].values[0]))
# Parse the prices dict once; missing (None) prices default to 0
card_prices = ast.literal_eval(selected_card_df['prices'].values[0])
price_usd = st.sidebar.number_input('USD Price', step=1., format="%.2f", value=float(card_prices['usd'] or 0))
price_usdfoil = st.sidebar.number_input('USD Foil Price', step=1., format="%.2f", value=float(card_prices['usd_foil'] or 0))
price_usdetched = st.sidebar.number_input('USD Etched Foil Price', step=1., format="%.2f", value=float(card_prices['usd_etched'] or 0))
price_eur = st.sidebar.number_input('EUR Price', step=1., format="%.2f", value=float(card_prices['eur'] or 0))
price_eurfoil = st.sidebar.number_input('EUR Foil Price', step=1., format="%.2f", value=float(card_prices['eur_foil'] or 0))
price_tix = st.sidebar.number_input('TIX Price', step=1., format="%.2f", value=float(card_prices['tix'] or 0))
loyalty = st.sidebar.select_slider('Planeswalker Loyalty', ['0','1','2','3','4','5','6','7',"None"], value=selected_card_df['loyalty'].values[0])
prints = st.sidebar.number_input('Prints', value=int(selected_card_df['prints'].values[0]))
# Single-faced cards provide image_uris; double-faced cards provide image_uris_1 / image_uris_2
try:
    image_uris = st.sidebar.text_input('Image uris', value=ast.literal_eval(selected_card_df['image_uris'].values[0])['normal'])
except Exception:
    image_uris = st.sidebar.text_input('Image uris', value="None")
try:
    image_uris_1 = st.sidebar.text_input('Image uris 1', value=ast.literal_eval(selected_card_df['image_uris_1'].values[0])['normal'])
except Exception:
    image_uris_1 = st.sidebar.text_input('Image uris 1', value="None")
try:
    image_uris_2 = st.sidebar.text_input('Image uris 2', value=ast.literal_eval(selected_card_df['image_uris_2'].values[0])['normal'])
except Exception:
    image_uris_2 = st.sidebar.text_input('Image uris 2', value="None")
card_faces = st.sidebar.text_input('Card Faces', value=None)
new_card = {"name": name,
"lang": lang,
"released_at": str(released_at),
"mana_cost": mana_cost,
"cmc": cmc,
"type_line": type_line,
"oracle_text": oracle_text,
"power": str(power),
"toughness": str(toughness),
"colors": colors,
"color_identity": color_identity,
"keywords": keywords,
"legalities": {"standard": legality_standard,
"alchemy": legality_alchemy,
"future": legality_future,
"historic": legality_historic,
"gladiator": legality_gladiator,
"pioneer": legality_pioneer,
"modern": legality_modern,
"legacy": legality_legacy,
"pauper": legality_pauper,
"vintage": legality_vintage,
"penny": legality_penny,
"commander": legality_commander,
"brawl": legality_brawl,
"historicbrawl": legality_histbrawl,
"paupercommander": legality_paupercomm,
"duel": legality_duel,
"oldschool": legality_oldschool,
"premodern": legality_premodern},
"games": games,
"set": set,
"set_name": set_name,
"set_type": set_type,
"digital": digital,
"rarity": rarity,
"flavor_text": flavor_text,
"artist": artist,
"edhrec_rank": edhrec_rank,
"prices": {"usd": price_usd,
"usd_foil": price_usdfoil,
"usd_etched": price_usdetched,
"eur": price_eur,
"eur_foil": price_eurfoil,
"tix": price_tix},
"loyalty": loyalty,
"prints": prints,
"image_uris": image_uris,
"card_faces": card_faces,
"oracle_text_1": oracle_text_1,
"oracle_text_2": oracle_text_2,
"image_uris_1": image_uris_1,
"image_uris_2": image_uris_2}
# Allow the user to download the card in JSON format
st.sidebar.download_button('Download JSON', json.dumps(new_card, ensure_ascii=False), file_name="new_card.json")
try:
st.sidebar.write(new_card)
except Exception:
pass
# Get the model type and the model target
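# Assumption: model identifiers follow the pattern '<TYPE>_<target>' (e.g. the hypothetical
# 'BIN_color_identity'), where TYPE is one of BIN (binary), MULT (multiclass) or REGR
# (regression), matching the three branches dispatched on below.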
model_type, target = model.split("_", 1)
if st.button('EXECUTE MODEL!'):
try:
with st.spinner('Executing...'):
# Transform the card to the data format required
# processed_card = transform_card(card_from_json)
processed_card = transform_card(new_card)
# Print it to the app
st.write("#### Card transformed to data:")
st.dataframe(processed_card)
# Load the models and define the target
lr_model, gb_model, dl_model, scaler, tokenizer, required_model_cols, max_seq_lens, selected_classes, int_to_color = load_models(model_type, target)
# PREDICTIONS
if model_type == "BIN":
# Obtain the predictions BINARY
prediction_response = predict_dummy_binary(processed_card, tokenizer, max_seq_lens, scaler, required_model_cols, lr_model, gb_model, dl_model)
else:
if model_type == "MULT":
# Obtain the predictions MULTICLASS
prediction_response = predict_dummy_multiclass(processed_card, tokenizer, max_seq_lens, scaler, required_model_cols, selected_classes, int_to_color, lr_model, gb_model, dl_model)
else:
if model_type == "REGR":
# for cmc: converted mana cost cannot go below 0, so use that as the regression minimum
minimum_val = 0
# Obtain the NUMERIC
prediction_response = predict_dummy_numeric(processed_card, tokenizer, max_seq_lens, scaler, required_model_cols, minimum_val, lr_model, gb_model, dl_model)
else:
pass
# Print the predictions to the app
# Predictions in Markup format
st.write("#### Predictions:")
for i in prediction_response:
st.write(f"##### {i}")
prediction_df = | pd.DataFrame([prediction_response[i]]) | pandas.DataFrame |
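# A minimal sketch (assumption, not the original code) of how the per-model display loop
# above could finish: build a one-row DataFrame from each prediction entry and render it.
# for i in prediction_response:
#     st.write(f"##### {i}")
#     prediction_df = pd.DataFrame([prediction_response[i]])
#     st.dataframe(prediction_df)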
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 08:59:17 2019
@author: <NAME>
@contact: <EMAIL>
"""
import pandas as pd
def relative_strong_signal(data, threshold, val_threshold):
"""Compute a date-based, cross-sectional relative strength/weakness indicator.
The input dataframe is expected to have dates as rows and product prices as columns.
We select the dates on which the market overall drops/rises while only a very small
portion of products (the `threshold` fraction) moves in the opposite direction.
The output dataframe gives the relative strong/weak indicator together with that
date's return and the associated quantile.
"""
answers = {} # Output={"Product":[Dates,return,threshold]..}
ans_df = | pd.DataFrame() | pandas.DataFrame |
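# A minimal sketch (assumption, not the original implementation) of how the body of
# relative_strong_signal could be completed from the docstring above: flag the dates on
# which the market broadly moves one way while only a small fraction of products
# (at most `threshold`) moves the other way. `val_threshold` is assumed here to be the
# minimum average market move required before a date is considered at all.
import pandas as pd  # repeated so the sketch is self-contained

def relative_strong_signal_sketch(data, threshold, val_threshold):
    returns = data.pct_change().dropna(how="all")  # daily product returns
    rows = []
    for date, row in returns.iterrows():
        valid = row.dropna()
        if valid.empty:
            continue
        frac_up = (valid > 0).mean()      # fraction of products that rose on this date
        quantiles = valid.rank(pct=True)  # cross-sectional return quantiles
        if frac_up <= threshold and valid.mean() < -val_threshold:
            # Market broadly down, a few products up -> relatively strong
            picks, label = valid[valid > 0], "strong"
        elif frac_up >= 1 - threshold and valid.mean() > val_threshold:
            # Market broadly up, a few products down -> relatively weak
            picks, label = valid[valid < 0], "weak"
        else:
            continue
        for product, ret in picks.items():
            rows.append({"product": product, "date": date, "signal": label,
                         "return": ret, "quantile": quantiles[product]})
    return pd.DataFrame(rows)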
"""
Utilities for loading datasets
"""
import os
import pandas as pd
from ..utils.load_data import load_from_tsfile_to_dataframe
__all__ = [
"load_airline",
"load_gunpoint",
"load_arrow_head",
"load_italy_power_demand",
"load_basic_motions",
"load_japanese_vowels",
"load_shampoo_sales",
"load_longley",
"load_lynx"
]
__author__ = ['<NAME>', '<NAME>', '@big-o']
DIRNAME = 'data'
MODULE = os.path.dirname(__file__)
# time series classification data sets
def _load_dataset(name, split, return_X_y):
"""
Helper function to load time series classification datasets.
"""
if split in ("train", "test"):
fname = name + '_' + split.upper() + '.ts'
abspath = os.path.join(MODULE, DIRNAME, name, fname)
X, y = load_from_tsfile_to_dataframe(abspath)
# if split is None, load both train and test set
elif split is None:
X = pd.DataFrame(dtype="object")
y = pd.Series(dtype="object")
for split in ("train", "test"):
fname = name + '_' + split.upper() + '.ts'
abspath = os.path.join(MODULE, DIRNAME, name, fname)
result = load_from_tsfile_to_dataframe(abspath)
X = pd.concat([X, pd.DataFrame(result[0])])
y = pd.concat([y, pd.Series(result[1])])
else:
raise ValueError("Invalid `split` value")
# Return appropriately
if return_X_y:
return X, y
else:
X['class_val'] = | pd.Series(y) | pandas.Series |
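# A hedged sketch (assumption) of how one of the public loaders listed in __all__ above
# presumably wraps _load_dataset; the on-disk folder name "GunPoint" is an assumption.
def load_gunpoint(split=None, return_X_y=False):
    """Load the GunPoint time series classification problem (sketch)."""
    return _load_dataset("GunPoint", split, return_X_y)

# Example usage (assuming the data files ship with the package):
# X_train, y_train = load_gunpoint(split="train", return_X_y=True)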